hexsha
stringlengths 40
40
| size
int64 10
805k
| ext
stringclasses 6
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 4
176
| max_stars_repo_name
stringlengths 7
114
| max_stars_repo_head_hexsha
stringlengths 40
40
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 4
176
| max_issues_repo_name
stringlengths 7
114
| max_issues_repo_head_hexsha
stringlengths 40
40
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
48.5k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 4
176
| max_forks_repo_name
stringlengths 7
114
| max_forks_repo_head_hexsha
stringlengths 40
40
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 10
805k
| avg_line_length
float64 5.53
11k
| max_line_length
int64 10
129k
| alphanum_fraction
float64 0.13
0.93
| content_no_comment
stringlengths 0
449k
| is_comment_constant_removed
bool 2
classes | is_sharp_comment_removed
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7174cab91de04cda9ea71e938b27612f1038cb9
| 11,857
|
py
|
Python
|
tests/test_stochatreat_assignment.py
|
RoyalTS/stochatreat
|
6e638e748b8638b64a185229f78967cf864cd45e
|
[
"MIT"
] | null | null | null |
tests/test_stochatreat_assignment.py
|
RoyalTS/stochatreat
|
6e638e748b8638b64a185229f78967cf864cd45e
|
[
"MIT"
] | 13
|
2019-07-11T13:13:39.000Z
|
2019-07-19T14:26:57.000Z
|
tests/test_stochatreat_assignment.py
|
RoyalTS/stochatreat
|
6e638e748b8638b64a185229f78967cf864cd45e
|
[
"MIT"
] | null | null | null |
import pytest
from math import gcd
import numpy as np
import pandas as pd
from stochatreat import stochatreat
from stochatreat import get_lcm_prob_denominators
################################################################################
# fixtures
################################################################################
@pytest.fixture(params=[10_000, 100_000])
def df(request):
    """Parametrized fixture: random units with two stratum columns."""
    n_units = request.param
    columns = {
        "id": np.arange(n_units),
        "dummy": [1] * n_units,
        "stratum1": np.random.randint(1, 100, size=n_units),
        "stratum2": np.random.randint(0, 2, size=n_units),
    }
    return pd.DataFrame(data=columns)
# Treatment-assignment probability vectors reused across many tests.
standard_probs = [
    [0.1, 0.9],
    [1 / 3, 2 / 3],
    [0.5, 0.5],
    [2 / 3, 1 / 3],
    [0.9, 0.1],
]
# Stratum-column combinations (drawn from the ``df`` fixture) reused across
# many tests.
standard_stratum_cols = [
    ["dummy"],
    ["stratum1"],
    ["stratum1", "stratum2"],
]
# A DataFrame whose stratum sizes divide the assignment-cell size evenly, so
# treatment assignment produces no misfits.
@pytest.fixture
def df_no_misfits():
    """Fixture: 100 strata of 10 units each, so counts divide evenly."""
    n_units = 1_000
    units_per_stratum = 10
    strata = np.repeat(
        np.arange(n_units / units_per_stratum), repeats=units_per_stratum
    )
    return pd.DataFrame(data={"id": np.arange(n_units), "stratum": strata})
# Probability vectors whose denominators divide the stratum size of 10.
probs_no_misfits = [
    [0.1, 0.9],
    [0.5, 0.5],
    [0.9, 0.1],
]
################################################################################
# overall treatment assignment proportions
################################################################################
@pytest.mark.parametrize("n_treats", [2, 3, 4, 5, 10])
@pytest.mark.parametrize("stratum_cols", standard_stratum_cols)
def test_stochatreat_no_probs(n_treats, stratum_cols, df):
    """
    Overall assignment shares across all strata should approach the equal
    default probabilities 1/n_treats -- statistical (relies on the Law of
    Large Numbers), not deterministic.
    """
    assignments = stochatreat(
        data=df,
        stratum_cols=stratum_cols,
        treats=n_treats,
        idx_col="id",
        random_state=42
    )
    shares = assignments.groupby("treat")["id"].size() / assignments.shape[0]
    expected = np.full(n_treats, 1 / n_treats)
    np.testing.assert_almost_equal(shares, expected, decimal=2)
@pytest.mark.parametrize("probs", standard_probs)
@pytest.mark.parametrize("stratum_cols", standard_stratum_cols)
def test_stochatreat_probs(probs, stratum_cols, df):
    """
    Overall assignment shares across all strata should approach the unequal
    probabilities in ``probs`` -- statistical (relies on the Law of Large
    Numbers), not deterministic.
    """
    assignments = stochatreat(
        data=df,
        stratum_cols=stratum_cols,
        treats=len(probs),
        idx_col="id",
        probs=probs,
        random_state=42,
    )
    shares = assignments.groupby("treat")["id"].size() / assignments.shape[0]
    np.testing.assert_almost_equal(shares, np.array(probs), decimal=2)
@pytest.mark.parametrize("probs", probs_no_misfits)
def test_stochatreat_no_misfits(probs, df_no_misfits):
    """
    Overall assignment shares should match ``probs`` when strata are sized
    so that no misfits arise.
    """
    assignments = stochatreat(
        data=df_no_misfits,
        stratum_cols=["stratum"],
        treats=len(probs),
        idx_col="id",
        probs=probs,
        random_state=42,
    )
    shares = assignments.groupby("treat")["id"].size() / assignments.shape[0]
    np.testing.assert_almost_equal(shares, np.array(probs), decimal=2)
@pytest.mark.parametrize("probs", standard_probs)
def test_stochatreat_only_misfits(probs):
    """
    Overall assignment shares should approach ``probs`` even when every
    stratum holds exactly one unit (all misfits) and the sample is large --
    statistical (relies on the Law of Large Numbers), not deterministic.
    """
    n_units = 10_000
    data = pd.DataFrame(
        data={
            "id": np.arange(n_units),
            "stratum": np.arange(n_units),
        }
    )
    assignments = stochatreat(
        data=data,
        stratum_cols=["stratum"],
        treats=len(probs),
        idx_col="id",
        probs=probs,
        random_state=42,
    )
    shares = assignments.groupby("treat")["id"].size() / assignments.shape[0]
    np.testing.assert_almost_equal(shares, np.array(probs), decimal=2)
################################################################################
# within-stratum treatment assignments
################################################################################
def get_within_strata_counts(treats):
    """Return per-(stratum, treatment) counts merged with stratum totals.

    The result has one row per (stratum_id, treat) pair with columns
    ``treat_count`` and ``stratum_count``.
    """
    by_treat = (
        treats.groupby(["stratum_id", "treat"])[["id"]]
        .count()
        .rename(columns={"id": "treat_count"})
        .reset_index()
    )
    by_stratum = (
        treats.groupby(["stratum_id"])[["id"]]
        .count()
        .rename(columns={"id": "stratum_count"})
        .reset_index()
    )
    return pd.merge(by_treat, by_stratum, on="stratum_id", how="left")
def compute_count_diff(treats, probs):
    """
    Line up within-stratum treatment counts with the counts required by
    ``probs`` and return the merged frame, including an absolute
    ``count_diff`` column per (stratum, treatment) cell, used in several
    tests below.
    """
    required = pd.DataFrame(
        {"required_prop": probs, "treat": range(len(probs))}
    )
    comp = pd.merge(
        get_within_strata_counts(treats), required, on="treat", how="left"
    )
    comp["desired_counts"] = comp["stratum_count"] * comp["required_prop"]
    comp["count_diff"] = (comp["treat_count"] - comp["desired_counts"]).abs()
    return comp
@pytest.mark.parametrize("n_treats", [2, 3, 4, 5, 10])
@pytest.mark.parametrize("stratum_cols", standard_stratum_cols)
def test_stochatreat_within_strata_no_probs(n_treats, stratum_cols, df):
    """
    Within each stratum the assignment counts may deviate from the required
    counts only as far as misfit randomization allows, for equal
    probabilities and a varying number of treatments.
    """
    probs = n_treats * [1 / n_treats]
    # for equal probs the lcm of the probability denominators is n_treats
    bound = n_treats
    assignments = stochatreat(
        data=df,
        stratum_cols=stratum_cols,
        treats=n_treats,
        idx_col="id",
        random_state=42
    )
    comp = compute_count_diff(assignments, probs)
    assert_msg = """The counts differences exceed the bound that misfit
    allocation should not exceed"""
    assert (comp["count_diff"] < bound).all(), assert_msg
@pytest.mark.parametrize("probs", standard_probs)
@pytest.mark.parametrize("stratum_cols", standard_stratum_cols)
def test_stochatreat_within_strata_probs(probs, stratum_cols, df):
    """
    Within each stratum the assignment counts may deviate from the required
    counts only as far as misfit randomization allows, for two treatments
    with unequal probabilities.
    """
    bound = get_lcm_prob_denominators(probs)
    assignments = stochatreat(
        data=df,
        stratum_cols=stratum_cols,
        treats=len(probs),
        idx_col="id",
        probs=probs,
        random_state=42,
    )
    comp = compute_count_diff(assignments, probs)
    assert_msg = """The counts differences exceed the bound that misfit
    allocation should not exceed"""
    assert (comp["count_diff"] < bound).all(), assert_msg
@pytest.mark.parametrize("probs", probs_no_misfits)
def test_stochatreat_within_strata_no_misfits(probs, df_no_misfits):
    """
    Within each stratum the assignment counts must hit the required counts
    exactly when strata are sized so that no misfits arise.
    """
    assignments = stochatreat(
        data=df_no_misfits,
        stratum_cols=["stratum"],
        treats=len(probs),
        idx_col="id",
        probs=probs,
        random_state=42,
    )
    comp = compute_count_diff(assignments, probs)
    assert_msg = "The required proportions are not reached without misfits"
    assert (comp["count_diff"] == 0).all(), assert_msg
@pytest.mark.parametrize("probs", standard_probs)
@pytest.mark.parametrize("stratum_cols", standard_stratum_cols)
def test_stochatreat_global_strategy(probs, stratum_cols, df):
    """With the 'global' strategy at most one stratum may contain misfits."""
    assignments = stochatreat(
        data=df,
        stratum_cols=stratum_cols,
        treats=len(probs),
        idx_col="id",
        probs=probs,
        random_state=42,
        misfit_strategy="global"
    )
    comp = compute_count_diff(assignments, probs)
    per_stratum_diff = comp.groupby(["stratum_id"])["count_diff"].sum()
    assert_msg = "There is more than one stratum with misfits"
    assert (per_stratum_diff != 0).sum() <= 1, assert_msg
@pytest.mark.parametrize("misfit_strategy", ["global", "stratum"])
@pytest.mark.parametrize("stratum_cols", standard_stratum_cols)
def test_stochatreat_stratum_ids(df, misfit_strategy, stratum_cols):
    """Tests that the function returns the right number of stratum ids."""
    assignments = stochatreat(
        data=df,
        stratum_cols=stratum_cols,
        treats=2,
        idx_col="id",
        random_state=42,
        misfit_strategy=misfit_strategy,
    )
    n_strata = len(df[stratum_cols].drop_duplicates())
    n_stratum_ids = len(assignments["stratum_id"].drop_duplicates())
    if misfit_strategy == "global":
        # an extra synthetic stratum id appears only when there are misfits
        assert n_stratum_ids == n_strata or n_stratum_ids - 1 == n_strata
    else:
        assert n_stratum_ids == n_strata
@pytest.mark.parametrize("stratum_cols", standard_stratum_cols)
@pytest.mark.parametrize("misfit_strategy", ["global", "stratum"])
def test_stochatreat_random_state(df, stratum_cols, misfit_strategy):
    """
    Two consecutive calls with the same random state must yield identical
    assignments.
    """
    def assign():
        return stochatreat(
            data=df,
            stratum_cols=stratum_cols,
            treats=2,
            idx_col="id",
            random_state=42,
            misfit_strategy=misfit_strategy,
        )
    first = assign()
    second = assign()
    pd.testing.assert_series_equal(first["treat"], second["treat"])
@pytest.mark.parametrize("stratum_cols", standard_stratum_cols)
@pytest.mark.parametrize("misfit_strategy", ["global", "stratum"])
def test_stochatreat_shuffle_data(df, stratum_cols, misfit_strategy):
    """
    The id -> assignment mapping must be identical across two calls with the
    same random state even when the input rows are shuffled in between.
    """
    random_state = 42
    results = []
    for _ in range(2):
        assignment = stochatreat(
            data=df,
            stratum_cols=stratum_cols,
            treats=2,
            idx_col="id",
            random_state=random_state,
            misfit_strategy=misfit_strategy,
        )
        results.append(assignment.sort_values("id"))
        # shuffle the input rows before the second call
        df = df.sample(len(df), random_state=random_state)
    pd.testing.assert_series_equal(results[0]["treat"], results[1]["treat"])
| 30.017722
| 80
| 0.627899
|
import pytest
from math import gcd
import numpy as np
import pandas as pd
from stochatreat import stochatreat
from stochatreat import get_lcm_prob_denominators
| true
| true
|
f7174cd8c1d5c09c4cbcb9df7c5490a9c0982657
| 1,305
|
py
|
Python
|
source/ship.py
|
seveirbian/Plane-game
|
96c5377e72d3dfb1c5720a1769e9db8e89624ed5
|
[
"MIT"
] | 1
|
2018-06-12T08:56:52.000Z
|
2018-06-12T08:56:52.000Z
|
source/ship.py
|
seveirbian/Plane-game
|
96c5377e72d3dfb1c5720a1769e9db8e89624ed5
|
[
"MIT"
] | null | null | null |
source/ship.py
|
seveirbian/Plane-game
|
96c5377e72d3dfb1c5720a1769e9db8e89624ed5
|
[
"MIT"
] | null | null | null |
import pygame
from pygame.sprite import Sprite
class Ship(Sprite):
    """Player ship sprite: starts bottom-center, moved via boolean flags."""
    def __init__(self, ai_settings, screen):
        """Initialize the ship and set its initial position."""
        super().__init__()
        '''初始化飞船并设置其初始位置'''
        # NOTE(review): the literal above is not the first statement, so
        # Python does not treat it as a docstring; it is a no-op expression.
        self.screen = screen
        self.ai_settings = ai_settings
        # Load the ship image and get its bounding rect
        self.image = pygame.image.load('../images/ship.png')
        self.rect = self.image.get_rect()
        self.screen_rect = screen.get_rect()
        # Place each new ship at the bottom center of the screen
        self.rect.centerx = self.screen_rect.centerx
        self.rect.bottom = self.screen_rect.bottom
        # Keep a float copy of centerx so sub-pixel speeds accumulate
        self.center = float(self.rect.centerx)
        # Movement flags set by the input handlers
        self.moving_right = False
        self.moving_left = False
    def blitme(self):
        '''Draw the ship at its current position.'''
        self.screen.blit(self.image, self.rect)
    def update(self):
        '''Adjust the ship's position according to the movement flags.'''
        # Update the float center value rather than the rect, clamped at
        # the screen edges
        if self.moving_right and (self.rect.right < self.screen_rect.right):
            self.center += self.ai_settings.ship_speed_factor
        if self.moving_left and (self.rect.left > 0):
            self.center -= self.ai_settings.ship_speed_factor
        # Propagate the float position back to the rect used for drawing
        self.rect.centerx = self.center
    def center_ship(self):
        '''Re-center the ship horizontally on the screen.'''
        self.center = self.screen_rect.centerx
| 29
| 76
| 0.622989
|
import pygame
from pygame.sprite import Sprite
class Ship(Sprite):
    """Player ship sprite: starts bottom-center, moved via boolean flags."""
    def __init__(self, ai_settings, screen):
        """Initialize the ship and set its initial position."""
        super().__init__()
        self.screen = screen
        self.ai_settings = ai_settings
        # Load the ship image and get its bounding rect
        self.image = pygame.image.load('../images/ship.png')
        self.rect = self.image.get_rect()
        self.screen_rect = screen.get_rect()
        # Place each new ship at the bottom center of the screen
        self.rect.centerx = self.screen_rect.centerx
        self.rect.bottom = self.screen_rect.bottom
        # Keep a float copy of centerx so sub-pixel speeds accumulate
        self.center = float(self.rect.centerx)
        # Movement flags set by the input handlers
        self.moving_right = False
        self.moving_left = False
    def blitme(self):
        """Draw the ship at its current position."""
        self.screen.blit(self.image, self.rect)
    def update(self):
        """Adjust the ship's position according to the movement flags."""
        # Update the float center (clamped at screen edges), then propagate
        # it back to the rect used for drawing
        if self.moving_right and (self.rect.right < self.screen_rect.right):
            self.center += self.ai_settings.ship_speed_factor
        if self.moving_left and (self.rect.left > 0):
            self.center -= self.ai_settings.ship_speed_factor
        self.rect.centerx = self.center
    def center_ship(self):
        """Re-center the ship horizontally on the screen."""
        self.center = self.screen_rect.centerx
| true
| true
|
f7174db2dec152988a0f2418273a7a9086488a8b
| 4,836
|
py
|
Python
|
pynubank/nubank.py
|
FlavioMoreiraTec/nubank-flavio
|
224f483a7c7644116657c4f9e0929010ed511aa4
|
[
"MIT"
] | null | null | null |
pynubank/nubank.py
|
FlavioMoreiraTec/nubank-flavio
|
224f483a7c7644116657c4f9e0929010ed511aa4
|
[
"MIT"
] | null | null | null |
pynubank/nubank.py
|
FlavioMoreiraTec/nubank-flavio
|
224f483a7c7644116657c4f9e0929010ed511aa4
|
[
"MIT"
] | null | null | null |
import json
import os
import uuid
from typing import Tuple
import requests
from qrcode import QRCode
from requests import Response
PAYMENT_EVENT_TYPES = (
'TransferOutEvent',
'TransferInEvent',
'TransferOutReversalEvent',
'BarcodePaymentEvent',
'DebitPurchaseEvent',
'DebitPurchaseReversalEvent',
)
class NuException(Exception):
    """Raised when a Nubank HTTP request returns a non-200 status code."""
    def __init__(self, status_code, response, url):
        message = f'The request made failed with HTTP status code {status_code}'
        super().__init__(message)
        self.url = url
        self.status_code = status_code
        self.response = response
class Nubank:
    """Client for Nubank's web API: discovery, auth and account queries.

    Endpoints are resolved dynamically at construction time via the two
    discovery URLs, so creating an instance performs network requests.
    """
    DISCOVERY_URL = 'https://prod-s0-webapp-proxy.nubank.com.br/api/discovery'
    DISCOVERY_APP_URL = 'https://prod-s0-webapp-proxy.nubank.com.br/api/app/discovery'
    # Endpoint URLs; filled by _update_proxy_urls / authenticate_with_qr_code
    auth_url = None
    feed_url = None
    proxy_list_url = None
    proxy_list_app_url = None
    query_url = None
    bills_url = None
    def __init__(self):
        """Set default headers and resolve endpoints (performs HTTP GETs)."""
        self.headers = {
            'Content-Type': 'application/json',
            'X-Correlation-Id': 'WEB-APP.pewW9',
            'User-Agent': 'pynubank Client - https://github.com/andreroggeri/pynubank',
        }
        self._update_proxy_urls()
        self.auth_url = self.proxy_list_url['login']
    @staticmethod
    def _get_query(query_name):
        """Read a GraphQL query from the package's ``queries/<name>.gql``."""
        root = os.path.abspath(os.path.dirname(__file__))
        gql_file = query_name + '.gql'
        path = os.path.join(root, 'queries', gql_file)
        with open(path) as gql:
            return gql.read()
    def _update_proxy_urls(self):
        """Fetch both discovery documents and cache the endpoint maps."""
        request = requests.get(self.DISCOVERY_URL, headers=self.headers)
        self.proxy_list_url = json.loads(request.content.decode('utf-8'))
        request = requests.get(self.DISCOVERY_APP_URL, headers=self.headers)
        self.proxy_list_app_url = json.loads(request.content.decode('utf-8'))
    def _make_graphql_request(self, graphql_object):
        """POST the named GraphQL query to the query endpoint; return JSON."""
        body = {
            'query': self._get_query(graphql_object)
        }
        response = requests.post(self.query_url, json=body, headers=self.headers)
        return self._handle_response(response)
    def _password_auth(self, cpf: str, password: str):
        """Exchange CPF + password for a token payload via the login URL."""
        payload = {
            "grant_type": "password",
            "login": cpf,
            "password": password,
            "client_id": "other.conta",
            "client_secret": "yQPeLzoHuJzlMMSAjC-LgNUJdUecx8XO"
        }
        response = requests.post(self.auth_url, json=payload, headers=self.headers)
        data = self._handle_response(response)
        return data
    def _handle_response(self, response: Response) -> dict:
        """Return the response JSON, raising NuException on non-200 status."""
        if response.status_code != 200:
            raise NuException(response.status_code, response.json(), response.url)
        return response.json()
    def get_qr_code(self) -> Tuple[str, QRCode]:
        """Create a fresh login UUID and a QRCode object encoding it."""
        content = str(uuid.uuid4())
        qr = QRCode()
        qr.add_data(content)
        return content, qr
    def authenticate_with_qr_code(self, cpf: str, password, uuid: str):
        """Authenticate with password, then lift the session with the scanned
        QR-code UUID; stores the bearer token and the account endpoint URLs.
        """
        auth_data = self._password_auth(cpf, password)
        self.headers['Authorization'] = f'Bearer {auth_data["access_token"]}'
        payload = {
            'qr_code_id': uuid,
            'type': 'login-webapp'
        }
        response = requests.post(self.proxy_list_app_url['lift'], json=payload, headers=self.headers)
        auth_data = self._handle_response(response)
        # The lifted token replaces the password-auth token
        self.headers['Authorization'] = f'Bearer {auth_data["access_token"]}'
        self.feed_url = auth_data['_links']['events']['href']
        self.query_url = auth_data['_links']['ghostflame']['href']
        self.bills_url = auth_data['_links']['bills_summary']['href']
    def get_card_feed(self):
        """Return the raw credit-card event feed (requires authentication)."""
        request = requests.get(self.feed_url, headers=self.headers)
        return json.loads(request.content.decode('utf-8'))
    def get_card_statements(self):
        """Return only the 'transaction' events from the card feed."""
        feed = self.get_card_feed()
        return list(filter(lambda x: x['category'] == 'transaction', feed['events']))
    def get_bills(self):
        """Return the list of credit-card bills."""
        request = requests.get(self.bills_url, headers=self.headers)
        return json.loads(request.content.decode('utf-8'))['bills']
    def get_bill_details(self, bill):
        """Follow a bill's own 'self' link and return its full details."""
        request = requests.get(bill['_links']['self']['href'], headers=self.headers)
        return json.loads(request.content.decode('utf-8'))
    def get_account_feed(self):
        """Return the savings-account (NuConta) event feed via GraphQL."""
        data = self._make_graphql_request('account_feed')
        return data['data']['viewer']['savingsAccount']['feed']
    def get_account_statements(self):
        """Return only payment-type events from the account feed."""
        feed = self.get_account_feed()
        return list(filter(lambda x: x['__typename'] in PAYMENT_EVENT_TYPES, feed))
    def get_account_balance(self):
        """Return the current savings-account net balance via GraphQL."""
        data = self._make_graphql_request('account_balance')
        return data['data']['viewer']['savingsAccount']['currentSavingsBalance']['netAmount']
| 35.29927
| 101
| 0.651778
|
import json
import os
import uuid
from typing import Tuple
import requests
from qrcode import QRCode
from requests import Response
PAYMENT_EVENT_TYPES = (
'TransferOutEvent',
'TransferInEvent',
'TransferOutReversalEvent',
'BarcodePaymentEvent',
'DebitPurchaseEvent',
'DebitPurchaseReversalEvent',
)
class NuException(Exception):
    """Raised when a Nubank HTTP request returns a non-200 status code."""
    def __init__(self, status_code, response, url):
        super().__init__(f'The request made failed with HTTP status code {status_code}')
        self.url = url
        self.status_code = status_code
        self.response = response
class Nubank:
    """Client for Nubank's web API: discovery, auth and account queries.

    Endpoints are resolved dynamically at construction time via the two
    discovery URLs, so creating an instance performs network requests.
    """
    DISCOVERY_URL = 'https://prod-s0-webapp-proxy.nubank.com.br/api/discovery'
    DISCOVERY_APP_URL = 'https://prod-s0-webapp-proxy.nubank.com.br/api/app/discovery'
    # Endpoint URLs; filled by _update_proxy_urls / authenticate_with_qr_code
    auth_url = None
    feed_url = None
    proxy_list_url = None
    proxy_list_app_url = None
    query_url = None
    bills_url = None
    def __init__(self):
        """Set default headers and resolve endpoints (performs HTTP GETs)."""
        self.headers = {
            'Content-Type': 'application/json',
            'X-Correlation-Id': 'WEB-APP.pewW9',
            'User-Agent': 'pynubank Client - https://github.com/andreroggeri/pynubank',
        }
        self._update_proxy_urls()
        self.auth_url = self.proxy_list_url['login']
    @staticmethod
    def _get_query(query_name):
        """Read a GraphQL query from the package's ``queries/<name>.gql``."""
        root = os.path.abspath(os.path.dirname(__file__))
        gql_file = query_name + '.gql'
        path = os.path.join(root, 'queries', gql_file)
        with open(path) as gql:
            return gql.read()
    def _update_proxy_urls(self):
        """Fetch both discovery documents and cache the endpoint maps."""
        request = requests.get(self.DISCOVERY_URL, headers=self.headers)
        self.proxy_list_url = json.loads(request.content.decode('utf-8'))
        request = requests.get(self.DISCOVERY_APP_URL, headers=self.headers)
        self.proxy_list_app_url = json.loads(request.content.decode('utf-8'))
    def _make_graphql_request(self, graphql_object):
        """POST the named GraphQL query to the query endpoint; return JSON."""
        body = {
            'query': self._get_query(graphql_object)
        }
        response = requests.post(self.query_url, json=body, headers=self.headers)
        return self._handle_response(response)
    def _password_auth(self, cpf: str, password: str):
        """Exchange CPF + password for a token payload via the login URL."""
        payload = {
            "grant_type": "password",
            "login": cpf,
            "password": password,
            "client_id": "other.conta",
            "client_secret": "yQPeLzoHuJzlMMSAjC-LgNUJdUecx8XO"
        }
        response = requests.post(self.auth_url, json=payload, headers=self.headers)
        data = self._handle_response(response)
        return data
    def _handle_response(self, response: Response) -> dict:
        """Return the response JSON, raising NuException on non-200 status."""
        if response.status_code != 200:
            raise NuException(response.status_code, response.json(), response.url)
        return response.json()
    def get_qr_code(self) -> Tuple[str, QRCode]:
        """Create a fresh login UUID and a QRCode object encoding it."""
        content = str(uuid.uuid4())
        qr = QRCode()
        qr.add_data(content)
        return content, qr
    def authenticate_with_qr_code(self, cpf: str, password, uuid: str):
        """Authenticate with password, then lift the session with the scanned
        QR-code UUID; stores the bearer token and the account endpoint URLs.
        """
        auth_data = self._password_auth(cpf, password)
        self.headers['Authorization'] = f'Bearer {auth_data["access_token"]}'
        payload = {
            'qr_code_id': uuid,
            'type': 'login-webapp'
        }
        response = requests.post(self.proxy_list_app_url['lift'], json=payload, headers=self.headers)
        auth_data = self._handle_response(response)
        # The lifted token replaces the password-auth token
        self.headers['Authorization'] = f'Bearer {auth_data["access_token"]}'
        self.feed_url = auth_data['_links']['events']['href']
        self.query_url = auth_data['_links']['ghostflame']['href']
        self.bills_url = auth_data['_links']['bills_summary']['href']
    def get_card_feed(self):
        """Return the raw credit-card event feed (requires authentication)."""
        request = requests.get(self.feed_url, headers=self.headers)
        return json.loads(request.content.decode('utf-8'))
    def get_card_statements(self):
        """Return only the 'transaction' events from the card feed."""
        feed = self.get_card_feed()
        return list(filter(lambda x: x['category'] == 'transaction', feed['events']))
    def get_bills(self):
        """Return the list of credit-card bills."""
        request = requests.get(self.bills_url, headers=self.headers)
        return json.loads(request.content.decode('utf-8'))['bills']
    def get_bill_details(self, bill):
        """Follow a bill's own 'self' link and return its full details."""
        request = requests.get(bill['_links']['self']['href'], headers=self.headers)
        return json.loads(request.content.decode('utf-8'))
    def get_account_feed(self):
        """Return the savings-account (NuConta) event feed via GraphQL."""
        data = self._make_graphql_request('account_feed')
        return data['data']['viewer']['savingsAccount']['feed']
    def get_account_statements(self):
        """Return only payment-type events from the account feed."""
        feed = self.get_account_feed()
        return list(filter(lambda x: x['__typename'] in PAYMENT_EVENT_TYPES, feed))
    def get_account_balance(self):
        """Return the current savings-account net balance via GraphQL."""
        data = self._make_graphql_request('account_balance')
        return data['data']['viewer']['savingsAccount']['currentSavingsBalance']['netAmount']
| true
| true
|
f7174f069101d37e1152c091948b84f7ddc5aa8d
| 313
|
py
|
Python
|
py_merge/mergeexample.py
|
mutazag/misc
|
dfef362cdd835ef4efd1f2d02e13ff5297ccfc0f
|
[
"MIT"
] | null | null | null |
py_merge/mergeexample.py
|
mutazag/misc
|
dfef362cdd835ef4efd1f2d02e13ff5297ccfc0f
|
[
"MIT"
] | null | null | null |
py_merge/mergeexample.py
|
mutazag/misc
|
dfef362cdd835ef4efd1f2d02e13ff5297ccfc0f
|
[
"MIT"
] | null | null | null |
#%%
import pandas as pd
#%%
# Load the three example tables; the first CSV column is used as the index.
df1 = pd.read_csv('df1.csv', index_col=0)
# %%
df2 = pd.read_csv('df2.csv', index_col=0)
# %%
df3 = pd.read_csv('df3.csv', index_col=0)
# %%
# Inner joins: keep only rows that match in every table.
df1.merge(df2, on='proj_id').merge(df3, on='doc_id')
# %%
# Left joins: keep all df1 rows; unmatched cells become NaN.
df1.merge(df2, on='proj_id', how='left').merge(df3, on='doc_id', how='left')
# %%
| 18.411765
| 76
| 0.603834
|
import pandas as pd
# Load the three example tables; the first CSV column is used as the index.
df1 = pd.read_csv('df1.csv', index_col=0)
df2 = pd.read_csv('df2.csv', index_col=0)
df3 = pd.read_csv('df3.csv', index_col=0)
# Inner joins: keep only rows that match in every table.
df1.merge(df2, on='proj_id').merge(df3, on='doc_id')
# Left joins: keep all df1 rows; unmatched cells become NaN.
df1.merge(df2, on='proj_id', how='left').merge(df3, on='doc_id', how='left')
| true
| true
|
f7175023ba297508308f5f971d92777633745cb2
| 1,542
|
py
|
Python
|
tests/python/gaia-ui-tests/gaiatest/tests/functional/settings/test_settings_media_storage.py
|
BReduardokramer/gaia
|
c00302cdcd435ab193e8365917cfc6abac9e4f2e
|
[
"Apache-2.0"
] | 1
|
2021-11-09T00:27:34.000Z
|
2021-11-09T00:27:34.000Z
|
tests/python/gaia-ui-tests/gaiatest/tests/functional/settings/test_settings_media_storage.py
|
Delphine/gaia
|
df92f0ebd89efbc63570a61e70c4304c17b8b555
|
[
"Apache-2.0"
] | null | null | null |
tests/python/gaia-ui-tests/gaiatest/tests/functional/settings/test_settings_media_storage.py
|
Delphine/gaia
|
df92f0ebd89efbc63570a61e70c4304c17b8b555
|
[
"Apache-2.0"
] | null | null | null |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from gaiatest import GaiaTestCase
from gaiatest.apps.settings.app import Settings
class TestSettingsMediaStorage(GaiaTestCase):
    """UI test: Settings > Media storage reflects media pushed to the device."""
    def test_settings_media_storage(self):
        """Verify media-storage sizes before and after pushing test media."""
        settings = Settings(self.marionette)
        settings.launch()
        media_storage_settings = settings.open_media_storage_settings()
        # Check that no media is on the device
        self.assertEqual(media_storage_settings.music_size, '0 B')
        self.assertEqual(media_storage_settings.pictures_size, '0 B')
        self.assertEqual(media_storage_settings.movies_size, '0 B')
        # Close the settings application
        self.apps.kill(settings.app)
        # Push media to the device
        self.push_resource('VID_0001.3gp', destination='DCIM/100MZLLA')
        self.push_resource('IMG_0001.jpg', destination='DCIM/100MZLLA')
        self.push_resource('MUS_0001.mp3', destination='DCIM/100MZLLA')
        # Access 'Media storage' in Settings
        settings.launch()
        media_storage_settings = settings.open_media_storage_settings()
        # Check that media storage has updated to reflect the newly pushed media
        # (sizes match the fixed test fixtures pushed above)
        self.assertEqual(media_storage_settings.music_size, '120 KB')
        self.assertEqual(media_storage_settings.pictures_size, '348 KB')
        self.assertEqual(media_storage_settings.movies_size, '120 KB')
| 40.578947
| 80
| 0.72179
|
from gaiatest import GaiaTestCase
from gaiatest.apps.settings.app import Settings
class TestSettingsMediaStorage(GaiaTestCase):
    """UI test: Settings > Media storage reflects media pushed to the device."""
    def test_settings_media_storage(self):
        """Verify media-storage sizes before and after pushing test media."""
        settings = Settings(self.marionette)
        settings.launch()
        media_storage_settings = settings.open_media_storage_settings()
        # Initially no media is on the device
        self.assertEqual(media_storage_settings.music_size, '0 B')
        self.assertEqual(media_storage_settings.pictures_size, '0 B')
        self.assertEqual(media_storage_settings.movies_size, '0 B')
        # Close Settings, push one video/image/audio fixture, then reopen
        self.apps.kill(settings.app)
        self.push_resource('VID_0001.3gp', destination='DCIM/100MZLLA')
        self.push_resource('IMG_0001.jpg', destination='DCIM/100MZLLA')
        self.push_resource('MUS_0001.mp3', destination='DCIM/100MZLLA')
        settings.launch()
        media_storage_settings = settings.open_media_storage_settings()
        # Sizes now match the fixed test fixtures pushed above
        self.assertEqual(media_storage_settings.music_size, '120 KB')
        self.assertEqual(media_storage_settings.pictures_size, '348 KB')
        self.assertEqual(media_storage_settings.movies_size, '120 KB')
| true
| true
|
f71752ad85213d316ee14113c2e19d7243632bd1
| 17,836
|
py
|
Python
|
features/eolearn/features/radiometric_normalization.py
|
mohammadrezabk/eo-learn
|
8de3cfd64e74c1e4832e585954cdbf0ee9676eb3
|
[
"MIT"
] | null | null | null |
features/eolearn/features/radiometric_normalization.py
|
mohammadrezabk/eo-learn
|
8de3cfd64e74c1e4832e585954cdbf0ee9676eb3
|
[
"MIT"
] | null | null | null |
features/eolearn/features/radiometric_normalization.py
|
mohammadrezabk/eo-learn
|
8de3cfd64e74c1e4832e585954cdbf0ee9676eb3
|
[
"MIT"
] | null | null | null |
"""
Module for radiometric normalization
Credits:
Copyright (c) 2018-2019 Johannes Schmid (GeoVille)
Copyright (c) 2017-2019 Matej Aleksandrov, Matic Lubej, Devis Peresutti (Sinergise)
This source code is licensed under the MIT license found in the LICENSE
file in the root directory of this source tree.
"""
import numpy as np
from eolearn.core import EOTask, FeatureType
class ReferenceScenes(EOTask):
    """ Creates a layer of reference scenes which have the highest fraction of valid pixels.

        The number of reference scenes is limited to a definable number.

        Contributor: Johannes Schmid, GeoVille Information Systems GmbH, 2018

        :param feature: Name of the eopatch data layer. Needs to be of the FeatureType "DATA".
        :type feature: (FeatureType, str) or (FeatureType, str, str)
        :param valid_fraction_feature: Name of the layer containing the valid fraction obtained with the EOTask
            'AddValidDataFraction'. Needs to be of the FeatureType "SCALAR".
        :type valid_fraction_feature: (FeatureType, str)
        :param max_scene_number: Maximum number of reference scenes taken for the creation of the composite. By default,
            the maximum number of scenes equals the number of time frames
        :type max_scene_number: int
    """
    def __init__(self, feature, valid_fraction_feature, max_scene_number=None):
        self.feature = self._parse_features(feature, new_names=True,
                                            default_feature_type=FeatureType.DATA,
                                            rename_function='{}_REFERENCE'.format)
        self.valid_fraction_feature = self._parse_features(valid_fraction_feature,
                                                           default_feature_type=FeatureType.SCALAR)
        # None means "use all time frames"; resolved per-eopatch in execute()
        self.number = max_scene_number
    def execute(self, eopatch):
        """Stack the scenes sorted by descending valid fraction into a new feature."""
        feature_type, feature_name, new_feature_name = next(self.feature(eopatch))
        valid_fraction_feature_type, valid_fraction_feature_name = next(self.valid_fraction_feature(eopatch))
        valid_frac = list(eopatch[valid_fraction_feature_type][valid_fraction_feature_name].flatten())
        data = eopatch[feature_type][feature_name]
        number = data.shape[0] if self.number is None else self.number
        # NOTE(review): `x` here is the original time-frame index, not the rank
        # in the sorted-by-valid-fraction order, so `x <= number-1` keeps the
        # frames whose original index is below `number` rather than the
        # `number` most-valid scenes -- confirm whether this is intended.
        eopatch[feature_type][new_feature_name] = np.array([data[x] for _, x in
                                                            sorted(zip(valid_frac, range(data.shape[0])), reverse=True)
                                                            if x <= number-1])
        return eopatch
class BaseCompositing(EOTask):
""" Base class to create a composite of reference scenes
Contributor: Johannes Schmid, GeoVille Information Systems GmbH, 2018
:param feature: Feature holding the input time-series. Default type is FeatureType.DATA
:type feature: (FeatureType, str)
:param feature_composite: Type and name of output composite image. Default type is FeatureType.DATA_TIMELESS
:type feature_composite: (FeatureType, str)
:param percentile: Percentile along the time dimension used for compositing. Methods use different percentiles
:type percentile: int or list
:param max_index: Value used to flag indices with NaNs. Could be integer or NaN. Default is 255
:type max_index: int or NaN
:param interpolation: Method used to compute percentile. Allowed values are {'geoville', 'linear', 'lower',
'higher', 'midpoint', 'nearest'}. 'geoville' interpolation performs a custom
implementation, while the other methods use the numpy `percentile` function. Default is
'lower'
:type interpolation: str
:param no_data_value: Value in the composite assigned to non valid data points. Default is NaN
:type no_data_value: float or NaN
"""
    def __init__(self, feature, feature_composite, percentile=None, max_index=255, interpolation='lower',
                 no_data_value=np.nan):
        """Store compositing configuration; parameters documented on the class."""
        self.feature = self._parse_features(feature,
                                            default_feature_type=FeatureType.DATA,
                                            rename_function='{}_COMPOSITE'.format)
        self.composite_type, self.composite_name = next(
            self._parse_features(feature_composite, default_feature_type=FeatureType.DATA_TIMELESS)())
        self.percentile = percentile
        self.max_index = max_index
        self.interpolation = interpolation
        # 'geoville' selects the custom percentile-index implementation; any
        # other value is forwarded to numpy's percentile interpolation modes
        self._index_by_percentile = self._geoville_index_by_percentile \
            if self.interpolation.lower() == 'geoville' else self._numpy_index_by_percentile
        self.no_data_value = no_data_value
def _numpy_index_by_percentile(self, data, percentile):
""" Calculate percentile of numpy stack and return the index of the chosen pixel.
numpy percentile function is used with one of the following interpolations {'linear', 'lower', 'higher',
'midpoint', 'nearest'}
"""
data_perc_low = np.nanpercentile(data, percentile, axis=0, interpolation=self.interpolation)
indices = np.empty(data_perc_low.shape, dtype=np.uint8)
indices[:] = np.nan
abs_diff = np.where(np.isnan(data_perc_low), np.inf, abs(data - data_perc_low))
indices = np.where(np.isnan(data_perc_low), self.max_index, np.nanargmin(abs_diff, axis=0))
return indices
def _geoville_index_by_percentile(self, data, percentile):
""" Calculate percentile of numpy stack and return the index of the chosen pixel. """
# no_obs = bn.allnan(arr_tmp["data"], axis=0)
data_tmp = np.array(data, copy=True)
valid_obs = np.sum(np.isfinite(data_tmp), axis=0)
# replace NaN with maximum
max_val = np.nanmax(data_tmp) + 1
data_tmp[np.isnan(data_tmp)] = max_val
# sort - former NaNs will move to the end
ind_tmp = np.argsort(data_tmp, kind="mergesort", axis=0)
# desired position as well as floor and ceiling of it
k_arr = (valid_obs - 1) * (percentile / 100.0)
k_arr = np.where(k_arr < 0, 0, k_arr)
f_arr = np.floor(k_arr + 0.5)
f_arr = f_arr.astype(int)
# get floor value of reference band and index band
ind = f_arr.astype("int16")
y_val, x_val = ind_tmp.shape[1], ind_tmp.shape[2]
y_val, x_val = np.ogrid[0:y_val, 0:x_val]
idx = np.where(valid_obs == 0, self.max_index, ind_tmp[ind, y_val, x_val])
return idx
def _get_reference_band(self, data):
""" Extract reference band from input 4D data according to compositing method
:param data: 4D array from which to extract reference band (e.g. blue, maxNDVI, ..)
:type data: numpy array
:return: 3D array containing reference band according to compositing method
"""
raise NotImplementedError
def _get_indices(self, data):
""" Compute indices along temporal dimension corresponding to the sought percentile
:param data: Input 3D array holding the reference band
:type data: numpy array
:return: 2D array holding the temporal index corresponding to percentile
"""
indices = self._index_by_percentile(data, self.percentile)
return indices
def execute(self, eopatch):
""" Compute composite array merging temporal frames according to the compositing method
:param eopatch: eopatch holding time-series
:return: eopatch with composite image of time-series
"""
feature_type, feature_name = next(self.feature(eopatch))
data = eopatch[feature_type][feature_name].copy()
# compute band according to compositing method (e.g. blue, maxNDVI, maxNDWI)
reference_bands = self._get_reference_band(data)
# find temporal indices corresponding to pre-defined percentile
indices = self._get_indices(reference_bands)
# compute composite image selecting values along temporal dimension corresponding to percentile indices
composite_image = np.empty((data.shape[1:]), np.float32)
composite_image[:] = self.no_data_value
for scene_id, scene in enumerate(data):
composite_image = np.where(np.dstack([indices]) == scene_id, scene, composite_image)
eopatch[self.composite_type][self.composite_name] = composite_image
return eopatch
class BlueCompositing(BaseCompositing):
    """ Blue band compositing method
    - blue (25th percentile of the blue band)
    :param blue_idx: Index of blue band in `feature` array
    :type blue_idx: int
    """
    def __init__(self, feature, feature_composite, blue_idx, interpolation='lower'):
        super().__init__(feature, feature_composite, percentile=25, interpolation=interpolation)
        self.blue_idx = blue_idx
        if not isinstance(blue_idx, int):
            raise ValueError('Incorrect value of blue band index specified')
    def _get_reference_band(self, data):
        """ Return the blue band of the time-series as float32.
        :param data: 4D array from which to extract the blue reference band
        :type data: numpy array
        :return: 3D array containing the blue reference band
        """
        blue_band = data[..., self.blue_idx]
        return blue_band.astype("float32")
class HOTCompositing(BaseCompositing):
    """ HOT compositing method
    - HOT (Index using bands blue and red)
    The HOT index is defined as per
    Zhu, Z., & Woodcock, C. E. (2012). "Object-based cloud and cloud shadow detection in Landsat imagery."
    Remote Sensing of Environment, 118, 83-94.
    :param blue_idx: Index of blue band in `feature` array
    :type blue_idx: int
    :param red_idx: Index of red band in `feature` array
    :type red_idx: int
    """
    def __init__(self, feature, feature_composite, blue_idx, red_idx, interpolation='lower'):
        super().__init__(feature, feature_composite, percentile=25, interpolation=interpolation)
        self.blue_idx = blue_idx
        self.red_idx = red_idx
        if not isinstance(blue_idx, int) or not isinstance(red_idx, int):
            raise ValueError('Incorrect values of blue and red band indices specified')
    def _get_reference_band(self, data):
        """ Compute the HOT index (blue - 0.5 * red - 0.08) from the time-series.
        :param data: 4D array from which to extract the HOT reference band
        :type data: numpy array
        :return: 3D array containing the HOT reference band
        """
        blue = data[..., self.blue_idx]
        red = data[..., self.red_idx]
        return blue - 0.5 * red - 0.08
class MaxNDVICompositing(BaseCompositing):
    """ maxNDVI compositing method
    - maxNDVI (temporal maximum of NDVI)
    :param red_idx: Index of red band in `feature` array
    :type red_idx: int
    :param nir_idx: Index of NIR band in `feature` array
    :type nir_idx: int
    """
    def __init__(self, feature, feature_composite, red_idx, nir_idx, interpolation='lower'):
        super().__init__(feature, feature_composite, percentile=[0, 100], interpolation=interpolation)
        self.red_idx = red_idx
        self.nir_idx = nir_idx
        if not isinstance(nir_idx, int) or not isinstance(red_idx, int):
            raise ValueError('Incorrect values of red and NIR band indices specified')
    def _get_reference_band(self, data):
        """ Compute the NDVI stack used as the reference band.
        :param data: 4D array from which to compute the NDVI reference band
        :type data: numpy array
        :return: 3D array containing the NDVI reference band
        """
        nir = data[..., self.nir_idx].astype("float32")
        red = data[..., self.red_idx].astype("float32")
        difference, total = nir - red, nir + red
        return difference / total
    def _get_indices(self, data):
        # Pixels whose median NDVI is clearly negative use the temporal minimum,
        # all remaining pixels use the temporal maximum
        median = np.nanmedian(data, axis=0)
        lowest = self._index_by_percentile(data, self.percentile[0])
        highest = self._index_by_percentile(data, self.percentile[1])
        return np.where(median < -0.05, lowest, highest)
class MaxNDWICompositing(BaseCompositing):
    """ maxNDWI compositing method
    - maxNDWI (temporal maximum of NDWI)
    :param nir_idx: Index of NIR band in `feature` array
    :type nir_idx: int
    :param swir1_idx: Index of SWIR1 band in `feature` array
    :type swir1_idx: int
    """
    def __init__(self, feature, feature_composite, nir_idx, swir1_idx, interpolation='lower'):
        super().__init__(feature, feature_composite, percentile=100, interpolation=interpolation)
        self.nir_idx = nir_idx
        self.swir1_idx = swir1_idx
        if not isinstance(nir_idx, int) or not isinstance(swir1_idx, int):
            raise ValueError('Incorrect values of NIR and SWIR1 band indices specified')
    def _get_reference_band(self, data):
        """ Compute the NDWI stack used as the reference band.
        :param data: 4D array from which to compute the NDWI reference band
        :type data: numpy array
        :return: 3D array containing the NDWI reference band
        """
        nir = data[..., self.nir_idx].astype("float32")
        swir1 = data[..., self.swir1_idx].astype("float32")
        difference, total = nir - swir1, nir + swir1
        return difference / total
class MaxRatioCompositing(BaseCompositing):
    """ maxRatio compositing method
    - maxRatio (temporal maximum of a ratio using bands blue, NIR and SWIR)
    :param blue_idx: Index of blue band in `feature` array
    :type blue_idx: int
    :param nir_idx: Index of NIR band in `feature` array
    :type nir_idx: int
    :param swir1_idx: Index of SWIR1 band in `feature` array
    :type swir1_idx: int
    """
    def __init__(self, feature, feature_composite, blue_idx, nir_idx, swir1_idx, interpolation='lower'):
        super().__init__(feature, feature_composite, percentile=100, interpolation=interpolation)
        self.blue_idx = blue_idx
        self.nir_idx = nir_idx
        self.swir1_idx = swir1_idx
        if not isinstance(blue_idx, int) or not isinstance(nir_idx, int) or not isinstance(swir1_idx, int):
            raise ValueError('Incorrect values for either blue, NIR or SWIR1 band indices specified')
    def _get_reference_band(self, data):
        """ Compute the max-ratio stack, defined as max(NIR, SWIR1) / BLUE.
        :param data: 4D array from which to compute the max-ratio reference band
        :type data: numpy array
        :return: 3D array containing the max-ratio reference band
        """
        blue = data[..., self.blue_idx].astype("float32")
        nir = data[..., self.nir_idx].astype("float32")
        swir1 = data[..., self.swir1_idx].astype("float32")
        # nanmax over the stacked pair ignores a NaN in one band (unlike np.maximum, which propagates it)
        stacked = np.array([nir, swir1])
        return np.nanmax(stacked, axis=0) / blue
class HistogramMatching(EOTask):
    """ Histogram match of each band of each scene within a time-series with respect to the corresponding band of a
    reference composite.
    Contributor: Johannes Schmid, GeoVille Information Systems GmbH, 2018
    :param feature: Name of the eopatch data layer that will undergo a histogram match.
                    Should be of the FeatureType "DATA".
    :type feature: (FeatureType, str) or (FeatureType, str, str)
    :param reference: Name of the eopatch data layer that represents the reference for the histogram match.
                      Should be of the FeatureType "DATA_TIMELESS".
    :type reference: (FeatureType, str)
    """
    def __init__(self, feature, reference):
        self.feature = self._parse_features(feature, new_names=True,
                                            default_feature_type=FeatureType.DATA,
                                            rename_function='{}_NORMALISED'.format)
        self.reference = self._parse_features(reference, default_feature_type=FeatureType.DATA_TIMELESS)
    def execute(self, eopatch):
        """ Normalise every scene of the time-series against the reference composite.
        :param eopatch: eopatch holding the time-series and reference data
        :type eopatch: EOPatch
        :return: The same eopatch instance with the normalised time-series
        """
        feature_type, feature_name, new_feature_name = next(self.feature(eopatch))
        reference_type, reference_name = next(self.reference(eopatch))
        reference_scene = eopatch[reference_type][reference_name]
        # band dimensions of the two layers must agree
        if reference_scene.shape[-1] != eopatch[feature_type][feature_name].shape[-1]:
            raise ValueError('Time-series and reference scene must have corresponding bands')
        time_series = eopatch[feature_type][feature_name]
        eopatch[feature_type][new_feature_name] = np.zeros_like(time_series)
        for scene_idx, scene in enumerate(time_series):
            # restrict both images to their common valid footprint before computing statistics
            scene_masked = np.where(np.isnan(reference_scene), np.nan, scene)
            reference_masked = np.where(np.isnan(scene), np.nan, reference_scene)
            # per-band statistics in float64 for numerical stability
            std_ref = np.nanstd(reference_masked, axis=(0, 1), dtype=np.float64)
            std_src = np.nanstd(scene_masked, axis=(0, 1), dtype=np.float64)
            mean_ref = np.nanmean(reference_masked, axis=(0, 1), dtype=np.float64)
            mean_src = np.nanmean(scene_masked, axis=(0, 1), dtype=np.float64)
            # linear match: scale to the reference spread, then shift to the reference mean
            gain = std_ref / std_src
            eopatch[feature_type][new_feature_name][scene_idx] = scene * gain + (mean_ref - mean_src * gain)
        return eopatch
| 46.569191
| 120
| 0.655808
|
import numpy as np
from eolearn.core import EOTask, FeatureType
class ReferenceScenes(EOTask):
    """Collect reference scenes from a time-series, ordered by their valid-data fraction.

    :param feature: Input time-series feature; the result is stored under a renamed copy
        (suffix ``_REFERENCE`` by default)
    :param valid_fraction_feature: Scalar feature holding the per-frame fraction of valid pixels
    :param max_scene_number: Upper bound on the number of scenes kept; ``None`` keeps all frames
    """
    def __init__(self, feature, valid_fraction_feature, max_scene_number=None):
        self.feature = self._parse_features(feature, new_names=True,
                                            default_feature_type=FeatureType.DATA,
                                            rename_function='{}_REFERENCE'.format)
        self.valid_fraction_feature = self._parse_features(valid_fraction_feature,
                                                           default_feature_type=FeatureType.SCALAR)
        self.number = max_scene_number
    def execute(self, eopatch):
        """Store the selected frames, sorted by descending valid fraction, as a new feature.

        :param eopatch: eopatch holding the time-series and its per-frame valid fractions
        :return: the same eopatch with the reference-scene stack added
        """
        feature_type, feature_name, new_feature_name = next(self.feature(eopatch))
        valid_fraction_feature_type, valid_fraction_feature_name = next(self.valid_fraction_feature(eopatch))
        valid_frac = list(eopatch[valid_fraction_feature_type][valid_fraction_feature_name].flatten())
        data = eopatch[feature_type][feature_name]
        # default to keeping every frame when no maximum was given
        number = data.shape[0] if self.number is None else self.number
        # NOTE(review): the filter `x <= number-1` keeps frames by their ORIGINAL temporal index,
        # not the top-`number` frames by valid fraction — confirm this is the intended selection
        eopatch[feature_type][new_feature_name] = np.array([data[x] for _, x in
                                                            sorted(zip(valid_frac, range(data.shape[0])), reverse=True)
                                                            if x <= number-1])
        return eopatch
class BaseCompositing(EOTask):
def __init__(self, feature, feature_composite, percentile=None, max_index=255, interpolation='lower',
no_data_value=np.nan):
self.feature = self._parse_features(feature,
default_feature_type=FeatureType.DATA,
rename_function='{}_COMPOSITE'.format)
self.composite_type, self.composite_name = next(
self._parse_features(feature_composite, default_feature_type=FeatureType.DATA_TIMELESS)())
self.percentile = percentile
self.max_index = max_index
self.interpolation = interpolation
self._index_by_percentile = self._geoville_index_by_percentile \
if self.interpolation.lower() == 'geoville' else self._numpy_index_by_percentile
self.no_data_value = no_data_value
def _numpy_index_by_percentile(self, data, percentile):
data_perc_low = np.nanpercentile(data, percentile, axis=0, interpolation=self.interpolation)
indices = np.empty(data_perc_low.shape, dtype=np.uint8)
indices[:] = np.nan
abs_diff = np.where(np.isnan(data_perc_low), np.inf, abs(data - data_perc_low))
indices = np.where(np.isnan(data_perc_low), self.max_index, np.nanargmin(abs_diff, axis=0))
return indices
def _geoville_index_by_percentile(self, data, percentile):
data_tmp = np.array(data, copy=True)
valid_obs = np.sum(np.isfinite(data_tmp), axis=0)
max_val = np.nanmax(data_tmp) + 1
data_tmp[np.isnan(data_tmp)] = max_val
ind_tmp = np.argsort(data_tmp, kind="mergesort", axis=0)
k_arr = (valid_obs - 1) * (percentile / 100.0)
k_arr = np.where(k_arr < 0, 0, k_arr)
f_arr = np.floor(k_arr + 0.5)
f_arr = f_arr.astype(int)
ind = f_arr.astype("int16")
y_val, x_val = ind_tmp.shape[1], ind_tmp.shape[2]
y_val, x_val = np.ogrid[0:y_val, 0:x_val]
idx = np.where(valid_obs == 0, self.max_index, ind_tmp[ind, y_val, x_val])
return idx
def _get_reference_band(self, data):
raise NotImplementedError
def _get_indices(self, data):
indices = self._index_by_percentile(data, self.percentile)
return indices
def execute(self, eopatch):
feature_type, feature_name = next(self.feature(eopatch))
data = eopatch[feature_type][feature_name].copy()
reference_bands = self._get_reference_band(data)
indices = self._get_indices(reference_bands)
composite_image = np.empty((data.shape[1:]), np.float32)
composite_image[:] = self.no_data_value
for scene_id, scene in enumerate(data):
composite_image = np.where(np.dstack([indices]) == scene_id, scene, composite_image)
eopatch[self.composite_type][self.composite_name] = composite_image
return eopatch
class BlueCompositing(BaseCompositing):
def __init__(self, feature, feature_composite, blue_idx, interpolation='lower'):
super().__init__(feature, feature_composite, percentile=25, interpolation=interpolation)
self.blue_idx = blue_idx
if not isinstance(blue_idx, int):
raise ValueError('Incorrect value of blue band index specified')
def _get_reference_band(self, data):
return data[..., self.blue_idx].astype("float32")
class HOTCompositing(BaseCompositing):
def __init__(self, feature, feature_composite, blue_idx, red_idx, interpolation='lower'):
super().__init__(feature, feature_composite, percentile=25, interpolation=interpolation)
self.blue_idx = blue_idx
self.red_idx = red_idx
if not isinstance(blue_idx, int) or not isinstance(red_idx, int):
raise ValueError('Incorrect values of blue and red band indices specified')
def _get_reference_band(self, data):
return data[..., self.blue_idx] - 0.5 * data[..., self.red_idx] - 0.08
class MaxNDVICompositing(BaseCompositing):
def __init__(self, feature, feature_composite, red_idx, nir_idx, interpolation='lower'):
super().__init__(feature, feature_composite, percentile=[0, 100], interpolation=interpolation)
self.red_idx = red_idx
self.nir_idx = nir_idx
if not isinstance(nir_idx, int) or not isinstance(red_idx, int):
raise ValueError('Incorrect values of red and NIR band indices specified')
def _get_reference_band(self, data):
nir = data[..., self.nir_idx].astype("float32")
red = data[..., self.red_idx].astype("float32")
return (nir - red) / (nir + red)
def _get_indices(self, data):
median = np.nanmedian(data, axis=0)
indices_min = self._index_by_percentile(data, self.percentile[0])
indices_max = self._index_by_percentile(data, self.percentile[1])
indices = np.where(median < -0.05, indices_min, indices_max)
return indices
class MaxNDWICompositing(BaseCompositing):
def __init__(self, feature, feature_composite, nir_idx, swir1_idx, interpolation='lower'):
super().__init__(feature, feature_composite, percentile=100, interpolation=interpolation)
self.nir_idx = nir_idx
self.swir1_idx = swir1_idx
if not isinstance(nir_idx, int) or not isinstance(swir1_idx, int):
raise ValueError('Incorrect values of NIR and SWIR1 band indices specified')
def _get_reference_band(self, data):
nir = data[..., self.nir_idx].astype("float32")
swir1 = data[..., self.swir1_idx].astype("float32")
return (nir - swir1) / (nir + swir1)
class MaxRatioCompositing(BaseCompositing):
def __init__(self, feature, feature_composite, blue_idx, nir_idx, swir1_idx, interpolation='lower'):
super().__init__(feature, feature_composite, percentile=100, interpolation=interpolation)
self.blue_idx = blue_idx
self.nir_idx = nir_idx
self.swir1_idx = swir1_idx
if not isinstance(blue_idx, int) or not isinstance(nir_idx, int) or not isinstance(swir1_idx, int):
raise ValueError('Incorrect values for either blue, NIR or SWIR1 band indices specified')
def _get_reference_band(self, data):
blue = data[..., self.blue_idx].astype("float32")
nir = data[..., self.nir_idx].astype("float32")
swir1 = data[..., self.swir1_idx].astype("float32")
return np.nanmax(np.array([nir, swir1]), axis=0) / blue
class HistogramMatching(EOTask):
def __init__(self, feature, reference):
self.feature = self._parse_features(feature, new_names=True,
default_feature_type=FeatureType.DATA,
rename_function='{}_NORMALISED'.format)
self.reference = self._parse_features(reference, default_feature_type=FeatureType.DATA_TIMELESS)
def execute(self, eopatch):
feature_type, feature_name, new_feature_name = next(self.feature(eopatch))
reference_type, reference_name = next(self.reference(eopatch))
reference_scene = eopatch[reference_type][reference_name]
if reference_scene.shape[-1] != eopatch[feature_type][feature_name].shape[-1]:
raise ValueError('Time-series and reference scene must have corresponding bands')
eopatch[feature_type][new_feature_name] = np.zeros_like(eopatch[feature_type][feature_name])
for source_id, source in enumerate(eopatch[feature_type][feature_name]):
src_masked = np.where(np.isnan(reference_scene), np.nan, source)
ref_masked = np.where(np.isnan(source), np.nan, reference_scene)
std_ref = np.nanstd(ref_masked, axis=(0, 1), dtype=np.float64)
std_src = np.nanstd(src_masked, axis=(0, 1), dtype=np.float64)
mean_ref = np.nanmean(ref_masked, axis=(0, 1), dtype=np.float64)
mean_src = np.nanmean(src_masked, axis=(0, 1), dtype=np.float64)
eopatch[feature_type][new_feature_name][source_id] = \
source * (std_ref / std_src) + (mean_ref - (mean_src * (std_ref / std_src)))
return eopatch
| true
| true
|
f71754673dd76b5b137364e722d76f8cba4d6ce8
| 3,160
|
py
|
Python
|
pypureclient/flasharray/FA_2_11/models/software_bundle_response.py
|
Flav-STOR-WL/py-pure-client
|
03b889c997d90380ac5d6380ca5d5432792d3e89
|
[
"BSD-2-Clause"
] | 14
|
2018-12-07T18:30:27.000Z
|
2022-02-22T09:12:33.000Z
|
pypureclient/flasharray/FA_2_11/models/software_bundle_response.py
|
Flav-STOR-WL/py-pure-client
|
03b889c997d90380ac5d6380ca5d5432792d3e89
|
[
"BSD-2-Clause"
] | 28
|
2019-09-17T21:03:52.000Z
|
2022-03-29T22:07:35.000Z
|
pypureclient/flasharray/FA_2_11/models/software_bundle_response.py
|
Flav-STOR-WL/py-pure-client
|
03b889c997d90380ac5d6380ca5d5432792d3e89
|
[
"BSD-2-Clause"
] | 15
|
2020-06-11T15:50:08.000Z
|
2022-03-21T09:27:25.000Z
|
# coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.11
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_11 import models
class SoftwareBundleResponse(object):
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'items': 'list[SoftwareBundle]'
    }

    attribute_map = {
        'items': 'items'
    }

    required_args = {
    }

    def __init__(
        self,
        items=None,  # type: List[models.SoftwareBundle]
    ):
        """
        Keyword args:
            items (list[SoftwareBundle])
        """
        if items is not None:
            self.items = items

    def __setattr__(self, key, value):
        # only declared model attributes may be assigned
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `SoftwareBundleResponse`".format(key))
        self.__dict__[key] = value

    def __getattribute__(self, item):
        # unresolved Property placeholders behave as missing attributes
        value = object.__getattribute__(self, item)
        if isinstance(value, Property):
            raise AttributeError
        return value

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr in self.swagger_types:
            if not hasattr(self, attr):
                continue
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [element.to_dict() if hasattr(element, "to_dict") else element
                                for element in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {dict_key: dict_value.to_dict() if hasattr(dict_value, "to_dict") else dict_value
                                for dict_key, dict_value in value.items()}
            else:
                result[attr] = value
        if issubclass(SoftwareBundleResponse, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return isinstance(other, SoftwareBundleResponse) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 28.214286
| 105
| 0.549051
|
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_11 import models
class SoftwareBundleResponse(object):
swagger_types = {
'items': 'list[SoftwareBundle]'
}
attribute_map = {
'items': 'items'
}
required_args = {
}
def __init__(
self,
items=None,
):
if items is not None:
self.items = items
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `SoftwareBundleResponse`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(SoftwareBundleResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, SoftwareBundleResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
f7175555296b859086cc2c753888bdfe21cb502e
| 936
|
py
|
Python
|
deoldify/save.py
|
TaktakTaktouk/DeOldify
|
0ff6139bb09d0abdf535a724f05bdad3ec04dcc1
|
[
"MIT"
] | 14,898
|
2018-11-01T14:48:38.000Z
|
2022-03-31T16:28:38.000Z
|
deoldify/save.py
|
TaktakTaktouk/DeOldify
|
0ff6139bb09d0abdf535a724f05bdad3ec04dcc1
|
[
"MIT"
] | 376
|
2018-11-02T18:22:23.000Z
|
2022-03-24T21:29:19.000Z
|
deoldify/save.py
|
TaktakTaktouk/DeOldify
|
0ff6139bb09d0abdf535a724f05bdad3ec04dcc1
|
[
"MIT"
] | 2,250
|
2018-11-02T15:45:39.000Z
|
2022-03-28T17:08:23.000Z
|
from fastai.basic_train import Learner, LearnerCallback
from fastai.vision.gan import GANLearner
class GANSaveCallback(LearnerCallback):
    """A `LearnerCallback` that periodically saves the generator learner while the GAN trains.

    :param learn: the GAN learner being trained
    :param learn_gen: generator learner whose weights are checkpointed
    :param filename: base name for saved checkpoints
    :param save_iters: save every this many iterations
    """

    def __init__(
        self,
        learn: GANLearner,
        learn_gen: Learner,
        filename: str,
        save_iters: int = 1000,
    ):
        super().__init__(learn)
        self.learn_gen = learn_gen
        self.filename = filename
        self.save_iters = save_iters

    def on_batch_end(self, iteration: int, epoch: int, **kwargs) -> None:
        # skip iteration 0, then checkpoint every `save_iters` batches
        if iteration != 0 and iteration % self.save_iters == 0:
            self._save_gen_learner(iteration=iteration, epoch=epoch)

    def _save_gen_learner(self, iteration: int, epoch: int):
        # checkpoint name encodes both the epoch and the iteration
        self.learn_gen.save(f'{self.filename}_{epoch}_{iteration}')
| 31.2
| 103
| 0.645299
|
from fastai.basic_train import Learner, LearnerCallback
from fastai.vision.gan import GANLearner
class GANSaveCallback(LearnerCallback):
def __init__(
self,
learn: GANLearner,
learn_gen: Learner,
filename: str,
save_iters: int = 1000,
):
super().__init__(learn)
self.learn_gen = learn_gen
self.filename = filename
self.save_iters = save_iters
def on_batch_end(self, iteration: int, epoch: int, **kwargs) -> None:
if iteration == 0:
return
if iteration % self.save_iters == 0:
self._save_gen_learner(iteration=iteration, epoch=epoch)
def _save_gen_learner(self, iteration: int, epoch: int):
filename = '{}_{}_{}'.format(self.filename, epoch, iteration)
self.learn_gen.save(filename)
| true
| true
|
f71756f9227d14924ce1c8f11117e55a80ba40c0
| 1,904
|
py
|
Python
|
tests/test_args.py
|
rauljim/passgen
|
ca55c08b1ab0439d598dc045982b6971bfee1629
|
[
"MIT"
] | null | null | null |
tests/test_args.py
|
rauljim/passgen
|
ca55c08b1ab0439d598dc045982b6971bfee1629
|
[
"MIT"
] | null | null | null |
tests/test_args.py
|
rauljim/passgen
|
ca55c08b1ab0439d598dc045982b6971bfee1629
|
[
"MIT"
] | null | null | null |
from passgen import args
def test_num_words():
    # short flag
    mock_argv = ['passgen', '-n', '22']
    options = args.get_cli_options(mock_argv)
    assert 22 == options.num_words
    # long flag
    mock_argv = ['passgen', '--num-words', '33']
    options = args.get_cli_options(mock_argv)
    assert 33 == options.num_words
    # no flag falls back to the default
    mock_argv = ['passgen']
    options = args.get_cli_options(mock_argv)
    assert args.DEFAULT_NUM_WORDS == options.num_words
def test_count():
    # short flag
    mock_argv = ['passgen', '-c', '22']
    options = args.get_cli_options(mock_argv)
    assert 22 == options.count
    # long flag
    mock_argv = ['passgen', '--count', '33']
    options = args.get_cli_options(mock_argv)
    assert 33 == options.count
    # no flag falls back to the default
    mock_argv = ['passgen']
    options = args.get_cli_options(mock_argv)
    assert args.DEFAULT_COUNT == options.count
    mock_argv = ['passgen', '--count', '-1']  # negative value ignored
    options = args.get_cli_options(mock_argv)
    assert args.DEFAULT_COUNT == options.count
def test_min_chars():
    # long flag only; default used when the flag is absent
    mock_argv = ['passgen', '--min-chars', '33']
    options = args.get_cli_options(mock_argv)
    assert 33 == options.min_chars
    mock_argv = ['passgen']
    options = args.get_cli_options(mock_argv)
    assert args.DEFAULT_MIN_CHARS == options.min_chars
def test_max_chars():
    # long flag only; default used when the flag is absent
    mock_argv = ['passgen', '--max-chars', '33']
    options = args.get_cli_options(mock_argv)
    assert 33 == options.max_chars
    mock_argv = ['passgen']
    options = args.get_cli_options(mock_argv)
    assert args.DEFAULT_MAX_CHARS == options.max_chars
def test_conflicting_min_max_chars():
    # when min-chars exceeds max-chars, min-chars wins and max-chars reverts to its default
    mock_argv = ['passgen', '--min-chars', '9999', '--max-chars', '11']
    options = args.get_cli_options(mock_argv)
    assert 9999 == options.min_chars
    assert args.DEFAULT_MAX_CHARS == options.max_chars
def test_get_defaults():
    # options built without any argv carry the documented defaults
    options = args.get_default_options()
    assert args.DEFAULT_COUNT == options.count
| 32.271186
| 71
| 0.688025
|
from passgen import args
def test_num_words():
mock_argv = ['passgen', '-n', '22']
options = args.get_cli_options(mock_argv)
assert 22 == options.num_words
mock_argv = ['passgen', '--num-words', '33']
options = args.get_cli_options(mock_argv)
assert 33 == options.num_words
mock_argv = ['passgen']
options = args.get_cli_options(mock_argv)
assert args.DEFAULT_NUM_WORDS == options.num_words
def test_count():
mock_argv = ['passgen', '-c', '22']
options = args.get_cli_options(mock_argv)
assert 22 == options.count
mock_argv = ['passgen', '--count', '33']
options = args.get_cli_options(mock_argv)
assert 33 == options.count
mock_argv = ['passgen']
options = args.get_cli_options(mock_argv)
assert args.DEFAULT_COUNT == options.count
mock_argv = ['passgen', '--count', '-1']
options = args.get_cli_options(mock_argv)
assert args.DEFAULT_COUNT == options.count
def test_min_chars():
mock_argv = ['passgen', '--min-chars', '33']
options = args.get_cli_options(mock_argv)
assert 33 == options.min_chars
mock_argv = ['passgen']
options = args.get_cli_options(mock_argv)
assert args.DEFAULT_MIN_CHARS == options.min_chars
def test_max_chars():
mock_argv = ['passgen', '--max-chars', '33']
options = args.get_cli_options(mock_argv)
assert 33 == options.max_chars
mock_argv = ['passgen']
options = args.get_cli_options(mock_argv)
assert args.DEFAULT_MAX_CHARS == options.max_chars
def test_conflicting_min_max_chars():
mock_argv = ['passgen', '--min-chars', '9999', '--max-chars', '11']
options = args.get_cli_options(mock_argv)
assert 9999 == options.min_chars
assert args.DEFAULT_MAX_CHARS == options.max_chars
def test_get_defaults():
options = args.get_default_options()
assert args.DEFAULT_COUNT == options.count
| true
| true
|
f717576ebe1b232b2fdba0695ea262b2ae5063cc
| 2,568
|
py
|
Python
|
src/base/base_train.py
|
MohamedAli1995/Cifar-100-Classifier
|
924704a81ce13062825a88b90b80e8ac2ba45d63
|
[
"MIT"
] | 2
|
2019-05-12T16:11:20.000Z
|
2020-04-10T22:39:57.000Z
|
src/base/base_train.py
|
MohamedAli1995/Cifar-100-Classifier
|
924704a81ce13062825a88b90b80e8ac2ba45d63
|
[
"MIT"
] | null | null | null |
src/base/base_train.py
|
MohamedAli1995/Cifar-100-Classifier
|
924704a81ce13062825a88b90b80e8ac2ba45d63
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
class BaseTrain:
    """Standard base_train-class for easy multiple-inheritance.
    It is responsible for defining the functions to be implemented with any child.
    Attributes:
        sess: Tensorflow session to use.
        model: Model to be trained.
        data: Data_loader object to interact with dataset.
        config: Config object to store data related to training, testing and validation.
        logger: Logger object to use tensorboard.
    """
    def __init__(self, sess, model, data, config, logger):
        self.model = model
        self.config = config
        self.sess = sess
        self.data = data
        self.logger = logger
        if not self.config.pretrain:  # If not pretrain then initialize variables.
            self.init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
            self.sess.run(self.init)
    def train(self):
        """Train the model for the number of epochs in config.num_epochs.
        Calls validate_epoch if config.use_val is set to true and per config.val_per_epoch.
        Returns:
        """
        for cur_epoch in range(self.model.cur_epoch_tensor.eval(self.sess), self.config.num_epochs + 1, 1):
            self.data.prepare_new_epoch_data()
            self.train_epoch()
            if self.config.use_val and (
                    cur_epoch % self.config.val_per_epoch == 0 or cur_epoch == self.config.num_epochs):
                self.validate_epoch()
            self.sess.run(self.model.increment_cur_epoch_tensor)
    def train_epoch(self):
        """Implements the logic of training_epoch:
        -Loop over the batches of the training data and call the train step for each.
        -Add any summaries you want using the summary
        """
        # Fixed: `raise NotImplemented` raises TypeError at runtime (NotImplemented is a constant,
        # not an exception); NotImplementedError is the correct abstract-method marker.
        raise NotImplementedError
    def train_step(self):
        """Implements the logic of the train step:
        -Run the tensorflow session
        -Returns:
            Any of the metrics needs to be summarized.
        """
        raise NotImplementedError
    def validate_epoch(self):
        """Implements the logic of validation_epoch:
        -Loop over the batches of the validation data and call the validate step for each.
        -Add any summaries you want using the summary
        """
        raise NotImplementedError
    def validate_step(self):
        """Implements the logic of the validate step:
        -Run the tensorflow session
        -Returns:
            Any of the metrics needs to be summarized.
        """
        raise NotImplementedError
| 36.169014
| 115
| 0.640576
|
import tensorflow as tf
class BaseTrain:
    """Base trainer defining the training-loop contract for subclasses."""

    def __init__(self, sess, model, data, config, logger):
        self.model = model
        self.config = config
        self.sess = sess
        self.data = data
        self.logger = logger
        if not self.config.pretrain:  # only initialize variables for fresh runs
            self.init = tf.group(tf.global_variables_initializer(),
                                 tf.local_variables_initializer())
            self.sess.run(self.init)

    def train(self):
        """Run train_epoch for each remaining epoch, validating per config."""
        for cur_epoch in range(self.model.cur_epoch_tensor.eval(self.sess),
                               self.config.num_epochs + 1, 1):
            self.data.prepare_new_epoch_data()
            self.train_epoch()
            if self.config.use_val and (
                    cur_epoch % self.config.val_per_epoch == 0
                    or cur_epoch == self.config.num_epochs):
                self.validate_epoch()
            self.sess.run(self.model.increment_cur_epoch_tensor)

    def train_epoch(self):
        # BUG FIX: `raise NotImplemented` raises TypeError in Python 3;
        # NotImplementedError is the correct abstract-method exception.
        raise NotImplementedError

    def train_step(self):
        raise NotImplementedError

    def validate_epoch(self):
        raise NotImplementedError

    def validate_step(self):
        raise NotImplementedError
| true
| true
|
f7175782c5dfa546dd124965a86fa50da687c7ac
| 1,267
|
py
|
Python
|
SIGNUS/app/api/signus_v1/post.py
|
837477/SIGNUS
|
cd395dfd45d2c36d09ec9a8069e6e52e19f058e8
|
[
"MIT"
] | null | null | null |
SIGNUS/app/api/signus_v1/post.py
|
837477/SIGNUS
|
cd395dfd45d2c36d09ec9a8069e6e52e19f058e8
|
[
"MIT"
] | null | null | null |
SIGNUS/app/api/signus_v1/post.py
|
837477/SIGNUS
|
cd395dfd45d2c36d09ec9a8069e6e52e19f058e8
|
[
"MIT"
] | null | null | null |
'''
SIGNUS V1 post API
'''
from flask import g
from app.api.signus_v1 import signus_v1 as api
from app.api.decorators import timer, login_required, login_optional
from app.controllers.post import (post_like,
post_unlike,
post_view)
@api.route("/post/like/<string:post_oid>", methods=["PATCH"])
@timer
@login_required
def signus_v1_post_like(post_oid):
''' 게시글 좋아요 '''
return {
"msg": "success",
"result": post_like(g.mongo_cur,
post_oid,
g.user)
}
@api.route("/post/unlike/<string:post_oid>", methods=["PATCH"])
@timer
@login_required
def signus_v1_post_unlike(post_oid):
''' 게시글 좋아요 취소 '''
return {
"msg": "success",
"result": post_unlike(g.mongo_cur,
post_oid,
g.user)
}
@api.route("/post/view/<string:post_oid>", methods=["PATCH"])
@timer
@login_optional
def signus_v1_post_view(post_oid):
''' 게시글 조회수 '''
if 'user' in g:
result = post_view(g.mongo_cur, post_oid, g.user)
else:
result = post_view(g.mongo_cur, post_oid)
return {
"msg": "success",
"result": result
}
| 24.843137
| 68
| 0.556433
|
from flask import g
from app.api.signus_v1 import signus_v1 as api
from app.api.decorators import timer, login_required, login_optional
from app.controllers.post import (post_like,
post_unlike,
post_view)
@api.route("/post/like/<string:post_oid>", methods=["PATCH"])
@timer
@login_required
def signus_v1_post_like(post_oid):
return {
"msg": "success",
"result": post_like(g.mongo_cur,
post_oid,
g.user)
}
@api.route("/post/unlike/<string:post_oid>", methods=["PATCH"])
@timer
@login_required
def signus_v1_post_unlike(post_oid):
return {
"msg": "success",
"result": post_unlike(g.mongo_cur,
post_oid,
g.user)
}
@api.route("/post/view/<string:post_oid>", methods=["PATCH"])
@timer
@login_optional
def signus_v1_post_view(post_oid):
if 'user' in g:
result = post_view(g.mongo_cur, post_oid, g.user)
else:
result = post_view(g.mongo_cur, post_oid)
return {
"msg": "success",
"result": result
}
| true
| true
|
f71759ee5d329c4385a20b4d6bd880bfb741c347
| 1,398
|
py
|
Python
|
migrations/versions/f0a99f6b5e5e_.py
|
CSCfi/pebbles
|
24b32e8fc538cc8095fda62c892a8221346c2bce
|
[
"MIT"
] | 4
|
2017-05-11T14:50:32.000Z
|
2020-01-10T09:02:27.000Z
|
migrations/versions/f0a99f6b5e5e_.py
|
CSCfi/pebbles
|
24b32e8fc538cc8095fda62c892a8221346c2bce
|
[
"MIT"
] | 145
|
2017-04-07T11:01:58.000Z
|
2019-12-11T15:30:23.000Z
|
migrations/versions/f0a99f6b5e5e_.py
|
CSCfi/pebbles
|
24b32e8fc538cc8095fda62c892a8221346c2bce
|
[
"MIT"
] | 3
|
2017-10-25T12:36:16.000Z
|
2018-04-26T08:49:34.000Z
|
"""empty message
Revision ID: f0a99f6b5e5e
Revises: he536vdwh29f
Create Date: 2019-05-31 15:57:36.032393
"""
# revision identifiers, used by Alembic.
revision = 'f0a99f6b5e5e'
down_revision = 'he536vdwh29f'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the instance_tokens table (token PK, FK to instances.id)."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('instance_tokens',
    sa.Column('token', sa.String(length=32), nullable=False),
    sa.Column('instance_id', sa.String(length=32), nullable=True),
    sa.Column('expires_on', sa.DateTime(), nullable=True),
    sa.ForeignKeyConstraint(['instance_id'], ['instances.id'], name=op.f('fk_instance_tokens_instance_id_instances')),
    sa.PrimaryKeyConstraint('token', name=op.f('pk_instance_tokens'))
    )
    # The constraint changes below were generated but deliberately left disabled.
    # op.create_unique_constraint(op.f('uq_users_email_id'), 'users', ['email_id'])
    # op.create_unique_constraint(op.f('uq_users_eppn'), 'users', ['eppn'])
    # op.drop_constraint(u'uq_users_email', 'users', type_='unique')
    ### end Alembic commands ###
def downgrade():
    """Drop the instance_tokens table, reversing upgrade()."""
    ### commands auto generated by Alembic - please adjust! ###
    # The user-constraint reversals were generated but deliberately left disabled.
    # op.create_unique_constraint(u'uq_users_email', 'users', ['eppn'])
    # op.drop_constraint(op.f('uq_users_eppn'), 'users', type_='unique')
    # op.drop_constraint(op.f('uq_users_email_id'), 'users', type_='unique')
    op.drop_table('instance_tokens')
    ### end Alembic commands ###
| 35.846154
| 118
| 0.700286
|
revision = 'f0a99f6b5e5e'
down_revision = 'he536vdwh29f'
from alembic import op
import sqlalchemy as sa
def upgrade():
ance_id', sa.String(length=32), nullable=True),
sa.Column('expires_on', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['instance_id'], ['instances.id'], name=op.f('fk_instance_tokens_instance_id_instances')),
sa.PrimaryKeyConstraint('token', name=op.f('pk_instance_tokens'))
)
| true
| true
|
f71759fb13c6dffdc6c632f41e1c01f82a06b50a
| 103,166
|
py
|
Python
|
python/pyarrow/tests/test_parquet.py
|
sparkma/arrow
|
62fd703a4ef0abbecb02397a06a630a9dee382d9
|
[
"Apache-2.0"
] | null | null | null |
python/pyarrow/tests/test_parquet.py
|
sparkma/arrow
|
62fd703a4ef0abbecb02397a06a630a9dee382d9
|
[
"Apache-2.0"
] | null | null | null |
python/pyarrow/tests/test_parquet.py
|
sparkma/arrow
|
62fd703a4ef0abbecb02397a06a630a9dee382d9
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
import datetime
import decimal
import io
import json
import os
import six
import pickle
import pytest
import numpy as np
import pyarrow as pa
from pyarrow.compat import guid, u, BytesIO, unichar, PY2
from pyarrow.pandas_compat import _pandas_api
from pyarrow.tests import util
from pyarrow.filesystem import LocalFileSystem, FileSystem
try:
import pyarrow.parquet as pq
except ImportError:
pq = None
try:
import pandas as pd
import pandas.util.testing as tm
from .pandas_examples import dataframe_with_arrays, dataframe_with_lists
except ImportError:
pd = tm = None
# Marks all of the tests in this module
# Ignore these with pytest ... -m 'not parquet'
pytestmark = pytest.mark.parquet
@pytest.fixture(scope='module')
def datadir(datadir):
    # Narrow the shared test-data directory fixture to the parquet subfolder.
    return datadir / 'parquet'
def _write_table(table, path, **kwargs):
    """Write *table* (Arrow Table or pandas DataFrame) to *path*; return the Table."""
    # Import locally so a missing pyarrow.parquet raises at the call site.
    import pyarrow.parquet as pq

    is_frame = _pandas_api.is_data_frame(table)
    if is_frame:
        table = pa.Table.from_pandas(table)
    pq.write_table(table, path, **kwargs)
    return table
def _read_table(*args, **kwargs):
    """Thin wrapper around pq.read_table, forwarding all arguments."""
    return pq.read_table(*args, **kwargs)
def _roundtrip_table(table, read_table_kwargs=None,
                     write_table_kwargs=None):
    """Write *table* to an in-memory Parquet buffer and read it back."""
    buf = io.BytesIO()
    _write_table(table, buf, **(write_table_kwargs or {}))
    buf.seek(0)
    return _read_table(buf, **(read_table_kwargs or {}))
def _check_roundtrip(table, expected=None, read_table_kwargs=None,
                     **write_table_kwargs):
    """Round-trip *table* through Parquet twice, asserting equality each time."""
    expected = table if expected is None else expected
    read_table_kwargs = read_table_kwargs or {}

    # Two passes on purpose: the second exercises reading a re-written file.
    result = table
    for _ in range(2):
        result = _roundtrip_table(result, read_table_kwargs=read_table_kwargs,
                                  write_table_kwargs=write_table_kwargs)
        assert result.equals(expected)
def _roundtrip_pandas_dataframe(df, write_kwargs):
    """Convert *df* to Arrow, round-trip it through an in-memory Parquet file,
    and return the result converted back to pandas."""
    buf = io.BytesIO()
    _write_table(pa.Table.from_pandas(df), buf, **write_kwargs)
    buf.seek(0)
    return _read_table(buf).to_pandas()
@pytest.mark.parametrize('dtype', [int, float])
def test_single_pylist_column_roundtrip(tempdir, dtype):
filename = tempdir / 'single_{}_column.parquet'.format(dtype.__name__)
data = [pa.array(list(map(dtype, range(5))))]
table = pa.Table.from_arrays(data, names=['a'])
_write_table(table, filename)
table_read = _read_table(filename)
for i in range(table.num_columns):
col_written = table[i]
col_read = table_read[i]
assert table.field(i).name == table_read.field(i).name
assert col_read.num_chunks == 1
data_written = col_written.chunk(0)
data_read = col_read.chunk(0)
assert data_written.equals(data_read)
def alltypes_sample(size=10000, seed=0, categorical=False):
    """Build a DataFrame with one column per primitive type used by the tests.

    Column order is significant for the round-trip assertions, so the dict is
    laid out exactly as the schema should appear.
    """
    np.random.seed(seed)
    columns = {
        'uint8': np.arange(size, dtype=np.uint8),
        'uint16': np.arange(size, dtype=np.uint16),
        'uint32': np.arange(size, dtype=np.uint32),
        'uint64': np.arange(size, dtype=np.uint64),
        'int8': np.arange(size, dtype=np.int16),
        'int16': np.arange(size, dtype=np.int16),
        'int32': np.arange(size, dtype=np.int32),
        'int64': np.arange(size, dtype=np.int64),
        'float32': np.arange(size, dtype=np.float32),
        'float64': np.arange(size, dtype=np.float64),
        'bool': np.random.randn(size) > 0,
        # TODO(wesm): Test other timestamp resolutions now that arrow supports
        # them
        'datetime': np.arange("2016-01-01T00:00:00.001", size,
                              dtype='datetime64[ms]'),
        'str': pd.Series(list(map(str, range(size)))),
        'empty_str': [''] * size,
        'str_with_nulls': [None] + list(map(str, range(size - 2))) + [None],
        'null': [None] * size,
        'null_list': [None] * 2 + [[None] * (x % 4) for x in range(size - 2)],
    }
    if categorical:
        columns['str_category'] = columns['str'].astype('category')
    return pd.DataFrame(columns)
@pytest.mark.pandas
@pytest.mark.parametrize('chunk_size', [None, 1000])
def test_pandas_parquet_2_0_roundtrip(tempdir, chunk_size):
df = alltypes_sample(size=10000, categorical=True)
filename = tempdir / 'pandas_roundtrip.parquet'
arrow_table = pa.Table.from_pandas(df)
assert arrow_table.schema.pandas_metadata is not None
_write_table(arrow_table, filename, version="2.0",
coerce_timestamps='ms', chunk_size=chunk_size)
table_read = pq.read_pandas(filename)
assert table_read.schema.pandas_metadata is not None
assert arrow_table.schema.metadata == table_read.schema.metadata
df_read = table_read.to_pandas()
tm.assert_frame_equal(df, df_read)
def test_set_data_page_size():
    """Round-trip a table while varying the Parquet data-page size setting."""
    arr = pa.array([1, 2, 3] * 1000000)
    t = pa.Table.from_arrays([arr], names=['f0'])
    # 128K, 256K, 512K
    page_sizes = [2 << 16, 2 << 17, 2 << 18]
    for target_page_size in page_sizes:
        _check_roundtrip(t, data_page_size=target_page_size)
@pytest.mark.pandas
def test_chunked_table_write():
# ARROW-232
df = alltypes_sample(size=10)
batch = pa.RecordBatch.from_pandas(df)
table = pa.Table.from_batches([batch] * 3)
_check_roundtrip(table, version='2.0')
df, _ = dataframe_with_lists()
batch = pa.RecordBatch.from_pandas(df)
table = pa.Table.from_batches([batch] * 3)
_check_roundtrip(table, version='2.0')
@pytest.mark.pandas
def test_no_memory_map(tempdir):
df = alltypes_sample(size=10)
table = pa.Table.from_pandas(df)
_check_roundtrip(table, read_table_kwargs={'memory_map': False},
version='2.0')
filename = str(tempdir / 'tmp_file')
with open(filename, 'wb') as f:
_write_table(table, f, version='2.0')
table_read = pq.read_pandas(filename, memory_map=False)
assert table_read.equals(table)
def test_special_chars_filename(tempdir):
table = pa.Table.from_arrays([pa.array([42])], ["ints"])
filename = "foo # bar"
path = tempdir / filename
assert not path.exists()
_write_table(table, str(path))
assert path.exists()
table_read = _read_table(str(path))
assert table_read.equals(table)
@pytest.mark.pandas
def test_empty_table_roundtrip():
df = alltypes_sample(size=10)
# Create a non-empty table to infer the types correctly, then slice to 0
table = pa.Table.from_pandas(df)
table = pa.Table.from_arrays(
[col.chunk(0)[:0] for col in table.itercolumns()],
names=table.schema.names)
assert table.schema.field_by_name('null').type == pa.null()
assert table.schema.field_by_name('null_list').type == pa.list_(pa.null())
_check_roundtrip(table, version='2.0')
@pytest.mark.pandas
def test_empty_table_no_columns():
df = pd.DataFrame()
empty = pa.Table.from_pandas(df, preserve_index=False)
_check_roundtrip(empty)
def test_empty_lists_table_roundtrip():
    """Regression test: an all-empty list column must round-trip cleanly."""
    # ARROW-2744: Shouldn't crash when writing an array of empty lists
    arr = pa.array([[], []], type=pa.list_(pa.int32()))
    table = pa.Table.from_arrays([arr], ["A"])
    _check_roundtrip(table)
@pytest.mark.pandas
def test_pandas_parquet_datetime_tz():
s = pd.Series([datetime.datetime(2017, 9, 6)])
s = s.dt.tz_localize('utc')
s.index = s
# Both a column and an index to hit both use cases
df = pd.DataFrame({'tz_aware': s,
'tz_eastern': s.dt.tz_convert('US/Eastern')},
index=s)
f = BytesIO()
arrow_table = pa.Table.from_pandas(df)
_write_table(arrow_table, f, coerce_timestamps='ms')
f.seek(0)
table_read = pq.read_pandas(f)
df_read = table_read.to_pandas()
tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
@pytest.mark.skipif(six.PY2, reason='datetime.timezone is available since '
'python version 3.2')
def test_datetime_timezone_tzinfo():
value = datetime.datetime(2018, 1, 1, 1, 23, 45,
tzinfo=datetime.timezone.utc)
df = pd.DataFrame({'foo': [value]})
_roundtrip_pandas_dataframe(df, write_kwargs={})
@pytest.mark.pandas
def test_pandas_parquet_custom_metadata(tempdir):
df = alltypes_sample(size=10000)
filename = tempdir / 'pandas_roundtrip.parquet'
arrow_table = pa.Table.from_pandas(df)
assert b'pandas' in arrow_table.schema.metadata
_write_table(arrow_table, filename, version='2.0', coerce_timestamps='ms')
metadata = pq.read_metadata(filename).metadata
assert b'pandas' in metadata
js = json.loads(metadata[b'pandas'].decode('utf8'))
assert js['index_columns'] == [{'kind': 'range',
'name': None,
'start': 0, 'stop': 10000,
'step': 1}]
@pytest.mark.pandas
def test_pandas_parquet_column_multiindex(tempdir):
df = alltypes_sample(size=10)
df.columns = pd.MultiIndex.from_tuples(
list(zip(df.columns, df.columns[::-1])),
names=['level_1', 'level_2']
)
filename = tempdir / 'pandas_roundtrip.parquet'
arrow_table = pa.Table.from_pandas(df)
assert arrow_table.schema.pandas_metadata is not None
_write_table(arrow_table, filename, version='2.0', coerce_timestamps='ms')
table_read = pq.read_pandas(filename)
df_read = table_read.to_pandas()
tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
def test_pandas_parquet_2_0_roundtrip_read_pandas_no_index_written(tempdir):
df = alltypes_sample(size=10000)
filename = tempdir / 'pandas_roundtrip.parquet'
arrow_table = pa.Table.from_pandas(df, preserve_index=False)
js = arrow_table.schema.pandas_metadata
assert not js['index_columns']
# ARROW-2170
# While index_columns should be empty, columns needs to be filled still.
assert js['columns']
_write_table(arrow_table, filename, version='2.0', coerce_timestamps='ms')
table_read = pq.read_pandas(filename)
js = table_read.schema.pandas_metadata
assert not js['index_columns']
assert arrow_table.schema.metadata == table_read.schema.metadata
df_read = table_read.to_pandas()
tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
def test_pandas_parquet_1_0_roundtrip(tempdir):
size = 10000
np.random.seed(0)
df = pd.DataFrame({
'uint8': np.arange(size, dtype=np.uint8),
'uint16': np.arange(size, dtype=np.uint16),
'uint32': np.arange(size, dtype=np.uint32),
'uint64': np.arange(size, dtype=np.uint64),
'int8': np.arange(size, dtype=np.int16),
'int16': np.arange(size, dtype=np.int16),
'int32': np.arange(size, dtype=np.int32),
'int64': np.arange(size, dtype=np.int64),
'float32': np.arange(size, dtype=np.float32),
'float64': np.arange(size, dtype=np.float64),
'bool': np.random.randn(size) > 0,
'str': [str(x) for x in range(size)],
'str_with_nulls': [None] + [str(x) for x in range(size - 2)] + [None],
'empty_str': [''] * size
})
filename = tempdir / 'pandas_roundtrip.parquet'
arrow_table = pa.Table.from_pandas(df)
_write_table(arrow_table, filename, version='1.0')
table_read = _read_table(filename)
df_read = table_read.to_pandas()
# We pass uint32_t as int64_t if we write Parquet version 1.0
df['uint32'] = df['uint32'].values.astype(np.int64)
tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
def test_multiple_path_types(tempdir):
# Test compatibility with PEP 519 path-like objects
path = tempdir / 'zzz.parquet'
df = pd.DataFrame({'x': np.arange(10, dtype=np.int64)})
_write_table(df, path)
table_read = _read_table(path)
df_read = table_read.to_pandas()
tm.assert_frame_equal(df, df_read)
# Test compatibility with plain string paths
path = str(tempdir) + 'zzz.parquet'
df = pd.DataFrame({'x': np.arange(10, dtype=np.int64)})
_write_table(df, path)
table_read = _read_table(path)
df_read = table_read.to_pandas()
tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
def test_pandas_column_selection(tempdir):
size = 10000
np.random.seed(0)
df = pd.DataFrame({
'uint8': np.arange(size, dtype=np.uint8),
'uint16': np.arange(size, dtype=np.uint16)
})
filename = tempdir / 'pandas_roundtrip.parquet'
arrow_table = pa.Table.from_pandas(df)
_write_table(arrow_table, filename)
table_read = _read_table(filename, columns=['uint8'])
df_read = table_read.to_pandas()
tm.assert_frame_equal(df[['uint8']], df_read)
# ARROW-4267: Selection of duplicate columns still leads to these columns
# being read uniquely.
table_read = _read_table(filename, columns=['uint8', 'uint8'])
df_read = table_read.to_pandas()
tm.assert_frame_equal(df[['uint8']], df_read)
def _random_integers(size, dtype):
# We do not generate integers outside the int64 range
platform_int_info = np.iinfo('int_')
iinfo = np.iinfo(dtype)
return np.random.randint(max(iinfo.min, platform_int_info.min),
min(iinfo.max, platform_int_info.max),
size=size).astype(dtype)
def _test_dataframe(size=10000, seed=0):
    """Build a seeded random DataFrame covering the common test dtypes."""
    np.random.seed(seed)
    df = pd.DataFrame({
        'uint8': _random_integers(size, np.uint8),
        'uint16': _random_integers(size, np.uint16),
        'uint32': _random_integers(size, np.uint32),
        'uint64': _random_integers(size, np.uint64),
        'int8': _random_integers(size, np.int8),
        'int16': _random_integers(size, np.int16),
        'int32': _random_integers(size, np.int32),
        'int64': _random_integers(size, np.int64),
        'float32': np.random.randn(size).astype(np.float32),
        'float64': np.arange(size, dtype=np.float64),
        'bool': np.random.randn(size) > 0,
        'strings': [tm.rands(10) for i in range(size)],
        'all_none': [None] * size,
        'all_none_category': [None] * size
    })
    # TODO(PARQUET-1015)
    # df['all_none_category'] = df['all_none_category'].astype('category')
    return df
@pytest.mark.pandas
def test_pandas_parquet_native_file_roundtrip(tempdir):
df = _test_dataframe(10000)
arrow_table = pa.Table.from_pandas(df)
imos = pa.BufferOutputStream()
_write_table(arrow_table, imos, version="2.0")
buf = imos.getvalue()
reader = pa.BufferReader(buf)
df_read = _read_table(reader).to_pandas()
tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
def test_parquet_incremental_file_build(tempdir):
df = _test_dataframe(100)
df['unique_id'] = 0
arrow_table = pa.Table.from_pandas(df, preserve_index=False)
out = pa.BufferOutputStream()
writer = pq.ParquetWriter(out, arrow_table.schema, version='2.0')
frames = []
for i in range(10):
df['unique_id'] = i
arrow_table = pa.Table.from_pandas(df, preserve_index=False)
writer.write_table(arrow_table)
frames.append(df.copy())
writer.close()
buf = out.getvalue()
result = _read_table(pa.BufferReader(buf))
expected = pd.concat(frames, ignore_index=True)
tm.assert_frame_equal(result.to_pandas(), expected)
@pytest.mark.pandas
def test_read_pandas_column_subset(tempdir):
df = _test_dataframe(10000)
arrow_table = pa.Table.from_pandas(df)
imos = pa.BufferOutputStream()
_write_table(arrow_table, imos, version="2.0")
buf = imos.getvalue()
reader = pa.BufferReader(buf)
df_read = pq.read_pandas(reader, columns=['strings', 'uint8']).to_pandas()
tm.assert_frame_equal(df[['strings', 'uint8']], df_read)
@pytest.mark.pandas
def test_pandas_parquet_empty_roundtrip(tempdir):
df = _test_dataframe(0)
arrow_table = pa.Table.from_pandas(df)
imos = pa.BufferOutputStream()
_write_table(arrow_table, imos, version="2.0")
buf = imos.getvalue()
reader = pa.BufferReader(buf)
df_read = _read_table(reader).to_pandas()
tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
def test_pandas_parquet_pyfile_roundtrip(tempdir):
filename = tempdir / 'pandas_pyfile_roundtrip.parquet'
size = 5
df = pd.DataFrame({
'int64': np.arange(size, dtype=np.int64),
'float32': np.arange(size, dtype=np.float32),
'float64': np.arange(size, dtype=np.float64),
'bool': np.random.randn(size) > 0,
'strings': ['foo', 'bar', None, 'baz', 'qux']
})
arrow_table = pa.Table.from_pandas(df)
with filename.open('wb') as f:
_write_table(arrow_table, f, version="1.0")
data = io.BytesIO(filename.read_bytes())
table_read = _read_table(data)
df_read = table_read.to_pandas()
tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
def test_pandas_parquet_configuration_options(tempdir):
size = 10000
np.random.seed(0)
df = pd.DataFrame({
'uint8': np.arange(size, dtype=np.uint8),
'uint16': np.arange(size, dtype=np.uint16),
'uint32': np.arange(size, dtype=np.uint32),
'uint64': np.arange(size, dtype=np.uint64),
'int8': np.arange(size, dtype=np.int16),
'int16': np.arange(size, dtype=np.int16),
'int32': np.arange(size, dtype=np.int32),
'int64': np.arange(size, dtype=np.int64),
'float32': np.arange(size, dtype=np.float32),
'float64': np.arange(size, dtype=np.float64),
'bool': np.random.randn(size) > 0
})
filename = tempdir / 'pandas_roundtrip.parquet'
arrow_table = pa.Table.from_pandas(df)
for use_dictionary in [True, False]:
_write_table(arrow_table, filename, version='2.0',
use_dictionary=use_dictionary)
table_read = _read_table(filename)
df_read = table_read.to_pandas()
tm.assert_frame_equal(df, df_read)
for write_statistics in [True, False]:
_write_table(arrow_table, filename, version='2.0',
write_statistics=write_statistics)
table_read = _read_table(filename)
df_read = table_read.to_pandas()
tm.assert_frame_equal(df, df_read)
for compression in ['NONE', 'SNAPPY', 'GZIP', 'LZ4', 'ZSTD']:
_write_table(arrow_table, filename, version='2.0',
compression=compression)
table_read = _read_table(filename)
df_read = table_read.to_pandas()
tm.assert_frame_equal(df, df_read)
def make_sample_file(table_or_df):
    """Write *table_or_df* to an in-memory Parquet file and open it as a ParquetFile."""
    if isinstance(table_or_df, pa.Table):
        a_table = table_or_df
    else:
        a_table = pa.Table.from_pandas(table_or_df)
    buf = io.BytesIO()
    _write_table(a_table, buf, compression='SNAPPY', version='2.0',
                 coerce_timestamps='ms')
    buf.seek(0)
    return pq.ParquetFile(buf)
@pytest.mark.pandas
def test_parquet_metadata_api():
    """Sniff-test FileMetaData, ParquetSchema, RowGroupMetaData and
    ColumnChunkMetaData accessors on a sample file."""
    df = alltypes_sample(size=10000)
    df = df.reindex(columns=sorted(df.columns))
    df.index = np.random.randint(0, 1000000, size=len(df))
    fileh = make_sample_file(df)
    ncols = len(df.columns)
    # Series of sniff tests
    meta = fileh.metadata
    repr(meta)
    assert meta.num_rows == len(df)
    assert meta.num_columns == ncols + 1  # +1 for index
    assert meta.num_row_groups == 1
    assert meta.format_version == '2.0'
    assert 'parquet-cpp' in meta.created_by
    assert isinstance(meta.serialized_size, int)
    assert isinstance(meta.metadata, dict)
    # Schema
    schema = fileh.schema
    assert meta.schema is schema
    assert len(schema) == ncols + 1  # +1 for index
    repr(schema)
    col = schema[0]
    repr(col)
    assert col.name == df.columns[0]
    assert col.max_definition_level == 1
    # FIX: the original asserted max_repetition_level twice; one copy removed.
    assert col.max_repetition_level == 0
    assert col.physical_type == 'BOOLEAN'
    assert col.converted_type == 'NONE'
    with pytest.raises(IndexError):
        schema[ncols + 1]  # +1 for index
    with pytest.raises(IndexError):
        schema[-1]
    # Row group
    for rg in range(meta.num_row_groups):
        rg_meta = meta.row_group(rg)
        assert isinstance(rg_meta, pq.RowGroupMetaData)
        repr(rg_meta)
        for col in range(rg_meta.num_columns):
            col_meta = rg_meta.column(col)
            assert isinstance(col_meta, pq.ColumnChunkMetaData)
            repr(col_meta)
    with pytest.raises(IndexError):
        meta.row_group(-1)
    with pytest.raises(IndexError):
        meta.row_group(meta.num_row_groups + 1)
    rg_meta = meta.row_group(0)
    assert rg_meta.num_rows == len(df)
    assert rg_meta.num_columns == ncols + 1  # +1 for index
    assert rg_meta.total_byte_size > 0
    with pytest.raises(IndexError):
        col_meta = rg_meta.column(-1)
    with pytest.raises(IndexError):
        col_meta = rg_meta.column(ncols + 2)
    col_meta = rg_meta.column(0)
    assert col_meta.file_offset > 0
    assert col_meta.file_path == ''  # created from BytesIO
    assert col_meta.physical_type == 'BOOLEAN'
    assert col_meta.num_values == 10000
    assert col_meta.path_in_schema == 'bool'
    assert col_meta.is_stats_set is True
    assert isinstance(col_meta.statistics, pq.Statistics)
    assert col_meta.compression == 'SNAPPY'
    assert col_meta.encodings == ('PLAIN', 'RLE')
    assert col_meta.has_dictionary_page is False
    assert col_meta.dictionary_page_offset is None
    assert col_meta.data_page_offset > 0
    assert col_meta.total_compressed_size > 0
    assert col_meta.total_uncompressed_size > 0
    with pytest.raises(NotImplementedError):
        col_meta.has_index_page
    with pytest.raises(NotImplementedError):
        col_meta.index_page_offset
@pytest.mark.pandas
@pytest.mark.parametrize(
(
'data',
'type',
'physical_type',
'min_value',
'max_value',
'null_count',
'num_values',
'distinct_count'
),
[
([1, 2, 2, None, 4], pa.uint8(), 'INT32', 1, 4, 1, 4, 0),
([1, 2, 2, None, 4], pa.uint16(), 'INT32', 1, 4, 1, 4, 0),
([1, 2, 2, None, 4], pa.uint32(), 'INT32', 1, 4, 1, 4, 0),
([1, 2, 2, None, 4], pa.uint64(), 'INT64', 1, 4, 1, 4, 0),
([-1, 2, 2, None, 4], pa.int8(), 'INT32', -1, 4, 1, 4, 0),
([-1, 2, 2, None, 4], pa.int16(), 'INT32', -1, 4, 1, 4, 0),
([-1, 2, 2, None, 4], pa.int32(), 'INT32', -1, 4, 1, 4, 0),
([-1, 2, 2, None, 4], pa.int64(), 'INT64', -1, 4, 1, 4, 0),
(
[-1.1, 2.2, 2.3, None, 4.4], pa.float32(),
'FLOAT', -1.1, 4.4, 1, 4, 0
),
(
[-1.1, 2.2, 2.3, None, 4.4], pa.float64(),
'DOUBLE', -1.1, 4.4, 1, 4, 0
),
(
[u'', u'b', unichar(1000), None, u'aaa'], pa.binary(),
'BYTE_ARRAY', b'', unichar(1000).encode('utf-8'), 1, 4, 0
),
(
[True, False, False, True, True], pa.bool_(),
'BOOLEAN', False, True, 0, 5, 0
),
(
[b'\x00', b'b', b'12', None, b'aaa'], pa.binary(),
'BYTE_ARRAY', b'\x00', b'b', 1, 4, 0
),
]
)
def test_parquet_column_statistics_api(data, type, physical_type, min_value,
max_value, null_count, num_values,
distinct_count):
df = pd.DataFrame({'data': data})
schema = pa.schema([pa.field('data', type)])
table = pa.Table.from_pandas(df, schema=schema, safe=False)
fileh = make_sample_file(table)
meta = fileh.metadata
rg_meta = meta.row_group(0)
col_meta = rg_meta.column(0)
stat = col_meta.statistics
assert stat.has_min_max
assert _close(type, stat.min, min_value)
assert _close(type, stat.max, max_value)
assert stat.null_count == null_count
assert stat.num_values == num_values
# TODO(kszucs) until parquet-cpp API doesn't expose HasDistinctCount
# method, missing distinct_count is represented as zero instead of None
assert stat.distinct_count == distinct_count
assert stat.physical_type == physical_type
def _close(type, left, right):
    """Compare two values, with a tolerance scaled to the Arrow float type."""
    if type == pa.float32():
        return abs(left - right) < 1E-7
    if type == pa.float64():
        return abs(left - right) < 1E-13
    # non-floating types compare exactly
    return left == right
def test_statistics_convert_logical_types(tempdir):
# ARROW-5166, ARROW-4139
# (min, max, type)
cases = [(10, 11164359321221007157, pa.uint64()),
(10, 4294967295, pa.uint32()),
(u"ähnlich", u"öffentlich", pa.utf8()),
(datetime.time(10, 30, 0, 1000), datetime.time(15, 30, 0, 1000),
pa.time32('ms')),
(datetime.time(10, 30, 0, 1000), datetime.time(15, 30, 0, 1000),
pa.time64('us')),
(datetime.datetime(2019, 6, 24, 0, 0, 0, 1000),
datetime.datetime(2019, 6, 25, 0, 0, 0, 1000),
pa.timestamp('ms')),
(datetime.datetime(2019, 6, 24, 0, 0, 0, 1000),
datetime.datetime(2019, 6, 25, 0, 0, 0, 1000),
pa.timestamp('us'))]
for i, (min_val, max_val, typ) in enumerate(cases):
t = pa.Table.from_arrays([pa.array([min_val, max_val], type=typ)],
['col'])
path = str(tempdir / ('example{}.parquet'.format(i)))
pq.write_table(t, path, version='2.0')
pf = pq.ParquetFile(path)
stats = pf.metadata.row_group(0).column(0).statistics
assert stats.min == min_val
assert stats.max == max_val
def test_parquet_write_disable_statistics(tempdir):
table = pa.Table.from_pydict(
{'a': pa.array([1, 2, 3]), 'b': pa.array(['a', 'b', 'c'])})
_write_table(table, tempdir / 'data.parquet')
meta = pq.read_metadata(tempdir / 'data.parquet')
for col in [0, 1]:
cc = meta.row_group(0).column(col)
assert cc.is_stats_set is True
assert cc.statistics is not None
_write_table(table, tempdir / 'data2.parquet', write_statistics=False)
meta = pq.read_metadata(tempdir / 'data2.parquet')
for col in [0, 1]:
cc = meta.row_group(0).column(col)
assert cc.is_stats_set is False
assert cc.statistics is None
_write_table(table, tempdir / 'data3.parquet', write_statistics=['a'])
meta = pq.read_metadata(tempdir / 'data3.parquet')
cc_a = meta.row_group(0).column(0)
assert cc_a.is_stats_set is True
assert cc_a.statistics is not None
cc_b = meta.row_group(0).column(1)
assert cc_b.is_stats_set is False
assert cc_b.statistics is None
@pytest.mark.pandas
def test_compare_schemas():
df = alltypes_sample(size=10000)
fileh = make_sample_file(df)
fileh2 = make_sample_file(df)
fileh3 = make_sample_file(df[df.columns[::2]])
# ParquetSchema
assert isinstance(fileh.schema, pq.ParquetSchema)
assert fileh.schema.equals(fileh.schema)
assert fileh.schema == fileh.schema
assert fileh.schema.equals(fileh2.schema)
assert fileh.schema == fileh2.schema
assert fileh.schema != 'arbitrary object'
assert not fileh.schema.equals(fileh3.schema)
assert fileh.schema != fileh3.schema
# ColumnSchema
assert isinstance(fileh.schema[0], pq.ColumnSchema)
assert fileh.schema[0].equals(fileh.schema[0])
assert fileh.schema[0] == fileh.schema[0]
assert not fileh.schema[0].equals(fileh.schema[1])
assert fileh.schema[0] != fileh.schema[1]
assert fileh.schema[0] != 'arbitrary object'
def test_validate_schema_write_table(tempdir):
# ARROW-2926
simple_fields = [
pa.field('POS', pa.uint32()),
pa.field('desc', pa.string())
]
simple_schema = pa.schema(simple_fields)
# simple_table schema does not match simple_schema
simple_from_array = [pa.array([1]), pa.array(['bla'])]
simple_table = pa.Table.from_arrays(simple_from_array, ['POS', 'desc'])
path = tempdir / 'simple_validate_schema.parquet'
with pq.ParquetWriter(path, simple_schema,
version='2.0',
compression='snappy', flavor='spark') as w:
with pytest.raises(ValueError):
w.write_table(simple_table)
@pytest.mark.pandas
def test_column_of_arrays(tempdir):
df, schema = dataframe_with_arrays()
filename = tempdir / 'pandas_roundtrip.parquet'
arrow_table = pa.Table.from_pandas(df, schema=schema)
_write_table(arrow_table, filename, version="2.0", coerce_timestamps='ms')
table_read = _read_table(filename)
df_read = table_read.to_pandas()
tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
def test_coerce_timestamps(tempdir):
    """coerce_timestamps changes timestamp resolution on write (ARROW-622);
    an unknown resolution string is rejected with ValueError."""
    from collections import OrderedDict
    # ARROW-622
    arrays = OrderedDict()
    fields = [pa.field('datetime64',
                       pa.list_(pa.timestamp('ms')))]
    # Lists of ms-resolution datetimes, including None entries and None lists.
    arrays['datetime64'] = [
        np.array(['2007-07-13T01:23:34.123456789',
                  None,
                  '2010-08-13T05:46:57.437699912'],
                 dtype='datetime64[ms]'),
        None,
        None,
        np.array(['2007-07-13T02',
                  None,
                  '2010-08-13T05:46:57.437699912'],
                 dtype='datetime64[ms]'),
    ]
    df = pd.DataFrame(arrays)
    schema = pa.schema(fields)
    filename = tempdir / 'pandas_roundtrip.parquet'
    arrow_table = pa.Table.from_pandas(df, schema=schema)
    # Written with 'us' coercion, so the read-back values are microsecond.
    _write_table(arrow_table, filename, version="2.0", coerce_timestamps='us')
    table_read = _read_table(filename)
    df_read = table_read.to_pandas()
    df_expected = df.copy()
    for i, x in enumerate(df_expected['datetime64']):
        if isinstance(x, np.ndarray):
            # NOTE(review): chained assignment (`df[col][i] = ...`) can emit
            # SettingWithCopyWarning on newer pandas — confirm if upgrading.
            df_expected['datetime64'][i] = x.astype('M8[us]')
    tm.assert_frame_equal(df_expected, df_read)
    with pytest.raises(ValueError):
        _write_table(arrow_table, filename, version='2.0',
                     coerce_timestamps='unknown')
@pytest.mark.pandas
def test_coerce_timestamps_truncated(tempdir):
    """
    ARROW-2555: Test that we can truncate timestamps when coercing if
    explicitly allowed.
    """
    # dt_us carries sub-millisecond precision that coercing to 'ms' discards.
    dt_us = datetime.datetime(year=2017, month=1, day=1, hour=1, minute=1,
                              second=1, microsecond=1)
    dt_ms = datetime.datetime(year=2017, month=1, day=1, hour=1, minute=1,
                              second=1)
    fields_us = [pa.field('datetime64', pa.timestamp('us'))]
    arrays_us = {'datetime64': [dt_us, dt_ms]}
    df_us = pd.DataFrame(arrays_us)
    schema_us = pa.schema(fields_us)
    filename = tempdir / 'pandas_truncated.parquet'
    table_us = pa.Table.from_pandas(df_us, schema=schema_us)
    # allow_truncated_timestamps=True permits the lossy 'us' -> 'ms' coercion.
    _write_table(table_us, filename, version="2.0", coerce_timestamps='ms',
                 allow_truncated_timestamps=True)
    table_ms = _read_table(filename)
    df_ms = table_ms.to_pandas()
    # After truncation both values collapse to the same ms-resolution instant.
    arrays_expected = {'datetime64': [dt_ms, dt_ms]}
    df_expected = pd.DataFrame(arrays_expected)
    tm.assert_frame_equal(df_expected, df_ms)
@pytest.mark.pandas
def test_column_of_lists(tempdir):
    """A DataFrame column containing Python lists roundtrips through parquet."""
    df, schema = dataframe_with_lists(parquet_compatible=True)
    filename = tempdir / 'pandas_roundtrip.parquet'
    arrow_table = pa.Table.from_pandas(df, schema=schema)
    _write_table(arrow_table, filename, version='2.0')
    table_read = _read_table(filename)
    df_read = table_read.to_pandas()
    if PY2:
        # assert_frame_equal fails when comparing datetime.date and
        # np.datetime64, even with check_datetimelike_compat=True so
        # convert the values to np.datetime64 instead
        for col in ['date32[day]_list', 'date64[ms]_list']:
            df[col] = df[col].apply(
                lambda x: list(map(np.datetime64, x)) if x else x
            )
    tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
def test_date_time_types(tempdir):
    """Date/time/timestamp types roundtrip through parquet, including the
    expected narrowing conversions and INT96 storage options."""
    t1 = pa.date32()
    data1 = np.array([17259, 17260, 17261], dtype='int32')
    a1 = pa.array(data1, type=t1)
    t2 = pa.date64()
    data2 = data1.astype('int64') * 86400000
    a2 = pa.array(data2, type=t2)
    t3 = pa.timestamp('us')
    start = pd.Timestamp('2001-01-01').value / 1000
    data3 = np.array([start, start + 1, start + 2], dtype='int64')
    a3 = pa.array(data3, type=t3)
    t4 = pa.time32('ms')
    data4 = np.arange(3, dtype='i4')
    a4 = pa.array(data4, type=t4)
    t5 = pa.time64('us')
    a5 = pa.array(data4.astype('int64'), type=t5)
    t6 = pa.time32('s')
    a6 = pa.array(data4, type=t6)
    # time32[s] has no parquet representation; it is stored as time32[ms].
    ex_t6 = pa.time32('ms')
    ex_a6 = pa.array(data4 * 1000, type=ex_t6)
    t7 = pa.timestamp('ns')
    start = pd.Timestamp('2001-01-01').value
    data7 = np.array([start, start + 1000, start + 2000],
                     dtype='int64')
    a7 = pa.array(data7, type=t7)
    table = pa.Table.from_arrays([a1, a2, a3, a4, a5, a6, a7],
                                 ['date32', 'date64', 'timestamp[us]',
                                  'time32[s]', 'time64[us]',
                                  'time32_from64[s]',
                                  'timestamp[ns]'])
    # date64 as date32
    # time32[s] to time32[ms]
    expected = pa.Table.from_arrays([a1, a1, a3, a4, a5, ex_a6, a7],
                                    ['date32', 'date64', 'timestamp[us]',
                                     'time32[s]', 'time64[us]',
                                     'time32_from64[s]',
                                     'timestamp[ns]'])
    _check_roundtrip(table, expected=expected, version='2.0')
    # Second table: ms/us/ns timestamps only, to inspect physical storage.
    t0 = pa.timestamp('ms')
    data0 = np.arange(4, dtype='int64')
    a0 = pa.array(data0, type=t0)
    t1 = pa.timestamp('us')
    data1 = np.arange(4, dtype='int64')
    a1 = pa.array(data1, type=t1)
    t2 = pa.timestamp('ns')
    data2 = np.arange(4, dtype='int64')
    a2 = pa.array(data2, type=t2)
    table = pa.Table.from_arrays([a0, a1, a2],
                                 ['ts[ms]', 'ts[us]', 'ts[ns]'])
    expected = pa.Table.from_arrays([a0, a1, a2],
                                    ['ts[ms]', 'ts[us]', 'ts[ns]'])
    # int64 for all timestamps supported by default
    filename = tempdir / 'int64_timestamps.parquet'
    _write_table(table, filename, version='2.0')
    parquet_schema = pq.ParquetFile(filename).schema
    for i in range(3):
        assert parquet_schema.column(i).physical_type == 'INT64'
    read_table = _read_table(filename)
    assert read_table.equals(expected)
    # INT96 always stores nanoseconds, so ms/us inputs read back scaled to ns.
    t0_ns = pa.timestamp('ns')
    data0_ns = np.array(data0 * 1000000, dtype='int64')
    a0_ns = pa.array(data0_ns, type=t0_ns)
    t1_ns = pa.timestamp('ns')
    data1_ns = np.array(data1 * 1000, dtype='int64')
    a1_ns = pa.array(data1_ns, type=t1_ns)
    expected = pa.Table.from_arrays([a0_ns, a1_ns, a2],
                                    ['ts[ms]', 'ts[us]', 'ts[ns]'])
    # int96 nanosecond timestamps produced upon request
    filename = tempdir / 'explicit_int96_timestamps.parquet'
    _write_table(table, filename, version='2.0',
                 use_deprecated_int96_timestamps=True)
    parquet_schema = pq.ParquetFile(filename).schema
    for i in range(3):
        assert parquet_schema.column(i).physical_type == 'INT96'
    read_table = _read_table(filename)
    assert read_table.equals(expected)
    # int96 nanosecond timestamps implied by flavor 'spark'
    filename = tempdir / 'spark_int96_timestamps.parquet'
    _write_table(table, filename, version='2.0',
                 flavor='spark')
    parquet_schema = pq.ParquetFile(filename).schema
    for i in range(3):
        assert parquet_schema.column(i).physical_type == 'INT96'
    read_table = _read_table(filename)
    assert read_table.equals(expected)
def test_timestamp_restore_timezone():
    """Timezone information survives a parquet roundtrip (ARROW-5888)."""
    tz_type = pa.timestamp('ms', tz='America/New_York')
    column = pa.array([1, 2, 3], type=tz_type)
    table = pa.table([column], names=['f0'])
    _check_roundtrip(table)
@pytest.mark.pandas
def test_list_of_datetime_time_roundtrip():
    """A cell holding a list of datetime.time values roundtrips (ARROW-4135)."""
    stamps = pd.to_datetime(['09:00', '09:30', '10:00', '10:30', '11:00',
                             '11:30', '12:00'])
    frame = pd.DataFrame({'time': [stamps.time]})
    _roundtrip_pandas_dataframe(frame, write_kwargs={})
@pytest.mark.pandas
def test_parquet_version_timestamp_differences():
    """Parquet 1.0 vs 2.0 coerce timestamp resolutions differently: 1.0 has
    no ns support (ns -> us), 2.0 retains ns; INT96 keeps ns in both."""
    i_s = pd.Timestamp('2010-01-01').value / 1000000000  # := 1262304000
    d_s = np.arange(i_s, i_s + 10, 1, dtype='int64')
    d_ms = d_s * 1000
    d_us = d_ms * 1000
    d_ns = d_us * 1000
    a_s = pa.array(d_s, type=pa.timestamp('s'))
    a_ms = pa.array(d_ms, type=pa.timestamp('ms'))
    a_us = pa.array(d_us, type=pa.timestamp('us'))
    a_ns = pa.array(d_ns, type=pa.timestamp('ns'))
    names = ['ts:s', 'ts:ms', 'ts:us', 'ts:ns']
    table = pa.Table.from_arrays([a_s, a_ms, a_us, a_ns], names)
    # Using Parquet version 1.0, seconds should be coerced to milliseconds
    # and nanoseconds should be coerced to microseconds by default
    expected = pa.Table.from_arrays([a_ms, a_ms, a_us, a_us], names)
    _check_roundtrip(table, expected)
    # Using Parquet version 2.0, seconds should be coerced to milliseconds
    # and nanoseconds should be retained by default
    expected = pa.Table.from_arrays([a_ms, a_ms, a_us, a_ns], names)
    _check_roundtrip(table, expected, version='2.0')
    # Using Parquet version 1.0, coercing to milliseconds or microseconds
    # is allowed
    expected = pa.Table.from_arrays([a_ms, a_ms, a_ms, a_ms], names)
    _check_roundtrip(table, expected, coerce_timestamps='ms')
    # Using Parquet version 2.0, coercing to milliseconds or microseconds
    # is allowed
    expected = pa.Table.from_arrays([a_us, a_us, a_us, a_us], names)
    _check_roundtrip(table, expected, version='2.0', coerce_timestamps='us')
    # TODO: after pyarrow allows coerce_timestamps='ns', tests like the
    # following should pass ...
    # Using Parquet version 1.0, coercing to nanoseconds is not allowed
    # expected = None
    # with pytest.raises(NotImplementedError):
    #     _roundtrip_table(table, coerce_timestamps='ns')
    # Using Parquet version 2.0, coercing to nanoseconds is allowed
    # expected = pa.Table.from_arrays([a_ns, a_ns, a_ns, a_ns], names)
    # _check_roundtrip(table, expected, version='2.0', coerce_timestamps='ns')
    # For either Parquet version, coercing to nanoseconds is allowed
    # if Int96 storage is used
    expected = pa.Table.from_arrays([a_ns, a_ns, a_ns, a_ns], names)
    _check_roundtrip(table, expected,
                     use_deprecated_int96_timestamps=True)
    _check_roundtrip(table, expected, version='2.0',
                     use_deprecated_int96_timestamps=True)
def test_large_list_records():
    """Roundtrip a list column with many variable-length, empty and null
    entries (regression test for PARQUET-1100)."""
    lengths = np.random.randint(0, 500, size=50)
    # Force some empty lists into the mix.
    lengths[::10] = 0
    values = []
    for idx, length in enumerate(lengths):
        if idx % 8:
            values.append(
                [int(v) for v in np.random.randint(0, 100, size=length)])
        else:
            # Every 8th entry is a null list.
            values.append(None)
    column = pa.array(values)
    table = pa.Table.from_arrays([column], ['int_lists'])
    _check_roundtrip(table)
def test_sanitized_spark_field_names():
    """The 'spark' flavor replaces characters Spark prohibits in field names
    with underscores."""
    a0 = pa.array([0, 1, 2, 3, 4])
    name = 'prohib; ,\t{}'
    table = pa.Table.from_arrays([a0], [name])
    result = _roundtrip_table(table, write_table_kwargs={'flavor': 'spark'})
    expected_name = 'prohib______'
    assert result.schema[0].name == expected_name
@pytest.mark.pandas
def test_spark_flavor_preserves_pandas_metadata():
    """The 'spark' flavor keeps the pandas index (and its name) through a
    write/read roundtrip."""
    df = _test_dataframe(size=100)
    df.index = np.arange(0, 10 * len(df), 10)
    df.index.name = 'foo'
    result = _roundtrip_pandas_dataframe(df, {'version': '2.0',
                                              'flavor': 'spark'})
    tm.assert_frame_equal(result, df)
def test_fixed_size_binary():
    """Fixed-width (10-byte) binary columns, including nulls, roundtrip."""
    width10 = pa.binary(10)
    values = [b'fooooooooo', None, b'barooooooo', b'quxooooooo']
    column = pa.array(values, type=width10)
    table = pa.Table.from_arrays([column], ['binary[10]'])
    _check_roundtrip(table)
@pytest.mark.pandas
def test_multithreaded_read():
    """Threaded and single-threaded reads of the same buffer produce
    identical tables."""
    frame = alltypes_sample(size=10000)
    table = pa.Table.from_pandas(frame)
    sink = io.BytesIO()
    _write_table(table, sink, compression='SNAPPY', version='2.0')
    sink.seek(0)
    threaded_result = _read_table(sink, use_threads=True)
    sink.seek(0)
    serial_result = _read_table(sink, use_threads=False)
    assert threaded_result.equals(serial_result)
@pytest.mark.pandas
def test_min_chunksize():
    """chunk_size=-1 still writes a readable file; chunk_size=0 is rejected."""
    frame = pd.DataFrame([np.arange(4)], columns=['A', 'B', 'C', 'D'])
    table = pa.Table.from_pandas(frame.reset_index())
    sink = io.BytesIO()
    _write_table(table, sink, chunk_size=-1)
    sink.seek(0)
    roundtripped = _read_table(sink)
    assert roundtripped.equals(table)
    # Zero is an invalid chunk size.
    with pytest.raises(ValueError):
        _write_table(table, sink, chunk_size=0)
@pytest.mark.pandas
def test_pass_separate_metadata():
    """ParquetFile accepts metadata read separately from the data buffer
    (ARROW-471)."""
    # ARROW-471
    df = alltypes_sample(size=10000)
    a_table = pa.Table.from_pandas(df)
    buf = io.BytesIO()
    _write_table(a_table, buf, compression='snappy', version='2.0')
    buf.seek(0)
    metadata = pq.read_metadata(buf)
    buf.seek(0)
    fileh = pq.ParquetFile(buf, metadata=metadata)
    tm.assert_frame_equal(df, fileh.read().to_pandas())
@pytest.mark.pandas
def test_read_single_row_group():
    """Reading each of K row groups individually and concatenating them
    reconstructs the full table (ARROW-471)."""
    # ARROW-471
    N, K = 10000, 4
    df = alltypes_sample(size=N)
    a_table = pa.Table.from_pandas(df)
    buf = io.BytesIO()
    # row_group_size=N/K should produce exactly K row groups.
    _write_table(a_table, buf, row_group_size=N / K,
                 compression='snappy', version='2.0')
    buf.seek(0)
    pf = pq.ParquetFile(buf)
    assert pf.num_row_groups == K
    row_groups = [pf.read_row_group(i) for i in range(K)]
    result = pa.concat_tables(row_groups)
    tm.assert_frame_equal(df, result.to_pandas())
@pytest.mark.pandas
def test_read_single_row_group_with_column_subset():
    """read_row_group honors a column subset, and deduplicates columns when
    the same name is requested twice (ARROW-4267)."""
    N, K = 10000, 4
    df = alltypes_sample(size=N)
    a_table = pa.Table.from_pandas(df)
    buf = io.BytesIO()
    _write_table(a_table, buf, row_group_size=N / K,
                 compression='snappy', version='2.0')
    buf.seek(0)
    pf = pq.ParquetFile(buf)
    cols = list(df.columns[:2])
    row_groups = [pf.read_row_group(i, columns=cols) for i in range(K)]
    result = pa.concat_tables(row_groups)
    tm.assert_frame_equal(df[cols], result.to_pandas())
    # ARROW-4267: Selection of duplicate columns still leads to these columns
    # being read uniquely.
    row_groups = [pf.read_row_group(i, columns=cols + cols) for i in range(K)]
    result = pa.concat_tables(row_groups)
    tm.assert_frame_equal(df[cols], result.to_pandas())
@pytest.mark.pandas
def test_scan_contents():
    """ParquetFile.scan_contents counts all rows, for the whole file and for
    a column subset."""
    N, K = 10000, 4
    df = alltypes_sample(size=N)
    a_table = pa.Table.from_pandas(df)
    buf = io.BytesIO()
    _write_table(a_table, buf, row_group_size=N / K,
                 compression='snappy', version='2.0')
    buf.seek(0)
    pf = pq.ParquetFile(buf)
    assert pf.scan_contents() == 10000
    assert pf.scan_contents(df.columns[:4]) == 10000
@pytest.mark.pandas
def test_parquet_piece_read(tempdir):
    """A ParquetDatasetPiece reads back exactly the table written to its path."""
    frame = _test_dataframe(1000)
    expected = pa.Table.from_pandas(frame)
    target = tempdir / 'parquet_piece_read.parquet'
    _write_table(expected, target, version='2.0')
    piece = pq.ParquetDatasetPiece(target)
    assert piece.read().equals(expected)
@pytest.mark.pandas
def test_parquet_piece_open_and_get_metadata(tempdir):
    """ParquetDatasetPiece exposes both table data (read) and file metadata
    (get_metadata)."""
    df = _test_dataframe(100)
    table = pa.Table.from_pandas(df)
    path = tempdir / 'parquet_piece_read.parquet'
    _write_table(table, path, version='2.0')
    piece = pq.ParquetDatasetPiece(path)
    table1 = piece.read()
    assert isinstance(table1, pa.Table)
    meta1 = piece.get_metadata()
    assert isinstance(meta1, pq.FileMetaData)
    assert table == table1
def test_parquet_piece_basics():
    """ParquetDatasetPiece string representations and equality semantics."""
    path = '/baz.parq'
    plain = pq.ParquetDatasetPiece(path)
    with_group = pq.ParquetDatasetPiece(path, row_group=1)
    with_partition = pq.ParquetDatasetPiece(
        path, row_group=1, partition_keys=[('foo', 0), ('bar', 1)])
    # str() includes row group and partition key details when present.
    assert str(plain) == path
    assert str(with_group) == '/baz.parq | row_group=1'
    assert str(with_partition) == \
        'partition[foo=0, bar=1] /baz.parq | row_group=1'
    # Each piece equals itself; pieces with differing keys compare unequal.
    assert plain == plain
    assert with_group == with_group
    assert with_partition == with_partition
    assert plain != with_partition
def test_partition_set_dictionary_type():
    """PartitionSet builds string/integer dictionaries, and raises TypeError
    for unsupported key types such as datetime."""
    set1 = pq.PartitionSet('key1', [u('foo'), u('bar'), u('baz')])
    set2 = pq.PartitionSet('key2', [2007, 2008, 2009])
    assert isinstance(set1.dictionary, pa.StringArray)
    assert isinstance(set2.dictionary, pa.IntegerArray)
    set3 = pq.PartitionSet('key2', [datetime.datetime(2007, 1, 1)])
    with pytest.raises(TypeError):
        set3.dictionary
@pytest.mark.pandas
def test_read_partitioned_directory(tempdir):
    """Partitioned-directory reading works against the local filesystem."""
    local_fs = LocalFileSystem.get_instance()
    _partition_test_for_filesystem(local_fs, tempdir)
@pytest.mark.pandas
def test_create_parquet_dataset_multi_threaded(tempdir):
    """Multi-threaded metadata discovery finds the same pieces and partitions
    as single-threaded discovery."""
    fs = LocalFileSystem.get_instance()
    base_path = tempdir
    _partition_test_for_filesystem(fs, base_path)
    # Single-threaded manifest serves as the reference result.
    manifest = pq.ParquetManifest(base_path, filesystem=fs,
                                  metadata_nthreads=1)
    dataset = pq.ParquetDataset(base_path, filesystem=fs, metadata_nthreads=16)
    assert len(dataset.pieces) > 0
    partitions = dataset.partitions
    assert len(partitions.partition_names) > 0
    assert partitions.partition_names == manifest.partitions.partition_names
    assert len(partitions.levels) == len(manifest.partitions.levels)
@pytest.mark.pandas
def test_equivalency(tempdir):
    """Both filter syntaxes (flat AND list, and disjunctive normal form) prune
    partitions equivalently; NUL bytes in predicate values are rejected."""
    fs = LocalFileSystem.get_instance()
    base_path = tempdir
    integer_keys = [0, 1]
    string_keys = ['a', 'b', 'c']
    boolean_keys = [True, False]
    partition_spec = [
        ['integer', integer_keys],
        ['string', string_keys],
        ['boolean', boolean_keys]
    ]
    df = pd.DataFrame({
        'integer': np.array(integer_keys, dtype='i4').repeat(15),
        'string': np.tile(np.tile(np.array(string_keys, dtype=object), 5), 2),
        'boolean': np.tile(np.tile(np.array(boolean_keys, dtype='bool'), 5),
                           3),
    }, columns=['integer', 'string', 'boolean'])
    _generate_partition_directories(fs, base_path, partition_spec, df)
    # Old filters syntax:
    #  integer == 1 AND string != b AND boolean == True
    dataset = pq.ParquetDataset(
        base_path, filesystem=fs,
        filters=[('integer', '=', 1), ('string', '!=', 'b'),
                 ('boolean', '==', True)]
    )
    table = dataset.read()
    result_df = (table.to_pandas().reset_index(drop=True))
    assert 0 not in result_df['integer'].values
    assert 'b' not in result_df['string'].values
    assert False not in result_df['boolean'].values
    # filters in disjunctive normal form:
    #  (integer == 1 AND string != b AND boolean == True) OR
    #  (integer == 2 AND boolean == False)
    # TODO(ARROW-3388): boolean columns are reconstructed as string
    filters = [
        [
            ('integer', '=', 1),
            ('string', '!=', 'b'),
            ('boolean', '==', 'True')
        ],
        [('integer', '=', 0), ('boolean', '==', 'False')]
    ]
    dataset = pq.ParquetDataset(base_path, filesystem=fs, filters=filters)
    table = dataset.read()
    result_df = table.to_pandas().reset_index(drop=True)
    # Check that all rows in the DF fulfill the filter
    # Pandas 0.23.x has problems with indexing constant memoryviews in
    # categoricals. Thus we need to make an explicity copy here with np.array.
    df_filter_1 = (np.array(result_df['integer']) == 1) \
        & (np.array(result_df['string']) != 'b') \
        & (np.array(result_df['boolean']) == 'True')
    df_filter_2 = (np.array(result_df['integer']) == 0) \
        & (np.array(result_df['boolean']) == 'False')
    assert df_filter_1.sum() > 0
    assert df_filter_2.sum() > 0
    assert result_df.shape[0] == (df_filter_1.sum() + df_filter_2.sum())
    # Check for \0 in predicate values. Until they are correctly implemented
    # in ARROW-3391, they would otherwise lead to weird results with the
    # current code.
    with pytest.raises(NotImplementedError):
        filters = [[('string', '==', b'1\0a')]]
        pq.ParquetDataset(base_path, filesystem=fs, filters=filters)
    with pytest.raises(NotImplementedError):
        filters = [[('string', '==', u'1\0a')]]
        pq.ParquetDataset(base_path, filesystem=fs, filters=filters)
@pytest.mark.pandas
def test_cutoff_exclusive_integer(tempdir):
    """Exclusive '<'/'>' integer filters keep only partitions strictly
    inside the bounds (here 1 < integers < 4, i.e. {2, 3})."""
    fs = LocalFileSystem.get_instance()
    base_path = tempdir
    integer_keys = [0, 1, 2, 3, 4]
    partition_spec = [
        ['integers', integer_keys],
    ]
    N = 5
    df = pd.DataFrame({
        'index': np.arange(N),
        'integers': np.array(integer_keys, dtype='i4'),
    }, columns=['index', 'integers'])
    _generate_partition_directories(fs, base_path, partition_spec, df)
    dataset = pq.ParquetDataset(
        base_path, filesystem=fs,
        filters=[
            ('integers', '<', 4),
            ('integers', '>', 1),
        ]
    )
    table = dataset.read()
    result_df = (table.to_pandas()
                 .sort_values(by='index')
                 .reset_index(drop=True))
    # map(int, ...) already yields plain ints; the original wrapped it in a
    # redundant identity comprehension ([x for x in map(int, ...)]).
    result_list = list(map(int, result_df['integers'].values))
    assert result_list == [2, 3]
@pytest.mark.pandas
@pytest.mark.xfail(
    # As of writing, date partition keys lose their dtype when reconstructed
    # as categoricals, so the comparison below raises.
    raises=TypeError,
    reason='Loss of type information in creation of categoricals.'
)
def test_cutoff_exclusive_datetime(tempdir):
    """Exclusive '<'/'>' date filters should keep only the strictly-inside
    partition (2018-04-11). Currently xfails due to categorical typing."""
    fs = LocalFileSystem.get_instance()
    base_path = tempdir
    date_keys = [
        datetime.date(2018, 4, 9),
        datetime.date(2018, 4, 10),
        datetime.date(2018, 4, 11),
        datetime.date(2018, 4, 12),
        datetime.date(2018, 4, 13)
    ]
    partition_spec = [
        ['dates', date_keys]
    ]
    N = 5
    df = pd.DataFrame({
        'index': np.arange(N),
        'dates': np.array(date_keys, dtype='datetime64'),
    }, columns=['index', 'dates'])
    _generate_partition_directories(fs, base_path, partition_spec, df)
    dataset = pq.ParquetDataset(
        base_path, filesystem=fs,
        filters=[
            ('dates', '<', "2018-04-12"),
            ('dates', '>', "2018-04-10")
        ]
    )
    table = dataset.read()
    result_df = (table.to_pandas()
                 .sort_values(by='index')
                 .reset_index(drop=True))
    expected = pd.Categorical(
        np.array([datetime.date(2018, 4, 11)], dtype='datetime64'),
        categories=np.array(date_keys, dtype='datetime64'))
    assert result_df['dates'].values == expected
@pytest.mark.pandas
def test_inclusive_integer(tempdir):
    """Inclusive '<='/'>=' integer filters keep the boundary partitions
    (here 2 <= integers <= 3, i.e. {2, 3})."""
    fs = LocalFileSystem.get_instance()
    base_path = tempdir
    integer_keys = [0, 1, 2, 3, 4]
    partition_spec = [
        ['integers', integer_keys],
    ]
    N = 5
    df = pd.DataFrame({
        'index': np.arange(N),
        'integers': np.array(integer_keys, dtype='i4'),
    }, columns=['index', 'integers'])
    _generate_partition_directories(fs, base_path, partition_spec, df)
    dataset = pq.ParquetDataset(
        base_path, filesystem=fs,
        filters=[
            ('integers', '<=', 3),
            ('integers', '>=', 2),
        ]
    )
    table = dataset.read()
    result_df = (table.to_pandas()
                 .sort_values(by='index')
                 .reset_index(drop=True))
    # The original converted each value to int twice
    # ([int(x) for x in map(int, ...)]); once is enough.
    result_list = [int(x) for x in result_df['integers'].values]
    assert result_list == [2, 3]
@pytest.mark.pandas
def test_inclusive_set(tempdir):
    """'in' filters with set values keep only partitions whose key is a
    member of the given set."""
    fs = LocalFileSystem.get_instance()
    base_path = tempdir
    integer_keys = [0, 1]
    string_keys = ['a', 'b', 'c']
    boolean_keys = [True, False]
    partition_spec = [
        ['integer', integer_keys],
        ['string', string_keys],
        ['boolean', boolean_keys]
    ]
    df = pd.DataFrame({
        'integer': np.array(integer_keys, dtype='i4').repeat(15),
        'string': np.tile(np.tile(np.array(string_keys, dtype=object), 5), 2),
        'boolean': np.tile(np.tile(np.array(boolean_keys, dtype='bool'), 5),
                           3),
    }, columns=['integer', 'string', 'boolean'])
    _generate_partition_directories(fs, base_path, partition_spec, df)
    dataset = pq.ParquetDataset(
        base_path, filesystem=fs,
        filters=[('integer', 'in', {1}), ('string', 'in', {'a', 'b'}),
                 ('boolean', 'in', {True})]
    )
    table = dataset.read()
    result_df = (table.to_pandas().reset_index(drop=True))
    # Values excluded by the sets must not appear in the result.
    assert 0 not in result_df['integer'].values
    assert 'c' not in result_df['string'].values
    assert False not in result_df['boolean'].values
@pytest.mark.pandas
def test_invalid_pred_op(tempdir):
    """Invalid filter predicates raise ValueError: unknown operator ('=<'),
    empty set with 'in', and a set value with a non-set operator."""
    fs = LocalFileSystem.get_instance()
    base_path = tempdir
    integer_keys = [0, 1, 2, 3, 4]
    partition_spec = [
        ['integers', integer_keys],
    ]
    N = 5
    df = pd.DataFrame({
        'index': np.arange(N),
        'integers': np.array(integer_keys, dtype='i4'),
    }, columns=['index', 'integers'])
    _generate_partition_directories(fs, base_path, partition_spec, df)
    with pytest.raises(ValueError):
        pq.ParquetDataset(base_path,
                          filesystem=fs,
                          filters=[
                              ('integers', '=<', 3),
                          ])
    with pytest.raises(ValueError):
        pq.ParquetDataset(base_path,
                          filesystem=fs,
                          filters=[
                              ('integers', 'in', set()),
                          ])
    with pytest.raises(ValueError):
        pq.ParquetDataset(base_path,
                          filesystem=fs,
                          filters=[
                              ('integers', '!=', {3}),
                          ])
@pytest.mark.pandas
def test_filters_read_table(tempdir):
    # test that filters keyword is passed through in read_table
    """The filters keyword is honored by read_table and read_pandas, in both
    flat and nested (DNF) form."""
    fs = LocalFileSystem.get_instance()
    base_path = tempdir
    integer_keys = [0, 1, 2, 3, 4]
    partition_spec = [
        ['integers', integer_keys],
    ]
    N = 5
    df = pd.DataFrame({
        'index': np.arange(N),
        'integers': np.array(integer_keys, dtype='i4'),
    }, columns=['index', 'integers'])
    _generate_partition_directories(fs, base_path, partition_spec, df)
    table = pq.read_table(
        base_path, filesystem=fs, filters=[('integers', '<', 3)])
    assert table.num_rows == 3
    table = pq.read_table(
        base_path, filesystem=fs, filters=[[('integers', '<', 3)]])
    assert table.num_rows == 3
    table = pq.read_pandas(
        base_path, filters=[('integers', '<', 3)])
    assert table.num_rows == 3
@pytest.yield_fixture
def s3_example():
    """Yield an (s3fs filesystem, unique bucket URI) pair for S3 tests;
    the test bucket directory is removed on teardown.

    Requires PYARROW_TEST_S3_ACCESS_KEY / _SECRET_KEY / _BUCKET env vars.
    """
    access_key = os.environ['PYARROW_TEST_S3_ACCESS_KEY']
    secret_key = os.environ['PYARROW_TEST_S3_SECRET_KEY']
    bucket_name = os.environ['PYARROW_TEST_S3_BUCKET']
    import s3fs
    fs = s3fs.S3FileSystem(key=access_key, secret=secret_key)
    test_dir = guid()
    bucket_uri = 's3://{0}/{1}'.format(bucket_name, test_dir)
    fs.mkdir(bucket_uri)
    yield fs, bucket_uri
    fs.rm(bucket_uri, recursive=True)
@pytest.mark.pandas
@pytest.mark.s3
def test_read_partitioned_directory_s3fs(s3_example):
    """Partitioned-directory reading works via s3fs, both through the
    explicit S3FSWrapper and via auto-wrapping of the raw filesystem."""
    from pyarrow.filesystem import S3FSWrapper
    fs, bucket_uri = s3_example
    wrapper = S3FSWrapper(fs)
    _partition_test_for_filesystem(wrapper, bucket_uri)
    # Check that we can auto-wrap
    dataset = pq.ParquetDataset(bucket_uri, filesystem=fs)
    dataset.read()
def _partition_test_for_filesystem(fs, base_path):
    """Write a two-level (foo/bar) partitioned dataset under *base_path* on
    *fs*, read it back as a ParquetDataset, and check it matches the source
    frame (partition columns come back as categoricals, appended last)."""
    foo_keys = [0, 1]
    bar_keys = ['a', 'b', 'c']
    partition_spec = [
        ['foo', foo_keys],
        ['bar', bar_keys]
    ]
    N = 30
    df = pd.DataFrame({
        'index': np.arange(N),
        'foo': np.array(foo_keys, dtype='i4').repeat(15),
        'bar': np.tile(np.tile(np.array(bar_keys, dtype=object), 5), 2),
        'values': np.random.randn(N)
    }, columns=['index', 'foo', 'bar', 'values'])
    _generate_partition_directories(fs, base_path, partition_spec, df)
    dataset = pq.ParquetDataset(base_path, filesystem=fs)
    table = dataset.read()
    result_df = (table.to_pandas()
                 .sort_values(by='index')
                 .reset_index(drop=True))
    expected_df = (df.sort_values(by='index')
                   .reset_index(drop=True)
                   .reindex(columns=result_df.columns))
    # Partition columns are reconstructed as categoricals.
    expected_df['foo'] = pd.Categorical(df['foo'], categories=foo_keys)
    expected_df['bar'] = pd.Categorical(df['bar'], categories=bar_keys)
    assert (result_df.columns == ['index', 'values', 'foo', 'bar']).all()
    tm.assert_frame_equal(result_df, expected_df)
def _generate_partition_directories(fs, base_dir, partition_spec, df):
    """Materialize *df* under *base_dir* as a Hive-style partition tree.

    partition_spec : list of lists, e.g. [['foo', [0, 1, 2],
                                          ['bar', ['a', 'b', 'c']]
    Each leaf directory gets the matching row subset of *df* (written via
    _write_table) plus a _SUCCESS marker; intermediate levels also get a
    _SUCCESS marker.
    """
    # part_table : a pyarrow.Table to write to each partition
    DEPTH = len(partition_spec)
    def _visit_level(base_dir, level, part_keys):
        # Recursively create one '<name>=<value>' directory per key value.
        name, values = partition_spec[level]
        for value in values:
            this_part_keys = part_keys + [(name, value)]
            level_dir = base_dir / '{0}={1}'.format(name, value)
            fs.mkdir(level_dir)
            if level == DEPTH - 1:
                # Generate example data
                file_path = level_dir / guid()
                filtered_df = _filter_partition(df, this_part_keys)
                part_table = pa.Table.from_pandas(filtered_df)
                with fs.open(file_path, 'wb') as f:
                    _write_table(part_table, f)
                assert fs.exists(file_path)
                (level_dir / '_SUCCESS').touch()
            else:
                _visit_level(level_dir, level + 1, this_part_keys)
                (level_dir / '_SUCCESS').touch()
    _visit_level(base_dir, 0, [])
def _test_read_common_metadata_files(fs, base_path):
    """Write a data file plus a _common_metadata file on *fs* and verify
    ParquetDataset discovers and uses the common metadata; also check that a
    one-element directory list behaves like the directory itself."""
    N = 100
    df = pd.DataFrame({
        'index': np.arange(N),
        'values': np.random.randn(N)
    }, columns=['index', 'values'])
    base_path = str(base_path)
    data_path = os.path.join(base_path, 'data.parquet')
    table = pa.Table.from_pandas(df)
    with fs.open(data_path, 'wb') as f:
        _write_table(table, f)
    metadata_path = os.path.join(base_path, '_common_metadata')
    with fs.open(metadata_path, 'wb') as f:
        pq.write_metadata(table.schema, f)
    dataset = pq.ParquetDataset(base_path, filesystem=fs)
    assert dataset.common_metadata_path == str(metadata_path)
    with fs.open(data_path) as f:
        common_schema = pq.read_metadata(f).schema
    assert dataset.schema.equals(common_schema)
    # handle list of one directory
    dataset2 = pq.ParquetDataset([base_path], filesystem=fs)
    assert dataset2.schema.equals(dataset.schema)
@pytest.mark.pandas
def test_read_common_metadata_files(tempdir):
    """_common_metadata discovery works on the local filesystem."""
    local_fs = LocalFileSystem.get_instance()
    _test_read_common_metadata_files(local_fs, tempdir)
@pytest.mark.pandas
def test_read_metadata_files(tempdir):
    """A _metadata sidecar file is discovered by ParquetDataset and its
    schema matches the data file's schema."""
    fs = LocalFileSystem.get_instance()
    N = 100
    df = pd.DataFrame({
        'index': np.arange(N),
        'values': np.random.randn(N)
    }, columns=['index', 'values'])
    data_path = tempdir / 'data.parquet'
    table = pa.Table.from_pandas(df)
    with fs.open(data_path, 'wb') as f:
        _write_table(table, f)
    metadata_path = tempdir / '_metadata'
    with fs.open(metadata_path, 'wb') as f:
        pq.write_metadata(table.schema, f)
    dataset = pq.ParquetDataset(tempdir, filesystem=fs)
    assert dataset.metadata_path == str(metadata_path)
    with fs.open(data_path) as f:
        metadata_schema = pq.read_metadata(f).schema
    assert dataset.schema.equals(metadata_schema)
@pytest.mark.pandas
def test_read_schema(tempdir):
    """pq.read_schema returns the written schema (with and without memory
    mapping), including the embedded pandas metadata."""
    N = 100
    df = pd.DataFrame({
        'index': np.arange(N),
        'values': np.random.randn(N)
    }, columns=['index', 'values'])
    data_path = tempdir / 'test.parquet'
    table = pa.Table.from_pandas(df)
    _write_table(table, data_path)
    read1 = pq.read_schema(data_path)
    read2 = pq.read_schema(data_path, memory_map=True)
    assert table.schema.equals(read1, check_metadata=False)
    assert table.schema.equals(read2, check_metadata=False)
    # The pandas metadata blob must survive the write/read cycle verbatim.
    assert table.schema.metadata[b'pandas'] == read1.metadata[b'pandas']
def _filter_partition(df, part_keys):
predicate = np.ones(len(df), dtype=bool)
to_drop = []
for name, value in part_keys:
to_drop.append(name)
# to avoid pandas warning
if isinstance(value, (datetime.date, datetime.datetime)):
value = pd.Timestamp(value)
predicate &= df[name] == value
return df[predicate].drop(to_drop, axis=1)
@pytest.mark.pandas
def test_read_multiple_files(tempdir):
    """Reading many files as one dataset: full read, read with explicit
    metadata/schema, column subsets, threading, and failure on files with
    mismatched schemas."""
    nfiles = 10
    size = 5
    dirpath = tempdir / guid()
    dirpath.mkdir()
    test_data = []
    paths = []
    for i in range(nfiles):
        df = _test_dataframe(size, seed=i)
        # Hack so that we don't have a dtype cast in v1 files
        df['uint32'] = df['uint32'].astype(np.int64)
        path = dirpath / '{}.parquet'.format(i)
        table = pa.Table.from_pandas(df)
        _write_table(table, path)
        test_data.append(table)
        paths.append(path)
    # Write a _SUCCESS.crc file
    (dirpath / '_SUCCESS.crc').touch()
    def read_multiple_files(paths, columns=None, use_threads=True, **kwargs):
        # Helper: build a ParquetDataset over *paths* and read it.
        dataset = pq.ParquetDataset(paths, **kwargs)
        return dataset.read(columns=columns, use_threads=use_threads)
    result = read_multiple_files(paths)
    expected = pa.concat_tables(test_data)
    assert result.equals(expected)
    # Read with provided metadata
    metadata = pq.read_metadata(paths[0])
    result2 = read_multiple_files(paths, metadata=metadata)
    assert result2.equals(expected)
    result3 = pa.localfs.read_parquet(dirpath, schema=metadata.schema)
    assert result3.equals(expected)
    # Read column subset
    to_read = [0, 2, 6, result.num_columns - 1]
    col_names = [result.field(i).name for i in to_read]
    out = pa.localfs.read_parquet(dirpath, columns=col_names)
    expected = pa.Table.from_arrays([result.column(i) for i in to_read],
                                    names=col_names,
                                    metadata=result.schema.metadata)
    assert out.equals(expected)
    # Read with multiple threads
    pa.localfs.read_parquet(dirpath, use_threads=True)
    # Test failure modes with non-uniform metadata
    # NOTE(review): `i` here is the loop variable leaked from the write loop
    # above (its final value, nfiles - 1).
    bad_apple = _test_dataframe(size, seed=i).iloc[:, :4]
    bad_apple_path = tempdir / '{}.parquet'.format(guid())
    t = pa.Table.from_pandas(bad_apple)
    _write_table(t, bad_apple_path)
    bad_meta = pq.read_metadata(bad_apple_path)
    with pytest.raises(ValueError):
        read_multiple_files(paths + [bad_apple_path])
    with pytest.raises(ValueError):
        read_multiple_files(paths, metadata=bad_meta)
    mixed_paths = [bad_apple_path, paths[0]]
    with pytest.raises(ValueError):
        read_multiple_files(mixed_paths, schema=bad_meta.schema)
    with pytest.raises(ValueError):
        read_multiple_files(mixed_paths)
@pytest.mark.pandas
def test_dataset_read_pandas(tempdir):
    """ParquetDataset.read_pandas with a column subset concatenates the
    per-file frames and preserves their (disjoint) indexes."""
    nfiles = 5
    size = 5
    dirpath = tempdir / guid()
    dirpath.mkdir()
    test_data = []
    frames = []
    paths = []
    for i in range(nfiles):
        df = _test_dataframe(size, seed=i)
        # Give each file a disjoint, named index range.
        df.index = np.arange(i * size, (i + 1) * size)
        df.index.name = 'index'
        path = dirpath / '{}.parquet'.format(i)
        table = pa.Table.from_pandas(df)
        _write_table(table, path)
        test_data.append(table)
        frames.append(df)
        paths.append(path)
    dataset = pq.ParquetDataset(dirpath)
    columns = ['uint8', 'strings']
    result = dataset.read_pandas(columns=columns).to_pandas()
    expected = pd.concat([x[columns] for x in frames])
    tm.assert_frame_equal(result, expected)
@pytest.mark.pandas
def test_dataset_no_memory_map(tempdir):
    # ARROW-2627: Check that we can use ParquetDataset without memory-mapping
    dirpath = tempdir / guid()
    dirpath.mkdir()
    df = _test_dataframe(10, seed=0)
    path = dirpath / '{}.parquet'.format(0)
    table = pa.Table.from_pandas(df)
    _write_table(table, path, version='2.0')
    # TODO(wesm): Not sure how to easily check that memory mapping is _not_
    # used. Mocking is not especially easy for pa.memory_map
    dataset = pq.ParquetDataset(dirpath, memory_map=False)
    assert dataset.pieces[0].read().equals(table)
@pytest.mark.pandas
def test_dataset_read_pandas_common_metadata(tempdir, preserve_index):
    """Pandas metadata from a common _metadata file is applied to data files
    that were written without any schema metadata (ARROW-1103)."""
    # ARROW-1103
    nfiles = 5
    size = 5
    dirpath = tempdir / guid()
    dirpath.mkdir()
    test_data = []
    frames = []
    paths = []
    for i in range(nfiles):
        df = _test_dataframe(size, seed=i)
        df.index = pd.Index(np.arange(i * size, (i + 1) * size), name='index')
        path = dirpath / '{}.parquet'.format(i)
        table = pa.Table.from_pandas(df, preserve_index=preserve_index)
        # Obliterate metadata
        table = table.replace_schema_metadata(None)
        assert table.schema.metadata is None
        _write_table(table, path)
        test_data.append(table)
        frames.append(df)
        paths.append(path)
    # Write _metadata common file
    table_for_metadata = pa.Table.from_pandas(
        df, preserve_index=preserve_index
    )
    pq.write_metadata(table_for_metadata.schema, dirpath / '_metadata')
    dataset = pq.ParquetDataset(dirpath)
    columns = ['uint8', 'strings']
    result = dataset.read_pandas(columns=columns).to_pandas()
    expected = pd.concat([x[columns] for x in frames])
    # Index name is only restored when the index was preserved at write time.
    expected.index.name = (
        df.index.name if preserve_index is not False else None)
    tm.assert_frame_equal(result, expected)
def _make_example_multifile_dataset(base_path, nfiles=10, file_nrows=5):
    """Write *nfiles* small parquet files under *base_path* and return the
    list of written paths."""
    tables = []
    written = []
    for seed in range(nfiles):
        frame = _test_dataframe(file_nrows, seed=seed)
        target = base_path / '{}.parquet'.format(seed)
        tables.append(_write_table(frame, target))
        written.append(target)
    return written
@pytest.mark.pandas
def test_ignore_private_directories(tempdir):
    """Underscore-prefixed directories are skipped during dataset discovery."""
    dataset_dir = tempdir / guid()
    dataset_dir.mkdir()
    expected_paths = _make_example_multifile_dataset(dataset_dir, nfiles=10,
                                                     file_nrows=5)
    # A "private" directory such as Impala's staging dir must be ignored.
    (dataset_dir / '_impala_staging').mkdir()
    dataset = pq.ParquetDataset(dataset_dir)
    discovered = {piece.path for piece in dataset.pieces}
    assert set(map(str, expected_paths)) == discovered
@pytest.mark.pandas
def test_ignore_hidden_files_dot(tempdir):
    """Dot-prefixed files (e.g. .DS_Store) are ignored during discovery."""
    base = tempdir / guid()
    base.mkdir()
    expected_paths = _make_example_multifile_dataset(base, nfiles=10,
                                                     file_nrows=5)
    for hidden_name in ('.DS_Store', '.private'):
        with (base / hidden_name).open('wb') as f:
            f.write(b'gibberish')
    found = set(piece.path for piece in pq.ParquetDataset(base).pieces)
    assert found == set(map(str, expected_paths))
@pytest.mark.pandas
def test_ignore_hidden_files_underscore(tempdir):
    """Underscore-prefixed files (e.g. Spark commit markers) are ignored
    during dataset discovery."""
    base = tempdir / guid()
    base.mkdir()
    expected_paths = _make_example_multifile_dataset(base, nfiles=10,
                                                     file_nrows=5)
    for marker_name in ('_committed_123', '_started_321'):
        with (base / marker_name).open('wb') as f:
            f.write(b'abcd')
    found = set(piece.path for piece in pq.ParquetDataset(base).pieces)
    assert found == set(map(str, expected_paths))
@pytest.mark.pandas
def test_multiindex_duplicate_values(tempdir):
    """A MultiIndex with duplicate values in one level must round-trip
    both at the Table and the DataFrame level."""
    num_rows = 3
    numbers = list(range(num_rows))
    # 'foo' appears twice in the first level on purpose.
    index = pd.MultiIndex.from_arrays(
        [['foo', 'foo', 'bar'], numbers],
        names=['foobar', 'some_numbers'],
    )
    df = pd.DataFrame({'numbers': numbers}, index=index)
    table = pa.Table.from_pandas(df)
    filename = tempdir / 'dup_multi_index_levels.parquet'
    _write_table(table, filename)
    result_table = _read_table(filename)
    assert table.equals(result_table)
    result_df = result_table.to_pandas()
    tm.assert_frame_equal(result_df, df)
@pytest.mark.pandas
def test_write_error_deletes_incomplete_file(tempdir):
    """ARROW-1285: a failed write must not leave a partial file behind."""
    # ARROW-1285
    df = pd.DataFrame({'a': list('abc'),
                       'b': list(range(1, 4)),
                       'c': np.arange(3, 6).astype('u1'),
                       'd': np.arange(4.0, 7.0, dtype='float64'),
                       'e': [True, False, True],
                       'f': pd.Categorical(list('abc')),
                       'g': pd.date_range('20130101', periods=3),
                       'h': pd.date_range('20130101', periods=3,
                                          tz='US/Eastern'),
                       # NOTE(review): presumably the ns-resolution column is
                       # what makes the write raise -- confirm.
                       'i': pd.date_range('20130101', periods=3, freq='ns')})
    pdf = pa.Table.from_pandas(df)
    filename = tempdir / 'tmp_file'
    try:
        _write_table(pdf, filename)
    except pa.ArrowException:
        pass
    # Whatever the failure, no incomplete file should remain on disk.
    assert not filename.exists()
@pytest.mark.pandas
def test_noncoerced_nanoseconds_written_without_exception(tempdir):
    """ARROW-1957: the Parquet version 2.0 writer preserves Arrow
    nanosecond timestamps by default, without raising."""
    n = 9
    # pd.date_range replaces the DatetimeIndex(start=..., freq=..., periods=...)
    # constructor form, which was removed in pandas 1.0.
    df = pd.DataFrame({'x': range(n)},
                      index=pd.date_range(start='2017-01-01',
                                          freq='1n',
                                          periods=n))
    tb = pa.Table.from_pandas(df)
    filename = tempdir / 'written.parquet'
    try:
        pq.write_table(tb, filename, version='2.0')
    except Exception:
        pass
    # The file must exist and round-trip the nanosecond data intact.
    assert filename.exists()
    recovered_table = pq.read_table(filename)
    assert tb.equals(recovered_table)
    # Loss of data thru coercion (without explicit override) still an error
    filename = tempdir / 'not_written.parquet'
    with pytest.raises(ValueError):
        pq.write_table(tb, filename, coerce_timestamps='ms', version='2.0')
def test_read_non_existent_file(tempdir):
    """Reading a missing file must raise, and the error must name the path.

    The original try/except silently passed when read_table did NOT raise;
    pytest.raises makes a non-raising call fail the test.
    """
    path = 'non-existent-file.parquet'
    with pytest.raises(Exception) as exc_info:
        pq.read_table(path)
    assert path in exc_info.value.args[0]
def test_read_table_doesnt_warn(datadir):
    """Reading a known-good legacy file must not emit any warnings."""
    with pytest.warns(None) as record:
        pq.read_table(datadir / 'v0.7.1.parquet')
    # Zero captured warnings of any category.
    assert len(record) == 0
def _test_write_to_dataset_with_partitions(base_path,
                                           filesystem=None,
                                           schema=None,
                                           index_name=None):
    """ARROW-1400: write a partitioned dataset and verify that schema and
    data round-trip.

    NOTE(review): ``index_name`` is accepted but currently unused by this
    helper -- confirm whether callers expect it to take effect.
    """
    # ARROW-1400
    output_df = pd.DataFrame({'group1': list('aaabbbbccc'),
                              'group2': list('eefeffgeee'),
                              'num': list(range(10)),
                              # np.nan directly: the pd.np alias is removed
                              # in modern pandas.
                              'nan': [np.nan] * 10,
                              'date': np.arange('2017-01-01', '2017-01-11',
                                                dtype='datetime64[D]')})
    cols = output_df.columns.tolist()
    partition_by = ['group1', 'group2']
    output_table = pa.Table.from_pandas(output_df, schema=schema, safe=False,
                                        preserve_index=False)
    pq.write_to_dataset(output_table, base_path, partition_by,
                        filesystem=filesystem)
    metadata_path = os.path.join(base_path, '_common_metadata')
    if filesystem is not None:
        with filesystem.open(metadata_path, 'wb') as f:
            pq.write_metadata(output_table.schema, f)
    else:
        pq.write_metadata(output_table.schema, metadata_path)
    # ARROW-2891: Ensure the output_schema is preserved when writing a
    # partitioned dataset
    dataset = pq.ParquetDataset(base_path,
                                filesystem=filesystem,
                                validate_schema=True)
    # ARROW-2209: Ensure the dataset schema also includes the partition columns
    dataset_cols = set(dataset.schema.to_arrow_schema().names)
    assert dataset_cols == set(output_table.schema.names)
    input_table = dataset.read()
    input_df = input_table.to_pandas()
    # Read data back in and compare with original DataFrame
    # Partitioned columns added to the end of the DataFrame when read
    input_df_cols = input_df.columns.tolist()
    assert partition_by == input_df_cols[-1 * len(partition_by):]
    # Partitioned columns become 'categorical' dtypes
    input_df = input_df[cols]
    for col in partition_by:
        output_df[col] = output_df[col].astype('category')
    assert output_df.equals(input_df)
def _test_write_to_dataset_no_partitions(base_path, filesystem=None):
    """ARROW-1400: repeated unpartitioned writes append files under the
    root path; reading back and deduplicating recovers the original data."""
    # ARROW-1400
    output_df = pd.DataFrame({'group1': list('aaabbbbccc'),
                              'group2': list('eefeffgeee'),
                              'num': list(range(10)),
                              'date': np.arange('2017-01-01', '2017-01-11',
                                                dtype='datetime64[D]')})
    cols = output_df.columns.tolist()
    output_table = pa.Table.from_pandas(output_df)
    if filesystem is None:
        filesystem = LocalFileSystem.get_instance()
    # Without partitions, append files to root_path
    n = 5
    for i in range(n):
        pq.write_to_dataset(output_table, base_path,
                            filesystem=filesystem)
    output_files = [file for file in filesystem.ls(base_path)
                    if file.endswith(".parquet")]
    # One new file per write call.
    assert len(output_files) == n
    # Deduplicated incoming DataFrame should match
    # original outgoing Dataframe
    input_table = pq.ParquetDataset(base_path,
                                    filesystem=filesystem).read()
    input_df = input_table.to_pandas()
    input_df = input_df.drop_duplicates()
    input_df = input_df[cols]
    assert output_df.equals(input_df)
@pytest.mark.pandas
def test_write_to_dataset_with_partitions(tempdir):
    """Partitioned-dataset round trip on the local filesystem."""
    _test_write_to_dataset_with_partitions(str(tempdir))
@pytest.mark.pandas
def test_write_to_dataset_with_partitions_and_schema(tempdir):
    """Partitioned-dataset round trip with an explicit caller schema."""
    schema = pa.schema([pa.field('group1', type=pa.string()),
                        pa.field('group2', type=pa.string()),
                        pa.field('num', type=pa.int64()),
                        pa.field('nan', type=pa.int32()),
                        pa.field('date', type=pa.timestamp(unit='us'))])
    _test_write_to_dataset_with_partitions(str(tempdir), schema=schema)
@pytest.mark.pandas
def test_write_to_dataset_with_partitions_and_index_name(tempdir):
    """Partitioned-dataset round trip passing an explicit index name."""
    _test_write_to_dataset_with_partitions(str(tempdir),
                                           index_name='index_name')
@pytest.mark.pandas
def test_write_to_dataset_no_partitions(tempdir):
    """Unpartitioned-dataset append/read round trip on the local filesystem."""
    _test_write_to_dataset_no_partitions(str(tempdir))
@pytest.mark.pandas
def test_write_to_dataset_with_partitions_and_custom_filenames(tempdir):
    """ARROW-3538: a partition_filename_cb controls the basename of each
    written partition file."""
    output_df = pd.DataFrame({'group1': list('aaabbbbccc'),
                              'group2': list('eefeffgeee'),
                              'num': list(range(10)),
                              # np.nan directly: the pd.np alias is removed
                              # in modern pandas.
                              'nan': [np.nan] * 10,
                              'date': np.arange('2017-01-01', '2017-01-11',
                                                dtype='datetime64[D]')})
    partition_by = ['group1', 'group2']
    output_table = pa.Table.from_pandas(output_df)
    path = str(tempdir)

    def partition_filename_callback(keys):
        # Build e.g. 'a-e.parquet' from the partition key tuple.
        return "{0}-{1}.parquet".format(*keys)

    pq.write_to_dataset(output_table, path,
                        partition_by, partition_filename_callback)
    dataset = pq.ParquetDataset(path)
    # ARROW-3538: Ensure partition filenames match the given pattern
    # defined in the local function partition_filename_callback
    expected_basenames = [
        'a-e.parquet', 'a-f.parquet',
        'b-e.parquet', 'b-f.parquet',
        'b-g.parquet', 'c-e.parquet'
    ]
    output_basenames = [os.path.basename(p.path) for p in dataset.pieces]
    assert sorted(expected_basenames) == sorted(output_basenames)
@pytest.mark.large_memory
def test_large_table_int32_overflow():
    """Writing a column longer than INT32_MAX rows must not overflow."""
    # One element past the int32 limit.
    size = np.iinfo('int32').max + 1
    arr = np.ones(size, dtype='uint8')
    parr = pa.array(arr, type=pa.uint8())
    table = pa.Table.from_arrays([parr], names=['one'])
    f = io.BytesIO()
    _write_table(table, f)
def _simple_table_roundtrip(table):
    """Write *table* to an in-memory Parquet buffer and read it back."""
    sink = pa.BufferOutputStream()
    _write_table(table, sink)
    return _read_table(sink.getvalue())
@pytest.mark.pandas
@pytest.mark.large_memory
def test_binary_array_overflow_to_chunked():
    """ARROW-3762: a binary column exceeding 2GB must come back as a
    multi-chunk ChunkedArray rather than failing."""
    # ARROW-3762
    # 2^31 + 1 bytes
    values = [b'x'] + [
        b'x' * (1 << 20)
    ] * 2 * (1 << 10)
    df = pd.DataFrame({'byte_col': values})
    tbl = pa.Table.from_pandas(df, preserve_index=False)
    read_tbl = _simple_table_roundtrip(tbl)
    col0_data = read_tbl[0]
    assert isinstance(col0_data, pa.ChunkedArray)
    # Split up into 2GB chunks
    assert col0_data.num_chunks == 2
    assert tbl.equals(read_tbl)
@pytest.mark.pandas
@pytest.mark.large_memory
def test_list_of_binary_large_cell():
    """ARROW-4688: round-trip list-of-binary cells totalling near 2GB."""
    # ARROW-4688
    data = []
    # TODO(wesm): handle chunked children
    # 2^31 - 1 bytes in a single cell
    # data.append([b'x' * (1 << 20)] * 2047 + [b'x' * ((1 << 20) - 1)])
    # A little under 2GB in cell each containing approximately 10MB each
    data.extend([[b'x' * 1000000] * 10] * 214)
    arr = pa.array(data)
    table = pa.Table.from_arrays([arr], ['chunky_cells'])
    read_table = _simple_table_roundtrip(table)
    assert table.equals(read_table)
@pytest.mark.pandas
def test_index_column_name_duplicate(tempdir):
    """A frame whose index shares its name ('time') with a regular column
    must still round-trip exactly."""
    data = {
        'close': {
            pd.Timestamp('2017-06-30 01:31:00'): 154.99958999999998,
            pd.Timestamp('2017-06-30 01:32:00'): 154.99958999999998,
        },
        'time': {
            pd.Timestamp('2017-06-30 01:31:00'): pd.Timestamp(
                '2017-06-30 01:31:00'
            ),
            pd.Timestamp('2017-06-30 01:32:00'): pd.Timestamp(
                '2017-06-30 01:32:00'
            ),
        }
    }
    path = str(tempdir / 'data.parquet')
    # drop=False keeps 'time' both as the index and as a column.
    dfx = pd.DataFrame(data).set_index('time', drop=False)
    tdfx = pa.Table.from_pandas(dfx)
    _write_table(tdfx, path)
    arrow_table = _read_table(path)
    result_df = arrow_table.to_pandas()
    tm.assert_frame_equal(result_df, dfx)
@pytest.mark.pandas
def test_parquet_nested_convenience(tempdir):
    """ARROW-1684: column selection works on files with nested list columns."""
    # ARROW-1684
    df = pd.DataFrame({
        'a': [[1, 2, 3], None, [4, 5], []],
        'b': [[1.], None, None, [6., 7.]],
    })
    path = str(tempdir / 'nested_convenience.parquet')
    table = pa.Table.from_pandas(df, preserve_index=False)
    _write_table(table, path)
    # Reading a single nested column and both columns must both work.
    read = pq.read_table(path, columns=['a'])
    tm.assert_frame_equal(read.to_pandas(), df[['a']])
    read = pq.read_table(path, columns=['a', 'b'])
    tm.assert_frame_equal(read.to_pandas(), df)
@pytest.mark.pandas
def test_backwards_compatible_index_naming(datadir):
    """A file written by pyarrow 0.7.1 (unnamed RangeIndex) must read back
    matching the reference CSV snapshot."""
    expected_string = b"""\
carat        cut  color  clarity  depth  table  price     x     y     z
 0.23      Ideal      E      SI2   61.5   55.0    326  3.95  3.98  2.43
 0.21    Premium      E      SI1   59.8   61.0    326  3.89  3.84  2.31
 0.23       Good      E      VS1   56.9   65.0    327  4.05  4.07  2.31
 0.29    Premium      I      VS2   62.4   58.0    334  4.20  4.23  2.63
 0.31       Good      J      SI2   63.3   58.0    335  4.34  4.35  2.75
 0.24  Very Good      J     VVS2   62.8   57.0    336  3.94  3.96  2.48
 0.24  Very Good      I     VVS1   62.3   57.0    336  3.95  3.98  2.47
 0.26  Very Good      H      SI1   61.9   55.0    337  4.07  4.11  2.53
 0.22       Fair      E      VS2   65.1   61.0    337  3.87  3.78  2.49
 0.23  Very Good      H      VS1   59.4   61.0    338  4.00  4.05  2.39"""
    expected = pd.read_csv(io.BytesIO(expected_string), sep=r'\s{2,}',
                           index_col=None, header=0, engine='python')
    table = _read_table(datadir / 'v0.7.1.parquet')
    result = table.to_pandas()
    tm.assert_frame_equal(result, expected)
@pytest.mark.pandas
def test_backwards_compatible_index_multi_level_named(datadir):
    """A 0.7.1 file with a fully named MultiIndex must read back matching
    the reference CSV snapshot."""
    expected_string = b"""\
carat        cut  color  clarity  depth  table  price     x     y     z
 0.23      Ideal      E      SI2   61.5   55.0    326  3.95  3.98  2.43
 0.21    Premium      E      SI1   59.8   61.0    326  3.89  3.84  2.31
 0.23       Good      E      VS1   56.9   65.0    327  4.05  4.07  2.31
 0.29    Premium      I      VS2   62.4   58.0    334  4.20  4.23  2.63
 0.31       Good      J      SI2   63.3   58.0    335  4.34  4.35  2.75
 0.24  Very Good      J     VVS2   62.8   57.0    336  3.94  3.96  2.48
 0.24  Very Good      I     VVS1   62.3   57.0    336  3.95  3.98  2.47
 0.26  Very Good      H      SI1   61.9   55.0    337  4.07  4.11  2.53
 0.22       Fair      E      VS2   65.1   61.0    337  3.87  3.78  2.49
 0.23  Very Good      H      VS1   59.4   61.0    338  4.00  4.05  2.39"""
    expected = pd.read_csv(
        io.BytesIO(expected_string), sep=r'\s{2,}',
        index_col=['cut', 'color', 'clarity'],
        header=0, engine='python'
    ).sort_index()
    table = _read_table(datadir / 'v0.7.1.all-named-index.parquet')
    result = table.to_pandas()
    tm.assert_frame_equal(result, expected)
@pytest.mark.pandas
def test_backwards_compatible_index_multi_level_some_named(datadir):
    """A 0.7.1 file with a partially named MultiIndex (middle level unnamed)
    must read back matching the reference CSV snapshot."""
    expected_string = b"""\
carat        cut  color  clarity  depth  table  price     x     y     z
 0.23      Ideal      E      SI2   61.5   55.0    326  3.95  3.98  2.43
 0.21    Premium      E      SI1   59.8   61.0    326  3.89  3.84  2.31
 0.23       Good      E      VS1   56.9   65.0    327  4.05  4.07  2.31
 0.29    Premium      I      VS2   62.4   58.0    334  4.20  4.23  2.63
 0.31       Good      J      SI2   63.3   58.0    335  4.34  4.35  2.75
 0.24  Very Good      J     VVS2   62.8   57.0    336  3.94  3.96  2.48
 0.24  Very Good      I     VVS1   62.3   57.0    336  3.95  3.98  2.47
 0.26  Very Good      H      SI1   61.9   55.0    337  4.07  4.11  2.53
 0.22       Fair      E      VS2   65.1   61.0    337  3.87  3.78  2.49
 0.23  Very Good      H      VS1   59.4   61.0    338  4.00  4.05  2.39"""
    expected = pd.read_csv(
        io.BytesIO(expected_string),
        sep=r'\s{2,}', index_col=['cut', 'color', 'clarity'],
        header=0, engine='python'
    ).sort_index()
    # Drop the middle level's name to mirror the file's metadata.
    expected.index = expected.index.set_names(['cut', None, 'clarity'])
    table = _read_table(datadir / 'v0.7.1.some-named-index.parquet')
    result = table.to_pandas()
    tm.assert_frame_equal(result, expected)
@pytest.mark.pandas
def test_backwards_compatible_column_metadata_handling(datadir):
    """Legacy column metadata (tz-aware MultiIndex) is handled both on a
    full read and when selecting a subset of columns."""
    expected = pd.DataFrame(
        {'a': [1, 2, 3], 'b': [.1, .2, .3],
         'c': pd.date_range("2017-01-01", periods=3, tz='Europe/Brussels')})
    expected.index = pd.MultiIndex.from_arrays(
        [['a', 'b', 'c'],
         pd.date_range("2017-01-01", periods=3, tz='Europe/Brussels')],
        names=['index', None])
    path = datadir / 'v0.7.1.column-metadata-handling.parquet'
    table = _read_table(path)
    result = table.to_pandas()
    tm.assert_frame_equal(result, expected)
    # Selecting a subset drops the stored index entirely.
    table = _read_table(path, columns=['a'])
    result = table.to_pandas()
    tm.assert_frame_equal(result, expected[['a']].reset_index(drop=True))
def _make_dataset_for_pickling(tempdir, N=100):
    """Build a multi-row-group dataset with a ``_metadata`` file and return
    the opened ParquetDataset, for use in pickling tests."""
    path = tempdir / 'data.parquet'
    fs = LocalFileSystem.get_instance()
    df = pd.DataFrame({
        'index': np.arange(N),
        'values': np.random.randn(N)
    }, columns=['index', 'values'])
    table = pa.Table.from_pandas(df)
    num_groups = 3
    # Write the same table three times to get three row groups.
    with pq.ParquetWriter(path, table.schema) as writer:
        for i in range(num_groups):
            writer.write_table(table)
    reader = pq.ParquetFile(path)
    assert reader.metadata.num_row_groups == num_groups
    metadata_path = tempdir / '_metadata'
    with fs.open(metadata_path, 'wb') as f:
        pq.write_metadata(table.schema, f)
    dataset = pq.ParquetDataset(tempdir, filesystem=fs)
    assert dataset.metadata_path == str(metadata_path)
    return dataset
@pytest.mark.pandas
@pytest.mark.parametrize('pickler', [
    pytest.param(pickle, id='builtin'),
    pytest.param(pytest.importorskip('cloudpickle'), id='cloudpickle')
])
def test_pickle_dataset(tempdir, datadir, pickler):
    """A ParquetDataset and its metadata objects must survive a pickle
    round trip with both the builtin pickler and cloudpickle."""
    def is_pickleable(obj):
        # Round-trips and compares equal.
        return obj == pickler.loads(pickler.dumps(obj))
    dataset = _make_dataset_for_pickling(tempdir)
    assert is_pickleable(dataset)
    assert is_pickleable(dataset.metadata)
    assert is_pickleable(dataset.metadata.schema)
    assert len(dataset.metadata.schema)
    for column in dataset.metadata.schema:
        assert is_pickleable(column)
    for piece in dataset.pieces:
        assert is_pickleable(piece)
        metadata = piece.get_metadata()
        assert metadata.num_row_groups
        for i in range(metadata.num_row_groups):
            assert is_pickleable(metadata.row_group(i))
@pytest.mark.pandas
def test_decimal_roundtrip(tempdir):
    """Round-trip decimal columns for every supported precision/scale pair."""
    num_values = 10
    columns = {}
    for precision in range(1, 39):
        for scale in range(0, precision + 1):
            with util.random_seed(0):
                name = ('dec_precision_{:d}_scale_{:d}'
                        .format(precision, scale))
                columns[name] = [util.randdecimal(precision, scale)
                                 for _ in range(num_values)]
    expected = pd.DataFrame(columns)
    target = str(tempdir / 'decimals.parquet')
    _write_table(pa.Table.from_pandas(expected), target)
    round_tripped = _read_table(target).to_pandas()
    tm.assert_frame_equal(round_tripped, expected)
@pytest.mark.pandas
@pytest.mark.xfail(
    raises=pa.ArrowException, reason='Parquet does not support negative scale'
)
def test_decimal_roundtrip_negative_scale(tempdir):
    """Negative-scale decimals (e.g. 1.23E4) are expected to fail to write."""
    expected = pd.DataFrame({'decimal_num': [decimal.Decimal('1.23E4')]})
    filename = tempdir / 'decimals.parquet'
    string_filename = str(filename)
    t = pa.Table.from_pandas(expected)
    _write_table(t, string_filename)
    result_table = _read_table(string_filename)
    result = result_table.to_pandas()
    tm.assert_frame_equal(result, expected)
@pytest.mark.pandas
def test_parquet_writer_context_obj(tempdir):
    """ParquetWriter used as a context manager accumulates multiple tables
    into one file that reads back as their concatenation."""
    df = _test_dataframe(100)
    df['unique_id'] = 0
    arrow_table = pa.Table.from_pandas(df, preserve_index=False)
    out = pa.BufferOutputStream()
    with pq.ParquetWriter(out, arrow_table.schema, version='2.0') as writer:
        frames = []
        for i in range(10):
            df['unique_id'] = i
            arrow_table = pa.Table.from_pandas(df, preserve_index=False)
            writer.write_table(arrow_table)
            # Copy: df is mutated on the next iteration.
            frames.append(df.copy())
    buf = out.getvalue()
    result = _read_table(pa.BufferReader(buf))
    expected = pd.concat(frames, ignore_index=True)
    tm.assert_frame_equal(result.to_pandas(), expected)
@pytest.mark.pandas
def test_parquet_writer_context_obj_with_exception(tempdir):
    """An exception inside the ParquetWriter context must still leave a
    valid file containing everything written before the error."""
    df = _test_dataframe(100)
    df['unique_id'] = 0
    arrow_table = pa.Table.from_pandas(df, preserve_index=False)
    out = pa.BufferOutputStream()
    error_text = 'Artificial Error'
    try:
        with pq.ParquetWriter(out,
                              arrow_table.schema,
                              version='2.0') as writer:
            frames = []
            for i in range(10):
                df['unique_id'] = i
                arrow_table = pa.Table.from_pandas(df, preserve_index=False)
                writer.write_table(arrow_table)
                frames.append(df.copy())
                # Abort mid-way to exercise __exit__ on the error path.
                if i == 5:
                    raise ValueError(error_text)
    except Exception as e:
        assert str(e) == error_text
    buf = out.getvalue()
    result = _read_table(pa.BufferReader(buf))
    expected = pd.concat(frames, ignore_index=True)
    tm.assert_frame_equal(result.to_pandas(), expected)
@pytest.mark.pandas
def test_zlib_compression_bug():
    """ARROW-3514: gzip writes failed with 'output buffer too small'."""
    table = pa.Table.from_arrays([pa.array(['abc', 'def'])], ['some_col'])
    buf = io.BytesIO()
    pq.write_table(table, buf, compression='gzip')
    buf.seek(0)
    roundtrip = pq.read_table(buf)
    tm.assert_frame_equal(roundtrip.to_pandas(), table.to_pandas())
@pytest.mark.pandas
def test_merging_parquet_tables_with_different_pandas_metadata(tempdir):
    """ARROW-3728: tables whose schemas differ only in pandas metadata can
    be written through a single ParquetWriter."""
    schema = pa.schema([
        pa.field('int', pa.int16()),
        pa.field('float', pa.float32()),
        pa.field('string', pa.string())
    ])
    df1 = pd.DataFrame({
        'int': np.arange(3, dtype=np.uint8),
        'float': np.arange(3, dtype=np.float32),
        'string': ['ABBA', 'EDDA', 'ACDC']
    })
    df2 = pd.DataFrame({
        'int': [4, 5],
        'float': [1.1, None],
        'string': [None, None]
    })
    table1 = pa.Table.from_pandas(df1, schema=schema, preserve_index=False)
    table2 = pa.Table.from_pandas(df2, schema=schema, preserve_index=False)
    # Schemas differ only in their (pandas) metadata.
    assert not table1.schema.equals(table2.schema)
    assert table1.schema.equals(table2.schema, check_metadata=False)
    # Use the writer as a context manager so it is closed and the Parquet
    # footer is written; the original left the writer dangling, producing
    # an unterminated file.
    with pq.ParquetWriter(tempdir / 'merged.parquet', schema=schema) as writer:
        writer.write_table(table1)
        writer.write_table(table2)
def test_empty_row_groups(tempdir):
    """ARROW-3020: zero-row row groups are written and read back intact."""
    # ARROW-3020
    table = pa.Table.from_arrays([pa.array([], type='int32')], ['f0'])
    path = tempdir / 'empty_row_groups.parquet'
    num_groups = 3
    with pq.ParquetWriter(path, table.schema) as writer:
        for i in range(num_groups):
            writer.write_table(table)
    reader = pq.ParquetFile(path)
    assert reader.metadata.num_row_groups == num_groups
    # Every empty group must read back equal to the empty source table.
    for i in range(num_groups):
        assert reader.read_row_group(i).equals(table)
@pytest.mark.pandas
def test_parquet_writer_with_caller_provided_filesystem():
    """ParquetWriter must open its target through a caller-supplied
    filesystem object, and reject a filesystem for file-like targets."""
    out = pa.BufferOutputStream()

    class CustomFS(FileSystem):
        # Records the open() call and hands back the shared buffer.
        def __init__(self):
            self.path = None
            self.mode = None

        def open(self, path, mode='rb'):
            self.path = path
            self.mode = mode
            return out

    fs = CustomFS()
    fname = 'expected_fname.parquet'
    df = _test_dataframe(100)
    table = pa.Table.from_pandas(df, preserve_index=False)
    with pq.ParquetWriter(fname, table.schema, filesystem=fs, version='2.0') \
            as writer:
        writer.write_table(table)
    assert fs.path == fname
    assert fs.mode == 'wb'
    assert out.closed
    buf = out.getvalue()
    table_read = _read_table(pa.BufferReader(buf))
    df_read = table_read.to_pandas()
    tm.assert_frame_equal(df_read, df)
    # Should raise ValueError when filesystem is passed with file-like object
    with pytest.raises(ValueError) as err_info:
        pq.ParquetWriter(pa.BufferOutputStream(), table.schema, filesystem=fs)
    expected_msg = ("filesystem passed but where is file-like, so"
                    " there is nothing to open with filesystem.")
    # Compare against the exception's message: str(err_info) is the
    # ExceptionInfo repr, not the raised error's text.
    assert str(err_info.value) == expected_msg
def test_writing_empty_lists():
    """ARROW-2591: writing empty list<int32> cells used to segfault."""
    empty_lists = pa.array([[], []], pa.list_(pa.int32()))
    table = pa.Table.from_arrays([empty_lists], ['list(int32)'])
    _check_roundtrip(table)
def test_write_nested_zero_length_array_chunk_failure():
    """ARROW-3792: chunked columns whose first chunk is zero-length must
    still round-trip (the struct->flatten path used to fail)."""
    # Bug report in ARROW-3792
    cols = OrderedDict(
        int32=pa.int32(),
        list_string=pa.list_(pa.string())
    )
    data = [[], [OrderedDict(int32=1, list_string=('G',)), ]]
    # This produces a table with a column like
    # <Column name='list_string' type=ListType(list<item: string>)>
    # [
    #   [],
    #   [
    #     [
    #       "G"
    #     ]
    #   ]
    # ]
    #
    # Each column is a ChunkedArray with 2 elements
    my_arrays = [pa.array(batch, type=pa.struct(cols)).flatten()
                 for batch in data]
    my_batches = [pa.RecordBatch.from_arrays(batch, pa.schema(cols))
                  for batch in my_arrays]
    tbl = pa.Table.from_batches(my_batches, pa.schema(cols))
    _check_roundtrip(tbl)
@pytest.mark.pandas
def test_partitioned_dataset(tempdir):
    """ARROW-3208: reading a partitioned dataset and re-writing it to a
    single file used to segfault."""
    # ARROW-3208: Segmentation fault when reading a Parquet partitioned dataset
    # to a Parquet file
    path = tempdir / "ARROW-3208"
    df = pd.DataFrame({
        'one': [-1, 10, 2.5, 100, 1000, 1, 29.2],
        'two': [-1, 10, 2, 100, 1000, 1, 11],
        'three': [0, 0, 0, 0, 0, 0, 0]
    })
    table = pa.Table.from_pandas(df)
    pq.write_to_dataset(table, root_path=str(path),
                        partition_cols=['one', 'two'])
    table = pq.ParquetDataset(path).read()
    pq.write_table(table, path / "output.parquet")
def test_read_column_invalid_index():
    """read_column works for valid indices and raises for out-of-range."""
    table = pa.table([pa.array([4, 5]), pa.array(["foo", "bar"])],
                     names=['ints', 'strs'])
    sink = pa.BufferOutputStream()
    pq.write_table(table, sink)
    parquet_file = pq.ParquetFile(sink.getvalue())
    assert parquet_file.reader.read_column(0).to_pylist() == [4, 5]
    assert parquet_file.reader.read_column(1).to_pylist() == ["foo", "bar"]
    for bad_index in (-1, 2):
        with pytest.raises((ValueError, IndexError)):
            parquet_file.reader.read_column(bad_index)
def test_direct_read_dictionary():
    """ARROW-3325: read_dictionary returns the column dictionary-encoded."""
    # ARROW-3325
    repeats = 10
    nunique = 5
    data = [
        [tm.rands(10) for i in range(nunique)] * repeats,
    ]
    table = pa.table(data, names=['f0'])
    bio = pa.BufferOutputStream()
    pq.write_table(table, bio)
    contents = bio.getvalue()
    result = pq.read_table(pa.BufferReader(contents),
                           read_dictionary=['f0'])
    # Compute dictionary-encoded subfield
    expected = pa.table([table[0].dictionary_encode()], names=['f0'])
    assert result.equals(expected)
def test_dataset_read_dictionary(tempdir):
    """read_dictionary also applies when reading through ParquetDataset,
    producing one dictionary-encoded chunk per file."""
    path = tempdir / "ARROW-3325-dataset"
    t1 = pa.table([[tm.rands(10) for i in range(5)] * 10], names=['f0'])
    t2 = pa.table([[tm.rands(10) for i in range(5)] * 10], names=['f0'])
    pq.write_to_dataset(t1, root_path=str(path))
    pq.write_to_dataset(t2, root_path=str(path))
    result = pq.ParquetDataset(path, read_dictionary=['f0']).read()
    # The order of the chunks is non-deterministic
    ex_chunks = [t1[0].chunk(0).dictionary_encode(),
                 t2[0].chunk(0).dictionary_encode()]
    assert result[0].num_chunks == 2
    c0, c1 = result[0].chunk(0), result[0].chunk(1)
    # Accept either file ordering.
    if c0.equals(ex_chunks[0]):
        assert c1.equals(ex_chunks[1])
    else:
        assert c0.equals(ex_chunks[1])
        assert c1.equals(ex_chunks[0])
def test_direct_read_dictionary_subfield():
    """read_dictionary addresses a nested list item ('f0.list.item') and
    dictionary-encodes only that child values array."""
    repeats = 10
    nunique = 5
    data = [
        [[tm.rands(10)] for i in range(nunique)] * repeats,
    ]
    table = pa.table(data, names=['f0'])
    bio = pa.BufferOutputStream()
    pq.write_table(table, bio)
    contents = bio.getvalue()
    result = pq.read_table(pa.BufferReader(contents),
                           read_dictionary=['f0.list.item'])
    # Build the expected list<dictionary<int32, string>> column by hand.
    arr = pa.array(data[0])
    values_as_dict = arr.values.dictionary_encode()
    inner_indices = values_as_dict.indices.cast('int32')
    new_values = pa.DictionaryArray.from_arrays(inner_indices,
                                                values_as_dict.dictionary)
    offsets = pa.array(range(51), type='int32')
    expected_arr = pa.ListArray.from_arrays(offsets, new_values)
    expected = pa.table([expected_arr], names=['f0'])
    assert result.equals(expected)
    assert result[0].num_chunks == 1
@pytest.mark.pandas
def test_dataset_metadata(tempdir):
    """ARROW-1983: metadata_collector captures per-piece file metadata that
    matches what the written pieces themselves report."""
    path = tempdir / "ARROW-1983-dataset"
    # create and write a test dataset
    df = pd.DataFrame({
        'one': [1, 2, 3],
        'two': [-1, -2, -3],
        'three': [[1, 2], [2, 3], [3, 4]],
    })
    table = pa.Table.from_pandas(df)
    metadata_list = []
    pq.write_to_dataset(table, root_path=str(path),
                        partition_cols=['one', 'two'],
                        metadata_collector=metadata_list)
    # open the dataset and collect metadata from pieces:
    dataset = pq.ParquetDataset(path)
    metadata_list2 = [p.get_metadata() for p in dataset.pieces]
    # compare metadata list content:
    assert len(metadata_list) == len(metadata_list2)
    for md, md2 in zip(metadata_list, metadata_list2):
        d = md.to_dict()
        d2 = md2.to_dict()
        # serialized_size is initialized in the reader:
        assert d.pop('serialized_size') == 0
        assert d2.pop('serialized_size') > 0
        assert d == d2
def test_parquet_file_too_small(tempdir):
    """Files shorter than a valid Parquet footer raise ArrowIOError with a
    message reporting the actual size."""
    path = str(tempdir / "test.parquet")
    # 0-byte file
    with pytest.raises(pa.ArrowIOError,
                       match='size is 0 bytes'):
        with open(path, 'wb') as f:
            pass
        pq.read_table(path)
    # 4-byte file (shorter than the magic+footer)
    with pytest.raises(pa.ArrowIOError,
                       match='size is 4 bytes'):
        with open(path, 'wb') as f:
            f.write(b'ffff')
        pq.read_table(path)
@pytest.mark.pandas
def test_categorical_index_survives_roundtrip():
    """ARROW-3652 (via ARROW-3246): a CategoricalIndex stays categorical
    after a write/read round trip."""
    # ARROW-3652, addressed by ARROW-3246
    df = pd.DataFrame([['a', 'b'], ['c', 'd']], columns=['c1', 'c2'])
    df['c1'] = df['c1'].astype('category')
    df = df.set_index(['c1'])
    table = pa.Table.from_pandas(df)
    bos = pa.BufferOutputStream()
    pq.write_table(table, bos)
    ref_df = pq.read_pandas(bos.getvalue()).to_pandas()
    assert isinstance(ref_df.index, pd.CategoricalIndex)
    assert ref_df.index.equals(df.index)
def test_dictionary_array_automatically_read():
    """ARROW-3246: dictionary-encoded columns read back dictionary-encoded
    without needing read_dictionary, even across many chunks."""
    # ARROW-3246
    # Make a large dictionary, a little over 4MB of data
    dict_length = 4000
    dict_values = pa.array([('x' * 1000 + '_{}'.format(i))
                            for i in range(dict_length)])
    num_chunks = 10
    chunk_size = 100
    chunks = []
    for i in range(num_chunks):
        indices = np.random.randint(0, dict_length,
                                    size=chunk_size).astype(np.int32)
        chunks.append(pa.DictionaryArray.from_arrays(pa.array(indices),
                                                     dict_values))
    table = pa.table([pa.chunked_array(chunks)], names=['f0'])
    bio = pa.BufferOutputStream()
    pq.write_table(table, bio)
    contents = bio.getvalue()
    result = pq.read_table(pa.BufferReader(contents))
    assert result.equals(table)
    # The only key in the metadata was the Arrow schema key
    assert result.schema.metadata is None
@pytest.mark.pandas
def test_pandas_categorical_na_type_row_groups():
    """ARROW-5085: all-null categorical columns written with small row
    groups read back as their non-categorical equivalents."""
    # ARROW-5085
    df = pd.DataFrame({"col": [None] * 100, "int": [1.0] * 100})
    df_category = df.astype({"col": "category", "int": "category"})
    table = pa.Table.from_pandas(df)
    table_cat = pa.Table.from_pandas(df_category)
    buf = pa.BufferOutputStream()
    # it works
    pq.write_table(table_cat, buf, version="2.0", chunk_size=10)
    result = pq.read_table(buf.getvalue())
    # Result is non-categorical
    assert result[0].equals(table[0])
    assert result[1].equals(table[1])
@pytest.mark.pandas
def test_pandas_categorical_roundtrip():
    """ARROW-5480 (via ARROW-3246): categoricals with an unobserved
    category and a null code (-1) round-trip as category dtype."""
    # ARROW-5480, this was enabled by ARROW-3246
    # Have one of the categories unobserved and include a null (-1)
    codes = np.array([2, 0, 0, 2, 0, -1, 2], dtype='int32')
    categories = ['foo', 'bar', 'baz']
    df = pd.DataFrame({'x': pd.Categorical.from_codes(
        codes, categories=categories)})
    buf = pa.BufferOutputStream()
    pq.write_table(pa.table(df), buf)
    result = pq.read_table(buf.getvalue()).to_pandas()
    assert result.x.dtype == 'category'
    assert (result.x.cat.categories == categories).all()
    tm.assert_frame_equal(result, df)
@pytest.mark.pandas
def test_multi_dataset_metadata(tempdir):
    """Merge per-file metadata via append_row_groups into a combined
    ``_metadata`` file and verify it reads back identically."""
    filenames = ["ARROW-1983-dataset.0", "ARROW-1983-dataset.1"]
    metapath = str(tempdir / "_metadata")
    # create a test dataset
    df = pd.DataFrame({
        'one': [1, 2, 3],
        'two': [-1, -2, -3],
        'three': [[1, 2], [2, 3], [3, 4]],
    })
    table = pa.Table.from_pandas(df)
    # write dataset twice and collect/merge metadata
    _meta = None
    for filename in filenames:
        meta = []
        pq.write_table(table, str(tempdir / filename),
                       metadata_collector=meta)
        # Record each file's relative path before merging.
        meta[0].set_file_path(filename)
        if _meta is None:
            _meta = meta[0]
        else:
            _meta.append_row_groups(meta[0])
    # Write merged metadata-only file
    with open(metapath, "wb") as f:
        _meta.write_metadata_file(f)
    # Read back the metadata
    meta = pq.read_metadata(metapath)
    md = meta.to_dict()
    _md = _meta.to_dict()
    for key in _md:
        if key != 'serialized_size':
            assert _md[key] == md[key]
    assert _md['num_columns'] == 3
    assert _md['num_rows'] == 6
    assert _md['num_row_groups'] == 2
    # serialized_size is only populated on read.
    assert _md['serialized_size'] == 0
    assert md['serialized_size'] > 0
@pytest.mark.pandas
def test_filter_before_validate_schema(tempdir):
    """ARROW-4076: partition filters are applied before schema validation,
    so pieces excluded by the filter may have mismatching schemas."""
    # ARROW-4076 apply filter before schema validation
    # to avoid checking unneeded schemas
    # create partitioned dataset with mismatching schemas which would
    # otherwise raise if first validation all schemas
    dir1 = tempdir / 'A=0'
    dir1.mkdir()
    table1 = pa.Table.from_pandas(pd.DataFrame({'B': [1, 2, 3]}))
    pq.write_table(table1, dir1 / 'data.parquet')
    dir2 = tempdir / 'A=1'
    dir2.mkdir()
    # Same column name 'B', incompatible type (string vs int).
    table2 = pa.Table.from_pandas(pd.DataFrame({'B': ['a', 'b', 'c']}))
    pq.write_table(table2, dir2 / 'data.parquet')
    # read single file using filter
    table = pq.read_table(tempdir, filters=[[('A', '==', 0)]])
    assert table.column('B').equals(pa.chunked_array([[1, 2, 3]]))
| 32.513709
| 79
| 0.627494
|
from collections import OrderedDict
import datetime
import decimal
import io
import json
import os
import six
import pickle
import pytest
import numpy as np
import pyarrow as pa
from pyarrow.compat import guid, u, BytesIO, unichar, PY2
from pyarrow.pandas_compat import _pandas_api
from pyarrow.tests import util
from pyarrow.filesystem import LocalFileSystem, FileSystem
try:
import pyarrow.parquet as pq
except ImportError:
pq = None
try:
import pandas as pd
import pandas.util.testing as tm
from .pandas_examples import dataframe_with_arrays, dataframe_with_lists
except ImportError:
pd = tm = None
pytestmark = pytest.mark.parquet
@pytest.fixture(scope='module')
def datadir(datadir):
    """Narrow the shared datadir fixture to the parquet test-data subdir."""
    return datadir / 'parquet'
def _write_table(table, path, **kwargs):
    """Write *table* to *path*; pandas DataFrames are converted first.

    Returns the pyarrow Table actually written.
    """
    import pyarrow.parquet as pq
    converted = (pa.Table.from_pandas(table)
                 if _pandas_api.is_data_frame(table) else table)
    pq.write_table(converted, path, **kwargs)
    return converted
def _read_table(*args, **kwargs):
    """Thin wrapper over pq.read_table so tests share one read entry point."""
    return pq.read_table(*args, **kwargs)
def _roundtrip_table(table, read_table_kwargs=None,
                     write_table_kwargs=None):
    """Write *table* to an in-memory buffer and read it back.

    Keyword-argument dicts are forwarded to the write and read helpers.
    """
    buf = io.BytesIO()
    _write_table(table, buf, **(write_table_kwargs or {}))
    buf.seek(0)
    return _read_table(buf, **(read_table_kwargs or {}))
def _check_roundtrip(table, expected=None, read_table_kwargs=None,
                     **write_table_kwargs):
    """Assert *table* round-trips to *expected* (defaults to itself).

    Performs the round trip twice: once on the input and once on the
    first result, to catch errors introduced by the read path.
    """
    if expected is None:
        expected = table
    read_table_kwargs = read_table_kwargs or {}
    result = _roundtrip_table(table, read_table_kwargs=read_table_kwargs,
                              write_table_kwargs=write_table_kwargs)
    assert result.equals(expected)
    result = _roundtrip_table(result, read_table_kwargs=read_table_kwargs,
                              write_table_kwargs=write_table_kwargs)
    assert result.equals(expected)
def _roundtrip_pandas_dataframe(df, write_kwargs):
    """Round-trip *df* through an in-memory Parquet file back to pandas."""
    buf = io.BytesIO()
    _write_table(pa.Table.from_pandas(df), buf, **write_kwargs)
    buf.seek(0)
    return _read_table(buf).to_pandas()
@pytest.mark.parametrize('dtype', [int, float])
def test_single_pylist_column_roundtrip(tempdir, dtype):
    """A single int or float column built from a Python list round-trips
    with identical names and chunk data."""
    filename = tempdir / 'single_{}_column.parquet'.format(dtype.__name__)
    data = [pa.array(list(map(dtype, range(5))))]
    table = pa.Table.from_arrays(data, names=['a'])
    _write_table(table, filename)
    table_read = _read_table(filename)
    for i in range(table.num_columns):
        col_written = table[i]
        col_read = table_read[i]
        assert table.field(i).name == table_read.field(i).name
        assert col_read.num_chunks == 1
        data_written = col_written.chunk(0)
        data_read = col_read.chunk(0)
        assert data_written.equals(data_read)
def alltypes_sample(size=10000, seed=0, categorical=False):
    """Build a DataFrame with one column per sample type used by the tests.

    Parameters
    ----------
    size : int
        Number of rows to generate.
    seed : int
        Seed for numpy's RNG so the sample is reproducible.
    categorical : bool
        If True, add a categorical copy of the string column.
    """
    np.random.seed(seed)
    arrays = {
        'uint8': np.arange(size, dtype=np.uint8),
        'uint16': np.arange(size, dtype=np.uint16),
        'uint32': np.arange(size, dtype=np.uint32),
        'uint64': np.arange(size, dtype=np.uint64),
        # Fixed: this column was previously generated with dtype int16.
        'int8': np.arange(size, dtype=np.int8),
        'int16': np.arange(size, dtype=np.int16),
        'int32': np.arange(size, dtype=np.int32),
        'int64': np.arange(size, dtype=np.int64),
        'float32': np.arange(size, dtype=np.float32),
        'float64': np.arange(size, dtype=np.float64),
        'bool': np.random.randn(size) > 0,
        # Consecutive millisecond timestamps, built explicitly instead of
        # relying on np.arange's ambiguous datetime-start/integer-stop form.
        'datetime': (np.datetime64("2016-01-01T00:00:00.001") +
                     np.arange(size).astype('timedelta64[ms]')),
        'str': pd.Series([str(x) for x in range(size)]),
        'empty_str': [''] * size,
        'str_with_nulls': [None] + [str(x) for x in range(size - 2)] + [None],
        'null': [None] * size,
        'null_list': [None] * 2 + [[None] * (x % 4) for x in range(size - 2)],
    }
    if categorical:
        arrays['str_category'] = arrays['str'].astype('category')
    return pd.DataFrame(arrays)
@pytest.mark.pandas
@pytest.mark.parametrize('chunk_size', [None, 1000])
def test_pandas_parquet_2_0_roundtrip(tempdir, chunk_size):
    """All sample types roundtrip via Parquet 2.0, preserving pandas metadata."""
    df = alltypes_sample(size=10000, categorical=True)
    filename = tempdir / 'pandas_roundtrip.parquet'
    arrow_table = pa.Table.from_pandas(df)
    assert arrow_table.schema.pandas_metadata is not None
    _write_table(arrow_table, filename, version="2.0",
                 coerce_timestamps='ms', chunk_size=chunk_size)
    table_read = pq.read_pandas(filename)
    assert table_read.schema.pandas_metadata is not None
    # Schema-level metadata (including the pandas blob) must survive the trip.
    assert arrow_table.schema.metadata == table_read.schema.metadata
    df_read = table_read.to_pandas()
    tm.assert_frame_equal(df, df_read)
def test_set_data_page_size():
    """A large table roundtrips with several explicit data page sizes."""
    values = pa.array([1, 2, 3] * 1000000)
    table = pa.Table.from_arrays([values], names=['f0'])
    for page_size in (2 << 16, 2 << 17, 2 << 18):
        _check_roundtrip(table, data_page_size=page_size)
@pytest.mark.pandas
def test_chunked_table_write():
    """Tables assembled from multiple record batches roundtrip correctly."""
    # Flat (non-nested) columns
    df = alltypes_sample(size=10)
    batch = pa.RecordBatch.from_pandas(df)
    table = pa.Table.from_batches([batch] * 3)
    _check_roundtrip(table, version='2.0')
    # Columns containing lists
    df, _ = dataframe_with_lists()
    batch = pa.RecordBatch.from_pandas(df)
    table = pa.Table.from_batches([batch] * 3)
    _check_roundtrip(table, version='2.0')
@pytest.mark.pandas
def test_no_memory_map(tempdir):
    """Reading with memory_map=False works for buffers and real files."""
    df = alltypes_sample(size=10)
    table = pa.Table.from_pandas(df)
    _check_roundtrip(table, read_table_kwargs={'memory_map': False},
                     version='2.0')
    filename = str(tempdir / 'tmp_file')
    with open(filename, 'wb') as f:
        _write_table(table, f, version='2.0')
    table_read = pq.read_pandas(filename, memory_map=False)
    assert table_read.equals(table)
def test_special_chars_filename(tempdir):
    """File names containing '#' and spaces are usable verbatim."""
    table = pa.Table.from_arrays([pa.array([42])], ["ints"])
    path = tempdir / "foo # bar"
    assert not path.exists()
    _write_table(table, str(path))
    assert path.exists()
    round_tripped = _read_table(str(path))
    assert round_tripped.equals(table)
@pytest.mark.pandas
def test_empty_table_roundtrip():
    """A zero-row table (including null-typed columns) roundtrips."""
    df = alltypes_sample(size=10)
    table = pa.Table.from_pandas(df)
    # Take a zero-length slice of every column to build an empty table.
    table = pa.Table.from_arrays(
        [col.chunk(0)[:0] for col in table.itercolumns()],
        names=table.schema.names)
    assert table.schema.field_by_name('null').type == pa.null()
    assert table.schema.field_by_name('null_list').type == pa.list_(pa.null())
    _check_roundtrip(table, version='2.0')
@pytest.mark.pandas
def test_empty_table_no_columns():
    """A table with zero columns survives a Parquet roundtrip."""
    table = pa.Table.from_pandas(pd.DataFrame(), preserve_index=False)
    _check_roundtrip(table)
def test_empty_lists_table_roundtrip():
    """A list column whose every value is an empty list roundtrips."""
    empty_lists = pa.array([[], []], type=pa.list_(pa.int32()))
    _check_roundtrip(pa.Table.from_arrays([empty_lists], ["A"]))
@pytest.mark.pandas
def test_pandas_parquet_datetime_tz():
    """Timezone-aware timestamps roundtrip in both columns and the index."""
    s = pd.Series([datetime.datetime(2017, 9, 6)])
    s = s.dt.tz_localize('utc')
    s.index = s
    # Both a column and an index to hit both use cases
    df = pd.DataFrame({'tz_aware': s,
                       'tz_eastern': s.dt.tz_convert('US/Eastern')},
                      index=s)
    f = BytesIO()
    arrow_table = pa.Table.from_pandas(df)
    _write_table(arrow_table, f, coerce_timestamps='ms')
    f.seek(0)
    table_read = pq.read_pandas(f)
    df_read = table_read.to_pandas()
    tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
@pytest.mark.skipif(six.PY2, reason='datetime.timezone is available since '
                                    'python version 3.2')
def test_datetime_timezone_tzinfo():
    """A datetime carrying a stdlib tzinfo object roundtrips through pandas."""
    value = datetime.datetime(2018, 1, 1, 1, 23, 45,
                              tzinfo=datetime.timezone.utc)
    df = pd.DataFrame({'foo': [value]})
    _roundtrip_pandas_dataframe(df, write_kwargs={})
@pytest.mark.pandas
def test_pandas_parquet_custom_metadata(tempdir):
    """The 'pandas' metadata blob is written into the Parquet file footer."""
    df = alltypes_sample(size=10000)
    filename = tempdir / 'pandas_roundtrip.parquet'
    arrow_table = pa.Table.from_pandas(df)
    assert b'pandas' in arrow_table.schema.metadata
    _write_table(arrow_table, filename, version='2.0', coerce_timestamps='ms')
    metadata = pq.read_metadata(filename).metadata
    assert b'pandas' in metadata
    # The default RangeIndex is serialized descriptively, not as a column.
    js = json.loads(metadata[b'pandas'].decode('utf8'))
    assert js['index_columns'] == [{'kind': 'range',
                                    'name': None,
                                    'start': 0, 'stop': 10000,
                                    'step': 1}]
@pytest.mark.pandas
def test_pandas_parquet_column_multiindex(tempdir):
    """A DataFrame with a column MultiIndex roundtrips through Parquet."""
    df = alltypes_sample(size=10)
    df.columns = pd.MultiIndex.from_tuples(
        list(zip(df.columns, df.columns[::-1])),
        names=['level_1', 'level_2']
    )
    filename = tempdir / 'pandas_roundtrip.parquet'
    arrow_table = pa.Table.from_pandas(df)
    assert arrow_table.schema.pandas_metadata is not None
    _write_table(arrow_table, filename, version='2.0', coerce_timestamps='ms')
    table_read = pq.read_pandas(filename)
    df_read = table_read.to_pandas()
    tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
def test_pandas_parquet_2_0_roundtrip_read_pandas_no_index_written(tempdir):
    """With preserve_index=False, pandas metadata carries no index columns."""
    df = alltypes_sample(size=10000)
    filename = tempdir / 'pandas_roundtrip.parquet'
    arrow_table = pa.Table.from_pandas(df, preserve_index=False)
    js = arrow_table.schema.pandas_metadata
    assert not js['index_columns']
    # ARROW-2170
    # While index_columns should be empty, columns needs to be filled still.
    assert js['columns']
    _write_table(arrow_table, filename, version='2.0', coerce_timestamps='ms')
    table_read = pq.read_pandas(filename)
    js = table_read.schema.pandas_metadata
    assert not js['index_columns']
    assert arrow_table.schema.metadata == table_read.schema.metadata
    df_read = table_read.to_pandas()
    tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
def test_pandas_parquet_1_0_roundtrip(tempdir):
    """Roundtrip a DataFrame of primitive columns using Parquet version 1.0."""
    size = 10000
    np.random.seed(0)
    df = pd.DataFrame({
        'uint8': np.arange(size, dtype=np.uint8),
        'uint16': np.arange(size, dtype=np.uint16),
        'uint32': np.arange(size, dtype=np.uint32),
        'uint64': np.arange(size, dtype=np.uint64),
        # Fixed: this column was previously generated with dtype int16.
        'int8': np.arange(size, dtype=np.int8),
        'int16': np.arange(size, dtype=np.int16),
        'int32': np.arange(size, dtype=np.int32),
        'int64': np.arange(size, dtype=np.int64),
        'float32': np.arange(size, dtype=np.float32),
        'float64': np.arange(size, dtype=np.float64),
        'bool': np.random.randn(size) > 0,
        'str': [str(x) for x in range(size)],
        'str_with_nulls': [None] + [str(x) for x in range(size - 2)] + [None],
        'empty_str': [''] * size
    })
    filename = tempdir / 'pandas_roundtrip.parquet'
    arrow_table = pa.Table.from_pandas(df)
    _write_table(arrow_table, filename, version='1.0')
    table_read = _read_table(filename)
    df_read = table_read.to_pandas()
    # We pass uint32_t as int64_t if we write Parquet version 1.0
    df['uint32'] = df['uint32'].values.astype(np.int64)
    tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
def test_multiple_path_types(tempdir):
    """Both PEP 519 path-like objects and plain strings work as paths."""
    # Test compatibility with PEP 519 path-like objects
    path = tempdir / 'zzz.parquet'
    df = pd.DataFrame({'x': np.arange(10, dtype=np.int64)})
    _write_table(df, path)
    table_read = _read_table(path)
    df_read = table_read.to_pandas()
    tm.assert_frame_equal(df, df_read)

    # Test compatibility with plain string paths.  Fixed: this was
    # previously built as str(tempdir) + 'zzz.parquet' — a missing path
    # separator that wrote the file outside the tmp directory.
    path = str(tempdir / 'zzz_str.parquet')
    df = pd.DataFrame({'x': np.arange(10, dtype=np.int64)})
    _write_table(df, path)
    table_read = _read_table(path)
    df_read = table_read.to_pandas()
    tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
def test_pandas_column_selection(tempdir):
    """Reading with columns=[...] returns exactly those columns."""
    size = 10000
    np.random.seed(0)
    df = pd.DataFrame({
        'uint8': np.arange(size, dtype=np.uint8),
        'uint16': np.arange(size, dtype=np.uint16)
    })
    filename = tempdir / 'pandas_roundtrip.parquet'
    arrow_table = pa.Table.from_pandas(df)
    _write_table(arrow_table, filename)
    table_read = _read_table(filename, columns=['uint8'])
    df_read = table_read.to_pandas()
    tm.assert_frame_equal(df[['uint8']], df_read)
    # ARROW-4267: Selection of duplicate columns still leads to these columns
    # being read uniquely.
    table_read = _read_table(filename, columns=['uint8', 'uint8'])
    df_read = table_read.to_pandas()
    tm.assert_frame_equal(df[['uint8']], df_read)
def _random_integers(size, dtype):
# We do not generate integers outside the int64 range
platform_int_info = np.iinfo('int_')
iinfo = np.iinfo(dtype)
return np.random.randint(max(iinfo.min, platform_int_info.min),
min(iinfo.max, platform_int_info.max),
size=size).astype(dtype)
def _test_dataframe(size=10000, seed=0):
    """Build a deterministic DataFrame mixing integer, float, bool, string
    and all-null columns for roundtrip tests."""
    np.random.seed(seed)
    # NOTE: the order of the RNG-consuming calls below matches the original
    # column order so the generated data stays identical for a given seed.
    int_dtypes = [np.uint8, np.uint16, np.uint32, np.uint64,
                  np.int8, np.int16, np.int32, np.int64]
    columns = {dt.__name__: _random_integers(size, dt) for dt in int_dtypes}
    columns['float32'] = np.random.randn(size).astype(np.float32)
    columns['float64'] = np.arange(size, dtype=np.float64)
    columns['bool'] = np.random.randn(size) > 0
    columns['strings'] = [tm.rands(10) for _ in range(size)]
    columns['all_none'] = [None] * size
    columns['all_none_category'] = [None] * size
    df = pd.DataFrame(columns)
    # TODO(PARQUET-1015)
    # df['all_none_category'] = df['all_none_category'].astype('category')
    return df
@pytest.mark.pandas
def test_pandas_parquet_native_file_roundtrip(tempdir):
    """Roundtrip via pyarrow's native BufferOutputStream/BufferReader."""
    df = _test_dataframe(10000)
    arrow_table = pa.Table.from_pandas(df)
    imos = pa.BufferOutputStream()
    _write_table(arrow_table, imos, version="2.0")
    buf = imos.getvalue()
    reader = pa.BufferReader(buf)
    df_read = _read_table(reader).to_pandas()
    tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
def test_parquet_incremental_file_build(tempdir):
    """Append tables one at a time with ParquetWriter; read the file back."""
    df = _test_dataframe(100)
    df['unique_id'] = 0
    arrow_table = pa.Table.from_pandas(df, preserve_index=False)
    out = pa.BufferOutputStream()
    frames = []
    # Use the writer as a context manager (as done elsewhere in this file)
    # so the file is finalized even if a write raises.
    with pq.ParquetWriter(out, arrow_table.schema, version='2.0') as writer:
        for i in range(10):
            df['unique_id'] = i
            arrow_table = pa.Table.from_pandas(df, preserve_index=False)
            writer.write_table(arrow_table)
            frames.append(df.copy())
    buf = out.getvalue()
    result = _read_table(pa.BufferReader(buf))
    expected = pd.concat(frames, ignore_index=True)
    tm.assert_frame_equal(result.to_pandas(), expected)
@pytest.mark.pandas
def test_read_pandas_column_subset(tempdir):
    """read_pandas honors a columns=[...] subset."""
    df = _test_dataframe(10000)
    arrow_table = pa.Table.from_pandas(df)
    imos = pa.BufferOutputStream()
    _write_table(arrow_table, imos, version="2.0")
    buf = imos.getvalue()
    reader = pa.BufferReader(buf)
    df_read = pq.read_pandas(reader, columns=['strings', 'uint8']).to_pandas()
    tm.assert_frame_equal(df[['strings', 'uint8']], df_read)
@pytest.mark.pandas
def test_pandas_parquet_empty_roundtrip(tempdir):
    """A zero-row DataFrame survives a Parquet roundtrip."""
    df = _test_dataframe(0)
    sink = pa.BufferOutputStream()
    _write_table(pa.Table.from_pandas(df), sink, version="2.0")
    reader = pa.BufferReader(sink.getvalue())
    df_read = _read_table(reader).to_pandas()
    tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
def test_pandas_parquet_pyfile_roundtrip(tempdir):
    """Write through a plain Python file object and read via BytesIO."""
    filename = tempdir / 'pandas_pyfile_roundtrip.parquet'
    size = 5
    df = pd.DataFrame({
        'int64': np.arange(size, dtype=np.int64),
        'float32': np.arange(size, dtype=np.float32),
        'float64': np.arange(size, dtype=np.float64),
        'bool': np.random.randn(size) > 0,
        'strings': ['foo', 'bar', None, 'baz', 'qux']
    })
    arrow_table = pa.Table.from_pandas(df)
    with filename.open('wb') as f:
        _write_table(arrow_table, f, version="1.0")
    data = io.BytesIO(filename.read_bytes())
    table_read = _read_table(data)
    df_read = table_read.to_pandas()
    tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
def test_pandas_parquet_configuration_options(tempdir):
    """Roundtrips succeed across dictionary, statistics and compression
    writer options."""
    size = 10000
    np.random.seed(0)
    df = pd.DataFrame({
        'uint8': np.arange(size, dtype=np.uint8),
        'uint16': np.arange(size, dtype=np.uint16),
        'uint32': np.arange(size, dtype=np.uint32),
        'uint64': np.arange(size, dtype=np.uint64),
        # Fixed: this column was previously generated with dtype int16.
        'int8': np.arange(size, dtype=np.int8),
        'int16': np.arange(size, dtype=np.int16),
        'int32': np.arange(size, dtype=np.int32),
        'int64': np.arange(size, dtype=np.int64),
        'float32': np.arange(size, dtype=np.float32),
        'float64': np.arange(size, dtype=np.float64),
        'bool': np.random.randn(size) > 0
    })
    filename = tempdir / 'pandas_roundtrip.parquet'
    arrow_table = pa.Table.from_pandas(df)

    for use_dictionary in [True, False]:
        _write_table(arrow_table, filename, version='2.0',
                     use_dictionary=use_dictionary)
        table_read = _read_table(filename)
        df_read = table_read.to_pandas()
        tm.assert_frame_equal(df, df_read)

    for write_statistics in [True, False]:
        _write_table(arrow_table, filename, version='2.0',
                     write_statistics=write_statistics)
        table_read = _read_table(filename)
        df_read = table_read.to_pandas()
        tm.assert_frame_equal(df, df_read)

    for compression in ['NONE', 'SNAPPY', 'GZIP', 'LZ4', 'ZSTD']:
        _write_table(arrow_table, filename, version='2.0',
                     compression=compression)
        table_read = _read_table(filename)
        df_read = table_read.to_pandas()
        tm.assert_frame_equal(df, df_read)
def make_sample_file(table_or_df):
    """Write *table_or_df* to an in-memory Parquet file and open it.

    Returns a ``pq.ParquetFile`` positioned at the start of the buffer.
    """
    if isinstance(table_or_df, pa.Table):
        table = table_or_df
    else:
        table = pa.Table.from_pandas(table_or_df)

    sink = io.BytesIO()
    _write_table(table, sink, compression='SNAPPY', version='2.0',
                 coerce_timestamps='ms')
    sink.seek(0)
    return pq.ParquetFile(sink)
@pytest.mark.pandas
def test_parquet_metadata_api():
    """Exercise the FileMetaData / RowGroupMetaData / ColumnChunkMetaData
    accessors on a sample file containing all test types."""
    df = alltypes_sample(size=10000)
    df = df.reindex(columns=sorted(df.columns))
    df.index = np.random.randint(0, 1000000, size=len(df))
    fileh = make_sample_file(df)
    ncols = len(df.columns)
    # Series of sniff tests
    meta = fileh.metadata
    repr(meta)
    assert meta.num_rows == len(df)
    assert meta.num_columns == ncols + 1  # +1 for index
    assert meta.num_row_groups == 1
    assert meta.format_version == '2.0'
    assert 'parquet-cpp' in meta.created_by
    assert isinstance(meta.serialized_size, int)
    assert isinstance(meta.metadata, dict)
    # Schema
    schema = fileh.schema
    assert meta.schema is schema
    assert len(schema) == ncols + 1  # +1 for index
    repr(schema)
    col = schema[0]
    repr(col)
    assert col.name == df.columns[0]
    assert col.max_definition_level == 1
    # Fixed: this assertion was duplicated in the original.
    assert col.max_repetition_level == 0
    assert col.physical_type == 'BOOLEAN'
    assert col.converted_type == 'NONE'
    with pytest.raises(IndexError):
        schema[ncols + 1]  # +1 for index
    with pytest.raises(IndexError):
        schema[-1]
    # Row group
    for rg in range(meta.num_row_groups):
        rg_meta = meta.row_group(rg)
        assert isinstance(rg_meta, pq.RowGroupMetaData)
        repr(rg_meta)
        for col in range(rg_meta.num_columns):
            col_meta = rg_meta.column(col)
            assert isinstance(col_meta, pq.ColumnChunkMetaData)
            repr(col_meta)
    with pytest.raises(IndexError):
        meta.row_group(-1)
    with pytest.raises(IndexError):
        meta.row_group(meta.num_row_groups + 1)
    rg_meta = meta.row_group(0)
    assert rg_meta.num_rows == len(df)
    assert rg_meta.num_columns == ncols + 1  # +1 for index
    assert rg_meta.total_byte_size > 0
    with pytest.raises(IndexError):
        col_meta = rg_meta.column(-1)
    with pytest.raises(IndexError):
        col_meta = rg_meta.column(ncols + 2)
    col_meta = rg_meta.column(0)
    assert col_meta.file_offset > 0
    assert col_meta.file_path == ''  # created from BytesIO
    assert col_meta.physical_type == 'BOOLEAN'
    assert col_meta.num_values == 10000
    assert col_meta.path_in_schema == 'bool'
    assert col_meta.is_stats_set is True
    assert isinstance(col_meta.statistics, pq.Statistics)
    assert col_meta.compression == 'SNAPPY'
    assert col_meta.encodings == ('PLAIN', 'RLE')
    assert col_meta.has_dictionary_page is False
    assert col_meta.dictionary_page_offset is None
    assert col_meta.data_page_offset > 0
    assert col_meta.total_compressed_size > 0
    assert col_meta.total_uncompressed_size > 0
    with pytest.raises(NotImplementedError):
        col_meta.has_index_page
    with pytest.raises(NotImplementedError):
        col_meta.index_page_offset
@pytest.mark.pandas
@pytest.mark.parametrize(
    (
        'data',
        'type',
        'physical_type',
        'min_value',
        'max_value',
        'null_count',
        'num_values',
        'distinct_count'
    ),
    [
        ([1, 2, 2, None, 4], pa.uint8(), 'INT32', 1, 4, 1, 4, 0),
        ([1, 2, 2, None, 4], pa.uint16(), 'INT32', 1, 4, 1, 4, 0),
        ([1, 2, 2, None, 4], pa.uint32(), 'INT32', 1, 4, 1, 4, 0),
        ([1, 2, 2, None, 4], pa.uint64(), 'INT64', 1, 4, 1, 4, 0),
        ([-1, 2, 2, None, 4], pa.int8(), 'INT32', -1, 4, 1, 4, 0),
        ([-1, 2, 2, None, 4], pa.int16(), 'INT32', -1, 4, 1, 4, 0),
        ([-1, 2, 2, None, 4], pa.int32(), 'INT32', -1, 4, 1, 4, 0),
        ([-1, 2, 2, None, 4], pa.int64(), 'INT64', -1, 4, 1, 4, 0),
        (
            [-1.1, 2.2, 2.3, None, 4.4], pa.float32(),
            'FLOAT', -1.1, 4.4, 1, 4, 0
        ),
        (
            [-1.1, 2.2, 2.3, None, 4.4], pa.float64(),
            'DOUBLE', -1.1, 4.4, 1, 4, 0
        ),
        (
            [u'', u'b', unichar(1000), None, u'aaa'], pa.binary(),
            'BYTE_ARRAY', b'', unichar(1000).encode('utf-8'), 1, 4, 0
        ),
        (
            [True, False, False, True, True], pa.bool_(),
            'BOOLEAN', False, True, 0, 5, 0
        ),
        (
            [b'\x00', b'b', b'12', None, b'aaa'], pa.binary(),
            'BYTE_ARRAY', b'\x00', b'b', 1, 4, 0
        ),
    ]
)
def test_parquet_column_statistics_api(data, type, physical_type, min_value,
                                       max_value, null_count, num_values,
                                       distinct_count):
    """Column-chunk statistics expose min/max/null/num/distinct values."""
    df = pd.DataFrame({'data': data})
    schema = pa.schema([pa.field('data', type)])
    # safe=False disables safe-cast checking while coercing *data* to *type*.
    table = pa.Table.from_pandas(df, schema=schema, safe=False)
    fileh = make_sample_file(table)
    meta = fileh.metadata
    rg_meta = meta.row_group(0)
    col_meta = rg_meta.column(0)
    stat = col_meta.statistics
    assert stat.has_min_max
    assert _close(type, stat.min, min_value)
    assert _close(type, stat.max, max_value)
    assert stat.null_count == null_count
    assert stat.num_values == num_values
    # TODO(kszucs) until parquet-cpp API doesn't expose HasDistinctCount
    assert stat.distinct_count == distinct_count
    assert stat.physical_type == physical_type
def _close(type, left, right):
    """Compare two statistics values, tolerating float representation error."""
    if type == pa.float32():
        tolerance = 1E-7
    elif type == pa.float64():
        tolerance = 1E-13
    else:
        # Non-float types must match exactly.
        return left == right
    return abs(left - right) < tolerance
def test_statistics_convert_logical_types(tempdir):
    """Statistics min/max come back as logical values (strings, times,
    timestamps), not raw physical integers."""
    cases = [(10, 11164359321221007157, pa.uint64()),
             (10, 4294967295, pa.uint32()),
             (u"ähnlich", u"öffentlich", pa.utf8()),
             (datetime.time(10, 30, 0, 1000), datetime.time(15, 30, 0, 1000),
              pa.time32('ms')),
             (datetime.time(10, 30, 0, 1000), datetime.time(15, 30, 0, 1000),
              pa.time64('us')),
             (datetime.datetime(2019, 6, 24, 0, 0, 0, 1000),
              datetime.datetime(2019, 6, 25, 0, 0, 0, 1000),
              pa.timestamp('ms')),
             (datetime.datetime(2019, 6, 24, 0, 0, 0, 1000),
              datetime.datetime(2019, 6, 25, 0, 0, 0, 1000),
              pa.timestamp('us'))]
    for i, (min_val, max_val, typ) in enumerate(cases):
        t = pa.Table.from_arrays([pa.array([min_val, max_val], type=typ)],
                                 ['col'])
        path = str(tempdir / ('example{}.parquet'.format(i)))
        pq.write_table(t, path, version='2.0')
        pf = pq.ParquetFile(path)
        stats = pf.metadata.row_group(0).column(0).statistics
        assert stats.min == min_val
        assert stats.max == max_val
def test_parquet_write_disable_statistics(tempdir):
    """write_statistics accepts True, False, or a list of column names."""
    table = pa.Table.from_pydict(
        {'a': pa.array([1, 2, 3]), 'b': pa.array(['a', 'b', 'c'])})
    # Default: statistics are written for every column.
    _write_table(table, tempdir / 'data.parquet')
    meta = pq.read_metadata(tempdir / 'data.parquet')
    for col in [0, 1]:
        cc = meta.row_group(0).column(col)
        assert cc.is_stats_set is True
        assert cc.statistics is not None
    # write_statistics=False: no column gets statistics.
    _write_table(table, tempdir / 'data2.parquet', write_statistics=False)
    meta = pq.read_metadata(tempdir / 'data2.parquet')
    for col in [0, 1]:
        cc = meta.row_group(0).column(col)
        assert cc.is_stats_set is False
        assert cc.statistics is None
    # A column-name list enables statistics only for the listed columns.
    _write_table(table, tempdir / 'data3.parquet', write_statistics=['a'])
    meta = pq.read_metadata(tempdir / 'data3.parquet')
    cc_a = meta.row_group(0).column(0)
    assert cc_a.is_stats_set is True
    assert cc_a.statistics is not None
    cc_b = meta.row_group(0).column(1)
    assert cc_b.is_stats_set is False
    assert cc_b.statistics is None
@pytest.mark.pandas
def test_compare_schemas():
    """ParquetSchema and ColumnSchema support equals() and ==/!=."""
    df = alltypes_sample(size=10000)
    fileh = make_sample_file(df)
    fileh2 = make_sample_file(df)
    # A file with only every other column has a different schema.
    fileh3 = make_sample_file(df[df.columns[::2]])
    assert isinstance(fileh.schema, pq.ParquetSchema)
    assert fileh.schema.equals(fileh.schema)
    assert fileh.schema == fileh.schema
    assert fileh.schema.equals(fileh2.schema)
    assert fileh.schema == fileh2.schema
    assert fileh.schema != 'arbitrary object'
    assert not fileh.schema.equals(fileh3.schema)
    assert fileh.schema != fileh3.schema
    assert isinstance(fileh.schema[0], pq.ColumnSchema)
    assert fileh.schema[0].equals(fileh.schema[0])
    assert fileh.schema[0] == fileh.schema[0]
    assert not fileh.schema[0].equals(fileh.schema[1])
    assert fileh.schema[0] != fileh.schema[1]
    assert fileh.schema[0] != 'arbitrary object'
def test_validate_schema_write_table(tempdir):
    """Writing a table whose schema differs from the writer's declared
    schema raises ValueError."""
    simple_fields = [
        pa.field('POS', pa.uint32()),
        pa.field('desc', pa.string())
    ]
    simple_schema = pa.schema(simple_fields)
    # pa.array([1]) infers a type that does not match the declared uint32.
    simple_from_array = [pa.array([1]), pa.array(['bla'])]
    simple_table = pa.Table.from_arrays(simple_from_array, ['POS', 'desc'])
    path = tempdir / 'simple_validate_schema.parquet'
    with pq.ParquetWriter(path, simple_schema,
                          version='2.0',
                          compression='snappy', flavor='spark') as w:
        with pytest.raises(ValueError):
            w.write_table(simple_table)
@pytest.mark.pandas
def test_column_of_arrays(tempdir):
    """Columns holding numpy arrays (nested values) roundtrip."""
    df, schema = dataframe_with_arrays()
    filename = tempdir / 'pandas_roundtrip.parquet'
    arrow_table = pa.Table.from_pandas(df, schema=schema)
    _write_table(arrow_table, filename, version="2.0", coerce_timestamps='ms')
    table_read = _read_table(filename)
    df_read = table_read.to_pandas()
    tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
def test_coerce_timestamps(tempdir):
    """coerce_timestamps rewrites list-of-timestamp data to the requested
    unit and rejects unknown units with ValueError."""
    from collections import OrderedDict
    # OrderedDict keeps the column order stable on older Pythons.
    arrays = OrderedDict()
    fields = [pa.field('datetime64',
                       pa.list_(pa.timestamp('ms')))]
    arrays['datetime64'] = [
        np.array(['2007-07-13T01:23:34.123456789',
                  None,
                  '2010-08-13T05:46:57.437699912'],
                 dtype='datetime64[ms]'),
        None,
        None,
        np.array(['2007-07-13T02',
                  None,
                  '2010-08-13T05:46:57.437699912'],
                 dtype='datetime64[ms]'),
    ]
    df = pd.DataFrame(arrays)
    schema = pa.schema(fields)
    filename = tempdir / 'pandas_roundtrip.parquet'
    arrow_table = pa.Table.from_pandas(df, schema=schema)
    _write_table(arrow_table, filename, version="2.0", coerce_timestamps='us')
    table_read = _read_table(filename)
    df_read = table_read.to_pandas()

    df_expected = df.copy()
    for i, x in enumerate(df_expected['datetime64']):
        if isinstance(x, np.ndarray):
            # Fixed: use .at instead of chained indexing
            # (df['col'][i] = ...), which silently fails under pandas
            # copy-on-write semantics.
            df_expected.at[i, 'datetime64'] = x.astype('M8[us]')
    tm.assert_frame_equal(df_expected, df_read)

    with pytest.raises(ValueError):
        _write_table(arrow_table, filename, version='2.0',
                     coerce_timestamps='unknown')
@pytest.mark.pandas
def test_coerce_timestamps_truncated(tempdir):
    """allow_truncated_timestamps=True permits dropping sub-ms precision
    when coercing microsecond timestamps to milliseconds."""
    dt_us = datetime.datetime(year=2017, month=1, day=1, hour=1, minute=1,
                              second=1, microsecond=1)
    dt_ms = datetime.datetime(year=2017, month=1, day=1, hour=1, minute=1,
                              second=1)
    fields_us = [pa.field('datetime64', pa.timestamp('us'))]
    arrays_us = {'datetime64': [dt_us, dt_ms]}
    df_us = pd.DataFrame(arrays_us)
    schema_us = pa.schema(fields_us)
    filename = tempdir / 'pandas_truncated.parquet'
    table_us = pa.Table.from_pandas(df_us, schema=schema_us)
    _write_table(table_us, filename, version="2.0", coerce_timestamps='ms',
                 allow_truncated_timestamps=True)
    table_ms = _read_table(filename)
    df_ms = table_ms.to_pandas()
    # The microsecond component is truncated away on write.
    arrays_expected = {'datetime64': [dt_ms, dt_ms]}
    df_expected = pd.DataFrame(arrays_expected)
    tm.assert_frame_equal(df_expected, df_ms)
@pytest.mark.pandas
def test_column_of_lists(tempdir):
    """Columns whose cells are Python lists roundtrip."""
    df, schema = dataframe_with_lists(parquet_compatible=True)
    filename = tempdir / 'pandas_roundtrip.parquet'
    arrow_table = pa.Table.from_pandas(df, schema=schema)
    _write_table(arrow_table, filename, version='2.0')
    table_read = _read_table(filename)
    df_read = table_read.to_pandas()
    if PY2:
        # assert_frame_equal fails when comparing datetime.date and
        # np.datetime64, even if they are equal on python 2.7
        for col in ['date32[day]_list', 'date64[ms]_list']:
            df[col] = df[col].apply(
                lambda x: list(map(np.datetime64, x)) if x else x
            )
    tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
def test_date_time_types(tempdir):
    """Roundtrip date, time and timestamp columns, covering unit coercion,
    INT64 timestamp storage, and the deprecated INT96 encoding."""
    t1 = pa.date32()
    data1 = np.array([17259, 17260, 17261], dtype='int32')
    a1 = pa.array(data1, type=t1)
    t2 = pa.date64()
    data2 = data1.astype('int64') * 86400000
    a2 = pa.array(data2, type=t2)
    t3 = pa.timestamp('us')
    start = pd.Timestamp('2001-01-01').value / 1000
    data3 = np.array([start, start + 1, start + 2], dtype='int64')
    a3 = pa.array(data3, type=t3)
    t4 = pa.time32('ms')
    data4 = np.arange(3, dtype='i4')
    a4 = pa.array(data4, type=t4)
    t5 = pa.time64('us')
    a5 = pa.array(data4.astype('int64'), type=t5)
    t6 = pa.time32('s')
    a6 = pa.array(data4, type=t6)
    ex_t6 = pa.time32('ms')
    ex_a6 = pa.array(data4 * 1000, type=ex_t6)
    t7 = pa.timestamp('ns')
    start = pd.Timestamp('2001-01-01').value
    data7 = np.array([start, start + 1000, start + 2000],
                     dtype='int64')
    a7 = pa.array(data7, type=t7)
    table = pa.Table.from_arrays([a1, a2, a3, a4, a5, a6, a7],
                                 ['date32', 'date64', 'timestamp[us]',
                                  'time32[s]', 'time64[us]',
                                  'time32_from64[s]',
                                  'timestamp[ns]'])
    # Per the expected table: date64 comes back as date32 values (a1)
    # and time32[s] comes back as time32[ms] (ex_a6).
    expected = pa.Table.from_arrays([a1, a1, a3, a4, a5, ex_a6, a7],
                                    ['date32', 'date64', 'timestamp[us]',
                                     'time32[s]', 'time64[us]',
                                     'time32_from64[s]',
                                     'timestamp[ns]'])
    _check_roundtrip(table, expected=expected, version='2.0')
    # All timestamp units should be stored as physical INT64 by default.
    t0 = pa.timestamp('ms')
    data0 = np.arange(4, dtype='int64')
    a0 = pa.array(data0, type=t0)
    t1 = pa.timestamp('us')
    data1 = np.arange(4, dtype='int64')
    a1 = pa.array(data1, type=t1)
    t2 = pa.timestamp('ns')
    data2 = np.arange(4, dtype='int64')
    a2 = pa.array(data2, type=t2)
    table = pa.Table.from_arrays([a0, a1, a2],
                                 ['ts[ms]', 'ts[us]', 'ts[ns]'])
    expected = pa.Table.from_arrays([a0, a1, a2],
                                    ['ts[ms]', 'ts[us]', 'ts[ns]'])
    filename = tempdir / 'int64_timestamps.parquet'
    _write_table(table, filename, version='2.0')
    parquet_schema = pq.ParquetFile(filename).schema
    for i in range(3):
        assert parquet_schema.column(i).physical_type == 'INT64'
    read_table = _read_table(filename)
    assert read_table.equals(expected)
    # With the deprecated INT96 option, everything comes back in nanoseconds.
    t0_ns = pa.timestamp('ns')
    data0_ns = np.array(data0 * 1000000, dtype='int64')
    a0_ns = pa.array(data0_ns, type=t0_ns)
    t1_ns = pa.timestamp('ns')
    data1_ns = np.array(data1 * 1000, dtype='int64')
    a1_ns = pa.array(data1_ns, type=t1_ns)
    expected = pa.Table.from_arrays([a0_ns, a1_ns, a2],
                                    ['ts[ms]', 'ts[us]', 'ts[ns]'])
    filename = tempdir / 'explicit_int96_timestamps.parquet'
    _write_table(table, filename, version='2.0',
                 use_deprecated_int96_timestamps=True)
    parquet_schema = pq.ParquetFile(filename).schema
    for i in range(3):
        assert parquet_schema.column(i).physical_type == 'INT96'
    read_table = _read_table(filename)
    assert read_table.equals(expected)
    # The 'spark' flavor also produces INT96 timestamps.
    filename = tempdir / 'spark_int96_timestamps.parquet'
    _write_table(table, filename, version='2.0',
                 flavor='spark')
    parquet_schema = pq.ParquetFile(filename).schema
    for i in range(3):
        assert parquet_schema.column(i).physical_type == 'INT96'
    read_table = _read_table(filename)
    assert read_table.equals(expected)
def test_timestamp_restore_timezone():
    """The timezone of a timestamp column is preserved through Parquet."""
    tz_type = pa.timestamp('ms', tz='America/New_York')
    column = pa.array([1, 2, 3], type=tz_type)
    _check_roundtrip(pa.table([column], names=['f0']))
@pytest.mark.pandas
def test_list_of_datetime_time_roundtrip():
    """A cell holding a list of datetime.time values roundtrips."""
    stamps = pd.to_datetime(['09:00', '09:30', '10:00', '10:30', '11:00',
                             '11:30', '12:00'])
    frame = pd.DataFrame({'time': [stamps.time]})
    _roundtrip_pandas_dataframe(frame, write_kwargs={})
@pytest.mark.pandas
def test_parquet_version_timestamp_differences():
    """Timestamp unit coercion differs between Parquet 1.0 and 2.0."""
    i_s = pd.Timestamp('2010-01-01').value / 1000000000  # := 1262304000
    d_s = np.arange(i_s, i_s + 10, 1, dtype='int64')
    d_ms = d_s * 1000
    d_us = d_ms * 1000
    d_ns = d_us * 1000
    a_s = pa.array(d_s, type=pa.timestamp('s'))
    a_ms = pa.array(d_ms, type=pa.timestamp('ms'))
    a_us = pa.array(d_us, type=pa.timestamp('us'))
    a_ns = pa.array(d_ns, type=pa.timestamp('ns'))
    names = ['ts:s', 'ts:ms', 'ts:us', 'ts:ns']
    table = pa.Table.from_arrays([a_s, a_ms, a_us, a_ns], names)
    # Default (1.0): seconds promote to ms, nanoseconds demote to us.
    expected = pa.Table.from_arrays([a_ms, a_ms, a_us, a_us], names)
    _check_roundtrip(table, expected)
    # Version 2.0: nanoseconds are preserved.
    expected = pa.Table.from_arrays([a_ms, a_ms, a_us, a_ns], names)
    _check_roundtrip(table, expected, version='2.0')
    # coerce_timestamps forces every column to the requested unit.
    expected = pa.Table.from_arrays([a_ms, a_ms, a_ms, a_ms], names)
    _check_roundtrip(table, expected, coerce_timestamps='ms')
    expected = pa.Table.from_arrays([a_us, a_us, a_us, a_us], names)
    _check_roundtrip(table, expected, version='2.0', coerce_timestamps='us')
    # The deprecated INT96 encoding yields nanoseconds in both versions.
    expected = pa.Table.from_arrays([a_ns, a_ns, a_ns, a_ns], names)
    _check_roundtrip(table, expected,
                     use_deprecated_int96_timestamps=True)
    _check_roundtrip(table, expected, version='2.0',
                     use_deprecated_int96_timestamps=True)
def test_large_list_records():
    """List columns with varied lengths, empties and nulls roundtrip."""
    # Fixed: seed the RNG so the generated fixture (and any failure) is
    # reproducible across runs.
    np.random.seed(0)
    list_lengths = np.random.randint(0, 500, size=50)
    list_lengths[::10] = 0
    # Every 8th entry is a null list instead of a list of ints.
    list_values = [list(map(int, np.random.randint(0, 100, size=x)))
                   if i % 8 else None
                   for i, x in enumerate(list_lengths)]
    a1 = pa.array(list_values)
    table = pa.Table.from_arrays([a1], ['int_lists'])
    _check_roundtrip(table)
def test_sanitized_spark_field_names():
    """The 'spark' flavor replaces forbidden characters in field names."""
    column = pa.array([0, 1, 2, 3, 4])
    table = pa.Table.from_arrays([column], ['prohib; ,\t{}'])
    result = _roundtrip_table(table, write_table_kwargs={'flavor': 'spark'})
    assert result.schema[0].name == 'prohib______'
@pytest.mark.pandas
def test_spark_flavor_preserves_pandas_metadata():
    """The 'spark' flavor keeps enough metadata to restore a named index."""
    df = _test_dataframe(size=100)
    df.index = np.arange(0, 10 * len(df), 10)
    df.index.name = 'foo'
    result = _roundtrip_pandas_dataframe(df, {'version': '2.0',
                                              'flavor': 'spark'})
    tm.assert_frame_equal(result, df)
def test_fixed_size_binary():
    """fixed_size_binary(10) values, including nulls, roundtrip."""
    values = [b'fooooooooo', None, b'barooooooo', b'quxooooooo']
    column = pa.array(values, type=pa.binary(10))
    _check_roundtrip(pa.Table.from_arrays([column], ['binary[10]']))
@pytest.mark.pandas
def test_multithreaded_read():
    """use_threads=True and use_threads=False produce identical tables."""
    df = alltypes_sample(size=10000)
    table = pa.Table.from_pandas(df)
    buf = io.BytesIO()
    _write_table(table, buf, compression='SNAPPY', version='2.0')
    buf.seek(0)
    table1 = _read_table(buf, use_threads=True)
    buf.seek(0)
    table2 = _read_table(buf, use_threads=False)
    assert table1.equals(table2)
@pytest.mark.pandas
def test_min_chunksize():
    """chunk_size=-1 is accepted; chunk_size=0 raises ValueError."""
    data = pd.DataFrame([np.arange(4)], columns=['A', 'B', 'C', 'D'])
    table = pa.Table.from_pandas(data.reset_index())
    buf = io.BytesIO()
    _write_table(table, buf, chunk_size=-1)
    buf.seek(0)
    result = _read_table(buf)
    assert result.equals(table)
    # chunk_size=0 is rejected before anything is written.
    with pytest.raises(ValueError):
        _write_table(table, buf, chunk_size=0)
@pytest.mark.pandas
def test_pass_separate_metadata():
    """ParquetFile accepts externally read FileMetaData via metadata=."""
    df = alltypes_sample(size=10000)
    a_table = pa.Table.from_pandas(df)
    buf = io.BytesIO()
    _write_table(a_table, buf, compression='snappy', version='2.0')
    buf.seek(0)
    metadata = pq.read_metadata(buf)
    buf.seek(0)
    fileh = pq.ParquetFile(buf, metadata=metadata)
    tm.assert_frame_equal(df, fileh.read().to_pandas())
@pytest.mark.pandas
def test_read_single_row_group():
    """Reading row groups one by one and concatenating rebuilds the file."""
    N, K = 10000, 4
    df = alltypes_sample(size=N)
    a_table = pa.Table.from_pandas(df)
    buf = io.BytesIO()
    # Fixed: use integer division — N / K is a float under Python 3.
    _write_table(a_table, buf, row_group_size=N // K,
                 compression='snappy', version='2.0')
    buf.seek(0)
    pf = pq.ParquetFile(buf)
    assert pf.num_row_groups == K
    row_groups = [pf.read_row_group(i) for i in range(K)]
    result = pa.concat_tables(row_groups)
    tm.assert_frame_equal(df, result.to_pandas())
@pytest.mark.pandas
def test_read_single_row_group_with_column_subset():
    """read_row_group honors a columns subset, including duplicates."""
    N, K = 10000, 4
    df = alltypes_sample(size=N)
    a_table = pa.Table.from_pandas(df)
    buf = io.BytesIO()
    # Fixed: use integer division — N / K is a float under Python 3.
    _write_table(a_table, buf, row_group_size=N // K,
                 compression='snappy', version='2.0')
    buf.seek(0)
    pf = pq.ParquetFile(buf)
    cols = list(df.columns[:2])
    row_groups = [pf.read_row_group(i, columns=cols) for i in range(K)]
    result = pa.concat_tables(row_groups)
    tm.assert_frame_equal(df[cols], result.to_pandas())
    # Duplicate column selection still reads each column only once.
    row_groups = [pf.read_row_group(i, columns=cols + cols) for i in range(K)]
    result = pa.concat_tables(row_groups)
    tm.assert_frame_equal(df[cols], result.to_pandas())
@pytest.mark.pandas
def test_scan_contents():
    """scan_contents counts all rows, with or without a column subset."""
    N, K = 10000, 4
    df = alltypes_sample(size=N)
    a_table = pa.Table.from_pandas(df)
    buf = io.BytesIO()
    # Fixed: use integer division — N / K is a float under Python 3.
    _write_table(a_table, buf, row_group_size=N // K,
                 compression='snappy', version='2.0')
    buf.seek(0)
    pf = pq.ParquetFile(buf)
    assert pf.scan_contents() == 10000
    assert pf.scan_contents(df.columns[:4]) == 10000
@pytest.mark.pandas
def test_parquet_piece_read(tempdir):
    """A single-file ParquetDatasetPiece reads back the written table."""
    frame = _test_dataframe(1000)
    expected = pa.Table.from_pandas(frame)

    target = tempdir / 'parquet_piece_read.parquet'
    _write_table(expected, target, version='2.0')

    piece = pq.ParquetDatasetPiece(target)
    assert piece.read().equals(expected)
@pytest.mark.pandas
def test_parquet_piece_open_and_get_metadata(tempdir):
    """Piece.read() yields a Table and get_metadata() a FileMetaData."""
    frame = _test_dataframe(100)
    written = pa.Table.from_pandas(frame)

    target = tempdir / 'parquet_piece_read.parquet'
    _write_table(written, target, version='2.0')

    piece = pq.ParquetDatasetPiece(target)
    read_back = piece.read()
    assert isinstance(read_back, pa.Table)

    file_meta = piece.get_metadata()
    assert isinstance(file_meta, pq.FileMetaData)

    assert written == read_back
def test_parquet_piece_basics():
    """str() rendering and equality semantics of ParquetDatasetPiece."""
    path = '/baz.parq'

    plain = pq.ParquetDatasetPiece(path)
    with_group = pq.ParquetDatasetPiece(path, row_group=1)
    with_keys = pq.ParquetDatasetPiece(
        path, row_group=1, partition_keys=[('foo', 0), ('bar', 1)])

    assert str(plain) == path
    assert str(with_group) == '/baz.parq | row_group=1'
    assert str(with_keys) == 'partition[foo=0, bar=1] /baz.parq | row_group=1'

    # reflexive equality plus inequality across differing pieces
    assert plain == plain
    assert with_group == with_group
    assert with_keys == with_keys
    assert plain != with_keys
def test_partition_set_dictionary_type():
    """PartitionSet infers string/integer dictionaries; unsupported key
    types (datetime) raise TypeError on dictionary access."""
    str_set = pq.PartitionSet('key1', [u('foo'), u('bar'), u('baz')])
    int_set = pq.PartitionSet('key2', [2007, 2008, 2009])

    assert isinstance(str_set.dictionary, pa.StringArray)
    assert isinstance(int_set.dictionary, pa.IntegerArray)

    dt_set = pq.PartitionSet('key2', [datetime.datetime(2007, 1, 1)])
    with pytest.raises(TypeError):
        dt_set.dictionary
@pytest.mark.pandas
def test_read_partitioned_directory(tempdir):
    """Partition-directory reads work on the local filesystem."""
    _partition_test_for_filesystem(LocalFileSystem.get_instance(), tempdir)
@pytest.mark.pandas
def test_create_parquet_dataset_multi_threaded(tempdir):
    """Multi-threaded dataset discovery finds the same partitions as a
    single-threaded ParquetManifest walk."""
    fs = LocalFileSystem.get_instance()
    base_path = tempdir
    _partition_test_for_filesystem(fs, base_path)
    # single-threaded reference walk
    manifest = pq.ParquetManifest(base_path, filesystem=fs,
                                  metadata_nthreads=1)
    dataset = pq.ParquetDataset(base_path, filesystem=fs, metadata_nthreads=16)
    assert len(dataset.pieces) > 0
    partitions = dataset.partitions
    assert len(partitions.partition_names) > 0
    # same partition structure regardless of thread count
    assert partitions.partition_names == manifest.partitions.partition_names
    assert len(partitions.levels) == len(manifest.partitions.levels)
@pytest.mark.pandas
def test_equivalency(tempdir):
    """Filters in DNF form prune partitioned data; filter values with
    embedded NUL bytes raise NotImplementedError."""
    fs = LocalFileSystem.get_instance()
    base_path = tempdir
    integer_keys = [0, 1]
    string_keys = ['a', 'b', 'c']
    boolean_keys = [True, False]
    partition_spec = [
        ['integer', integer_keys],
        ['string', string_keys],
        ['boolean', boolean_keys]
    ]
    df = pd.DataFrame({
        'integer': np.array(integer_keys, dtype='i4').repeat(15),
        'string': np.tile(np.tile(np.array(string_keys, dtype=object), 5), 2),
        'boolean': np.tile(np.tile(np.array(boolean_keys, dtype='bool'), 5),
                           3),
    }, columns=['integer', 'string', 'boolean'])
    _generate_partition_directories(fs, base_path, partition_spec, df)
    # single conjunction: every excluded partition key must be absent
    dataset = pq.ParquetDataset(
        base_path, filesystem=fs,
        filters=[('integer', '=', 1), ('string', '!=', 'b'),
                 ('boolean', '==', True)]
    )
    table = dataset.read()
    result_df = (table.to_pandas().reset_index(drop=True))
    assert 0 not in result_df['integer'].values
    assert 'b' not in result_df['string'].values
    assert False not in result_df['boolean'].values
    # disjunction (list of conjunctions): each returned row satisfies
    # one clause, and the two clauses account for every row
    filters = [
        [
            ('integer', '=', 1),
            ('string', '!=', 'b'),
            ('boolean', '==', 'True')
        ],
        [('integer', '=', 0), ('boolean', '==', 'False')]
    ]
    dataset = pq.ParquetDataset(base_path, filesystem=fs, filters=filters)
    table = dataset.read()
    result_df = table.to_pandas().reset_index(drop=True)
    df_filter_1 = (np.array(result_df['integer']) == 1) \
        & (np.array(result_df['string']) != 'b') \
        & (np.array(result_df['boolean']) == 'True')
    df_filter_2 = (np.array(result_df['integer']) == 0) \
        & (np.array(result_df['boolean']) == 'False')
    assert df_filter_1.sum() > 0
    assert df_filter_2.sum() > 0
    assert result_df.shape[0] == (df_filter_1.sum() + df_filter_2.sum())
    # embedded NUL bytes in filter values are unsupported (bytes or str)
    with pytest.raises(NotImplementedError):
        filters = [[('string', '==', b'1\0a')]]
        pq.ParquetDataset(base_path, filesystem=fs, filters=filters)
    with pytest.raises(NotImplementedError):
        filters = [[('string', '==', u'1\0a')]]
        pq.ParquetDataset(base_path, filesystem=fs, filters=filters)
@pytest.mark.pandas
def test_cutoff_exclusive_integer(tempdir):
    """Strict '<' / '>' filters on an integer partition column keep only
    the strictly-inside values."""
    fs = LocalFileSystem.get_instance()
    base_path = tempdir
    integer_keys = [0, 1, 2, 3, 4]
    partition_spec = [
        ['integers', integer_keys],
    ]
    N = 5
    df = pd.DataFrame({
        'index': np.arange(N),
        'integers': np.array(integer_keys, dtype='i4'),
    }, columns=['index', 'integers'])
    _generate_partition_directories(fs, base_path, partition_spec, df)
    dataset = pq.ParquetDataset(
        base_path, filesystem=fs,
        filters=[
            ('integers', '<', 4),
            ('integers', '>', 1),
        ]
    )
    table = dataset.read()
    result_df = (table.to_pandas()
                 .sort_values(by='index')
                 .reset_index(drop=True))
    # map(int, ...) already does the conversion; the redundant identity
    # comprehension around it is replaced by a plain list().
    result_list = list(map(int, result_df['integers'].values))
    assert result_list == [2, 3]
@pytest.mark.pandas
@pytest.mark.xfail(
    raises=TypeError,
    reason='Loss of type information in creation of categoricals.'
)
def test_cutoff_exclusive_datetime(tempdir):
    """Strict-inequality filters on a date partition column.

    Currently expected to fail: partition keys lose their datetime type
    (see the xfail reason above).
    """
    fs = LocalFileSystem.get_instance()
    base_path = tempdir
    date_keys = [
        datetime.date(2018, 4, 9),
        datetime.date(2018, 4, 10),
        datetime.date(2018, 4, 11),
        datetime.date(2018, 4, 12),
        datetime.date(2018, 4, 13)
    ]
    partition_spec = [
        ['dates', date_keys]
    ]
    N = 5
    df = pd.DataFrame({
        'index': np.arange(N),
        'dates': np.array(date_keys, dtype='datetime64'),
    }, columns=['index', 'dates'])
    _generate_partition_directories(fs, base_path, partition_spec, df)
    dataset = pq.ParquetDataset(
        base_path, filesystem=fs,
        filters=[
            ('dates', '<', "2018-04-12"),
            ('dates', '>', "2018-04-10")
        ]
    )
    table = dataset.read()
    result_df = (table.to_pandas()
                 .sort_values(by='index')
                 .reset_index(drop=True))
    # only 2018-04-11 lies strictly between the two cutoffs
    expected = pd.Categorical(
        np.array([datetime.date(2018, 4, 11)], dtype='datetime64'),
        categories=np.array(date_keys, dtype='datetime64'))
    assert result_df['dates'].values == expected
@pytest.mark.pandas
def test_inclusive_integer(tempdir):
    """Inclusive '<=' / '>=' filters on an integer partition column keep
    the boundary values."""
    fs = LocalFileSystem.get_instance()
    base_path = tempdir
    integer_keys = [0, 1, 2, 3, 4]
    partition_spec = [
        ['integers', integer_keys],
    ]
    N = 5
    df = pd.DataFrame({
        'index': np.arange(N),
        'integers': np.array(integer_keys, dtype='i4'),
    }, columns=['index', 'integers'])
    _generate_partition_directories(fs, base_path, partition_spec, df)
    dataset = pq.ParquetDataset(
        base_path, filesystem=fs,
        filters=[
            ('integers', '<=', 3),
            ('integers', '>=', 2),
        ]
    )
    table = dataset.read()
    result_df = (table.to_pandas()
                 .sort_values(by='index')
                 .reset_index(drop=True))
    # the original converted twice (int(x) over map(int, ...)); one
    # conversion suffices
    result_list = [int(x) for x in result_df['integers'].values]
    assert result_list == [2, 3]
@pytest.mark.pandas
def test_inclusive_set(tempdir):
    """The 'in' filter operator selects on set membership per partition
    level."""
    fs = LocalFileSystem.get_instance()
    base_path = tempdir
    integer_keys = [0, 1]
    string_keys = ['a', 'b', 'c']
    boolean_keys = [True, False]
    partition_spec = [
        ['integer', integer_keys],
        ['string', string_keys],
        ['boolean', boolean_keys]
    ]
    df = pd.DataFrame({
        'integer': np.array(integer_keys, dtype='i4').repeat(15),
        'string': np.tile(np.tile(np.array(string_keys, dtype=object), 5), 2),
        'boolean': np.tile(np.tile(np.array(boolean_keys, dtype='bool'), 5),
                           3),
    }, columns=['integer', 'string', 'boolean'])
    _generate_partition_directories(fs, base_path, partition_spec, df)
    dataset = pq.ParquetDataset(
        base_path, filesystem=fs,
        filters=[('integer', 'in', {1}), ('string', 'in', {'a', 'b'}),
                 ('boolean', 'in', {True})]
    )
    table = dataset.read()
    result_df = (table.to_pandas().reset_index(drop=True))
    # every value excluded from a membership set must be absent
    assert 0 not in result_df['integer'].values
    assert 'c' not in result_df['string'].values
    assert False not in result_df['boolean'].values
@pytest.mark.pandas
def test_invalid_pred_op(tempdir):
    """Malformed filter specs ('=<' typo, empty set, set with a scalar
    operator) raise ValueError."""
    fs = LocalFileSystem.get_instance()
    base_path = tempdir
    integer_keys = [0, 1, 2, 3, 4]
    partition_spec = [
        ['integers', integer_keys],
    ]
    N = 5
    df = pd.DataFrame({
        'index': np.arange(N),
        'integers': np.array(integer_keys, dtype='i4'),
    }, columns=['index', 'integers'])
    _generate_partition_directories(fs, base_path, partition_spec, df)
    # '=<' is not a valid comparison operator
    with pytest.raises(ValueError):
        pq.ParquetDataset(base_path,
                          filesystem=fs,
                          filters=[
                              ('integers', '=<', 3),
                          ])
    # 'in' with an empty set matches nothing and is rejected
    with pytest.raises(ValueError):
        pq.ParquetDataset(base_path,
                          filesystem=fs,
                          filters=[
                              ('integers', 'in', set()),
                          ])
    # a set value requires the 'in' operator, not a scalar comparison
    with pytest.raises(ValueError):
        pq.ParquetDataset(base_path,
                          filesystem=fs,
                          filters=[
                              ('integers', '!=', {3}),
                          ])
@pytest.mark.pandas
def test_filters_read_table(tempdir):
    """read_table/read_pandas accept filters directly, in both flat and
    DNF (list-of-lists) form."""
    fs = LocalFileSystem.get_instance()
    base_path = tempdir
    integer_keys = [0, 1, 2, 3, 4]
    partition_spec = [
        ['integers', integer_keys],
    ]
    N = 5
    df = pd.DataFrame({
        'index': np.arange(N),
        'integers': np.array(integer_keys, dtype='i4'),
    }, columns=['index', 'integers'])
    _generate_partition_directories(fs, base_path, partition_spec, df)
    # flat filter list
    table = pq.read_table(
        base_path, filesystem=fs, filters=[('integers', '<', 3)])
    assert table.num_rows == 3
    # equivalent DNF form
    table = pq.read_table(
        base_path, filesystem=fs, filters=[[('integers', '<', 3)]])
    assert table.num_rows == 3
    table = pq.read_pandas(
        base_path, filters=[('integers', '<', 3)])
    assert table.num_rows == 3
# pytest.yield_fixture has been deprecated since pytest 3.0; a plain
# pytest.fixture supports yield-style teardown.
@pytest.fixture
def s3_example():
    """Yield an (s3fs filesystem, bucket URI) pair for S3 tests.

    Requires the PYARROW_TEST_S3_* environment variables; the temporary
    bucket directory is removed on teardown.
    """
    access_key = os.environ['PYARROW_TEST_S3_ACCESS_KEY']
    secret_key = os.environ['PYARROW_TEST_S3_SECRET_KEY']
    bucket_name = os.environ['PYARROW_TEST_S3_BUCKET']
    import s3fs
    fs = s3fs.S3FileSystem(key=access_key, secret=secret_key)
    test_dir = guid()
    bucket_uri = 's3://{0}/{1}'.format(bucket_name, test_dir)
    fs.mkdir(bucket_uri)
    yield fs, bucket_uri
    fs.rm(bucket_uri, recursive=True)
@pytest.mark.pandas
@pytest.mark.s3
def test_read_partitioned_directory_s3fs(s3_example):
    """Partitioned reads work through the S3FSWrapper filesystem, and a
    raw s3fs filesystem is accepted by ParquetDataset too."""
    from pyarrow.filesystem import S3FSWrapper
    fs, bucket_uri = s3_example
    wrapper = S3FSWrapper(fs)
    _partition_test_for_filesystem(wrapper, bucket_uri)
    # ensure the unwrapped s3fs filesystem is also accepted
    dataset = pq.ParquetDataset(bucket_uri, filesystem=fs)
    dataset.read()
def _partition_test_for_filesystem(fs, base_path):
    """Write a two-level (foo/bar) partitioned dataset on *fs* under
    *base_path*, read it back, and verify contents.

    Partition columns come back as categoricals, appended after the
    data columns.
    """
    foo_keys = [0, 1]
    bar_keys = ['a', 'b', 'c']
    partition_spec = [
        ['foo', foo_keys],
        ['bar', bar_keys]
    ]
    N = 30
    df = pd.DataFrame({
        'index': np.arange(N),
        'foo': np.array(foo_keys, dtype='i4').repeat(15),
        'bar': np.tile(np.tile(np.array(bar_keys, dtype=object), 5), 2),
        'values': np.random.randn(N)
    }, columns=['index', 'foo', 'bar', 'values'])
    _generate_partition_directories(fs, base_path, partition_spec, df)
    dataset = pq.ParquetDataset(base_path, filesystem=fs)
    table = dataset.read()
    result_df = (table.to_pandas()
                 .sort_values(by='index')
                 .reset_index(drop=True))
    expected_df = (df.sort_values(by='index')
                   .reset_index(drop=True)
                   .reindex(columns=result_df.columns))
    # partition columns are materialized as categoricals
    expected_df['foo'] = pd.Categorical(df['foo'], categories=foo_keys)
    expected_df['bar'] = pd.Categorical(df['bar'], categories=bar_keys)
    # partition columns follow the data columns in the result
    assert (result_df.columns == ['index', 'values', 'foo', 'bar']).all()
    tm.assert_frame_equal(result_df, expected_df)
def _generate_partition_directories(fs, base_dir, partition_spec, df):
    """Materialize *df* under *base_dir* as Hive-style partition
    directories ('name=value/...') per *partition_spec*.

    partition_spec is e.g.
    [['foo', [0, 1, 2]], ['bar', ['a', 'b', 'c']]]; leaf directories get
    one Parquet file plus a _SUCCESS marker.
    """
    DEPTH = len(partition_spec)

    def _visit_level(base_dir, level, part_keys):
        # recursively descend one partition level per call
        name, values = partition_spec[level]
        for value in values:
            this_part_keys = part_keys + [(name, value)]
            level_dir = base_dir / '{0}={1}'.format(name, value)
            fs.mkdir(level_dir)
            if level == DEPTH - 1:
                # leaf: write the rows belonging to this key combination
                file_path = level_dir / guid()
                filtered_df = _filter_partition(df, this_part_keys)
                part_table = pa.Table.from_pandas(filtered_df)
                with fs.open(file_path, 'wb') as f:
                    _write_table(part_table, f)
                assert fs.exists(file_path)
                (level_dir / '_SUCCESS').touch()
            else:
                _visit_level(level_dir, level + 1, this_part_keys)
                (level_dir / '_SUCCESS').touch()
    _visit_level(base_dir, 0, [])
def _test_read_common_metadata_files(fs, base_path):
    """Verify that a _common_metadata sidecar file is discovered and its
    schema matches the data file's schema."""
    N = 100
    df = pd.DataFrame({
        'index': np.arange(N),
        'values': np.random.randn(N)
    }, columns=['index', 'values'])
    base_path = str(base_path)
    data_path = os.path.join(base_path, 'data.parquet')
    table = pa.Table.from_pandas(df)
    with fs.open(data_path, 'wb') as f:
        _write_table(table, f)
    # write the schema-only _common_metadata sidecar
    metadata_path = os.path.join(base_path, '_common_metadata')
    with fs.open(metadata_path, 'wb') as f:
        pq.write_metadata(table.schema, f)
    dataset = pq.ParquetDataset(base_path, filesystem=fs)
    assert dataset.common_metadata_path == str(metadata_path)
    with fs.open(data_path) as f:
        common_schema = pq.read_metadata(f).schema
    assert dataset.schema.equals(common_schema)
    # a list containing a directory works the same as a bare path
    dataset2 = pq.ParquetDataset([base_path], filesystem=fs)
    assert dataset2.schema.equals(dataset.schema)
@pytest.mark.pandas
def test_read_common_metadata_files(tempdir):
    """_common_metadata handling, exercised on the local filesystem."""
    local_fs = LocalFileSystem.get_instance()
    _test_read_common_metadata_files(local_fs, tempdir)
@pytest.mark.pandas
def test_read_metadata_files(tempdir):
    """A _metadata sidecar file is discovered and its schema matches the
    data file's schema."""
    fs = LocalFileSystem.get_instance()
    N = 100
    df = pd.DataFrame({
        'index': np.arange(N),
        'values': np.random.randn(N)
    }, columns=['index', 'values'])
    data_path = tempdir / 'data.parquet'
    table = pa.Table.from_pandas(df)
    with fs.open(data_path, 'wb') as f:
        _write_table(table, f)
    # write the schema-only _metadata sidecar
    metadata_path = tempdir / '_metadata'
    with fs.open(metadata_path, 'wb') as f:
        pq.write_metadata(table.schema, f)
    dataset = pq.ParquetDataset(tempdir, filesystem=fs)
    assert dataset.metadata_path == str(metadata_path)
    with fs.open(data_path) as f:
        metadata_schema = pq.read_metadata(f).schema
    assert dataset.schema.equals(metadata_schema)
@pytest.mark.pandas
def test_read_schema(tempdir):
    """pq.read_schema returns the written schema, with or without memory
    mapping, and preserves the pandas metadata blob."""
    N = 100
    df = pd.DataFrame({
        'index': np.arange(N),
        'values': np.random.randn(N)
    }, columns=['index', 'values'])
    data_path = tempdir / 'test.parquet'
    table = pa.Table.from_pandas(df)
    _write_table(table, data_path)
    read1 = pq.read_schema(data_path)
    read2 = pq.read_schema(data_path, memory_map=True)
    # metadata differs (pandas blob added on write), so compare without it
    assert table.schema.equals(read1, check_metadata=False)
    assert table.schema.equals(read2, check_metadata=False)
    assert table.schema.metadata[b'pandas'] == read1.metadata[b'pandas']
def _filter_partition(df, part_keys):
predicate = np.ones(len(df), dtype=bool)
to_drop = []
for name, value in part_keys:
to_drop.append(name)
if isinstance(value, (datetime.date, datetime.datetime)):
value = pd.Timestamp(value)
predicate &= df[name] == value
return df[predicate].drop(to_drop, axis=1)
@pytest.mark.pandas
def test_read_multiple_files(tempdir):
    """Reading a directory of files: full reads, reads with provided
    metadata, column subsets, threads, and mismatched-schema failures."""
    nfiles = 10
    size = 5
    dirpath = tempdir / guid()
    dirpath.mkdir()
    test_data = []
    paths = []
    for i in range(nfiles):
        df = _test_dataframe(size, seed=i)
        # widen uint32 so all files share one schema
        df['uint32'] = df['uint32'].astype(np.int64)
        path = dirpath / '{}.parquet'.format(i)
        table = pa.Table.from_pandas(df)
        _write_table(table, path)
        test_data.append(table)
        paths.append(path)
    # Write a _SUCCESS.crc file
    (dirpath / '_SUCCESS.crc').touch()

    def read_multiple_files(paths, columns=None, use_threads=True, **kwargs):
        dataset = pq.ParquetDataset(paths, **kwargs)
        return dataset.read(columns=columns, use_threads=use_threads)
    result = read_multiple_files(paths)
    expected = pa.concat_tables(test_data)
    assert result.equals(expected)
    # Read with provided metadata
    metadata = pq.read_metadata(paths[0])
    result2 = read_multiple_files(paths, metadata=metadata)
    assert result2.equals(expected)
    result3 = pa.localfs.read_parquet(dirpath, schema=metadata.schema)
    assert result3.equals(expected)
    # Read column subset
    to_read = [0, 2, 6, result.num_columns - 1]
    col_names = [result.field(i).name for i in to_read]
    out = pa.localfs.read_parquet(dirpath, columns=col_names)
    expected = pa.Table.from_arrays([result.column(i) for i in to_read],
                                    names=col_names,
                                    metadata=result.schema.metadata)
    assert out.equals(expected)
    # Read with multiple threads
    pa.localfs.read_parquet(dirpath, use_threads=True)
    # Test failure modes with non-uniform metadata
    bad_apple = _test_dataframe(size, seed=i).iloc[:, :4]
    bad_apple_path = tempdir / '{}.parquet'.format(guid())
    t = pa.Table.from_pandas(bad_apple)
    _write_table(t, bad_apple_path)
    bad_meta = pq.read_metadata(bad_apple_path)
    with pytest.raises(ValueError):
        read_multiple_files(paths + [bad_apple_path])
    with pytest.raises(ValueError):
        read_multiple_files(paths, metadata=bad_meta)
    mixed_paths = [bad_apple_path, paths[0]]
    with pytest.raises(ValueError):
        read_multiple_files(mixed_paths, schema=bad_meta.schema)
    with pytest.raises(ValueError):
        read_multiple_files(mixed_paths)
@pytest.mark.pandas
def test_dataset_read_pandas(tempdir):
    """ParquetDataset.read_pandas concatenates per-file frames and
    honors a column subset."""
    nfiles = 5
    size = 5
    dirpath = tempdir / guid()
    dirpath.mkdir()
    test_data = []
    frames = []
    paths = []
    for i in range(nfiles):
        df = _test_dataframe(size, seed=i)
        # give each file a distinct, contiguous index range
        df.index = np.arange(i * size, (i + 1) * size)
        df.index.name = 'index'
        path = dirpath / '{}.parquet'.format(i)
        table = pa.Table.from_pandas(df)
        _write_table(table, path)
        test_data.append(table)
        frames.append(df)
        paths.append(path)
    dataset = pq.ParquetDataset(dirpath)
    columns = ['uint8', 'strings']
    result = dataset.read_pandas(columns=columns).to_pandas()
    expected = pd.concat([x[columns] for x in frames])
    tm.assert_frame_equal(result, expected)
@pytest.mark.pandas
def test_dataset_no_memory_map(tempdir):
    # ARROW-2627: Check that we can use ParquetDataset without memory-mapping
    dirpath = tempdir / guid()
    dirpath.mkdir()
    df = _test_dataframe(10, seed=0)
    path = dirpath / '{}.parquet'.format(0)
    table = pa.Table.from_pandas(df)
    _write_table(table, path, version='2.0')
    # TODO(wesm): Not sure how to easily check that memory mapping is _not_
    # used. Mocking is not especially easy for pa.memory_map
    dataset = pq.ParquetDataset(dirpath, memory_map=False)
    # reading must still round-trip correctly without a memory map
    assert dataset.pieces[0].read().equals(table)
@pytest.mark.pandas
def test_dataset_read_pandas_common_metadata(tempdir, preserve_index):
    # ARROW-1103
    # Per-file pandas metadata is stripped; the _metadata sidecar alone
    # must drive index reconstruction.
    nfiles = 5
    size = 5
    dirpath = tempdir / guid()
    dirpath.mkdir()
    test_data = []
    frames = []
    paths = []
    for i in range(nfiles):
        df = _test_dataframe(size, seed=i)
        df.index = pd.Index(np.arange(i * size, (i + 1) * size), name='index')
        path = dirpath / '{}.parquet'.format(i)
        table = pa.Table.from_pandas(df, preserve_index=preserve_index)
        # Obliterate metadata
        table = table.replace_schema_metadata(None)
        assert table.schema.metadata is None
        _write_table(table, path)
        test_data.append(table)
        frames.append(df)
        paths.append(path)
    # Write _metadata common file
    table_for_metadata = pa.Table.from_pandas(
        df, preserve_index=preserve_index
    )
    pq.write_metadata(table_for_metadata.schema, dirpath / '_metadata')
    dataset = pq.ParquetDataset(dirpath)
    columns = ['uint8', 'strings']
    result = dataset.read_pandas(columns=columns).to_pandas()
    expected = pd.concat([x[columns] for x in frames])
    expected.index.name = (
        df.index.name if preserve_index is not False else None)
    tm.assert_frame_equal(result, expected)
def _make_example_multifile_dataset(base_path, nfiles=10, file_nrows=5):
    """Write *nfiles* small Parquet files under *base_path* and return
    the list of paths written.

    The original also accumulated the _write_table return values in a
    list that was never used or returned; that dead accumulator is
    removed.
    """
    paths = []
    for i in range(nfiles):
        df = _test_dataframe(file_nrows, seed=i)
        path = base_path / '{}.parquet'.format(i)
        _write_table(df, path)
        paths.append(path)
    return paths
@pytest.mark.pandas
def test_ignore_private_directories(tempdir):
    """Underscore-prefixed directories (e.g. _impala_staging) are not
    picked up during dataset discovery."""
    dataset_dir = tempdir / guid()
    dataset_dir.mkdir()

    written = _make_example_multifile_dataset(dataset_dir, nfiles=10,
                                              file_nrows=5)

    # private directory that must be ignored
    (dataset_dir / '_impala_staging').mkdir()

    found = pq.ParquetDataset(dataset_dir)
    assert {str(p) for p in written} == {piece.path for piece in found.pieces}
@pytest.mark.pandas
def test_ignore_hidden_files_dot(tempdir):
    """Dot-prefixed files (.DS_Store, .private) are skipped during
    dataset discovery."""
    dataset_dir = tempdir / guid()
    dataset_dir.mkdir()

    written = _make_example_multifile_dataset(dataset_dir, nfiles=10,
                                              file_nrows=5)

    for hidden_name in ['.DS_Store', '.private']:
        with (dataset_dir / hidden_name).open('wb') as f:
            f.write(b'gibberish')

    found = pq.ParquetDataset(dataset_dir)
    assert {str(p) for p in written} == {piece.path for piece in found.pieces}
@pytest.mark.pandas
def test_ignore_hidden_files_underscore(tempdir):
    """Underscore-prefixed marker files (_committed_*, _started_*) are
    skipped during dataset discovery."""
    dataset_dir = tempdir / guid()
    dataset_dir.mkdir()

    written = _make_example_multifile_dataset(dataset_dir, nfiles=10,
                                              file_nrows=5)

    for marker_name in ['_committed_123', '_started_321']:
        with (dataset_dir / marker_name).open('wb') as f:
            f.write(b'abcd')

    found = pq.ParquetDataset(dataset_dir)
    assert {str(p) for p in written} == {piece.path for piece in found.pieces}
@pytest.mark.pandas
def test_multiindex_duplicate_values(tempdir):
    """A MultiIndex containing repeated level values survives a Parquet
    round trip."""
    numbers = [0, 1, 2]
    idx = pd.MultiIndex.from_arrays(
        [['foo', 'foo', 'bar'], numbers],
        names=['foobar', 'some_numbers'],
    )
    frame = pd.DataFrame({'numbers': numbers}, index=idx)
    expected = pa.Table.from_pandas(frame)

    target = tempdir / 'dup_multi_index_levels.parquet'
    _write_table(expected, target)

    round_tripped = _read_table(target)
    assert expected.equals(round_tripped)
    tm.assert_frame_equal(round_tripped.to_pandas(), frame)
@pytest.mark.pandas
def test_write_error_deletes_incomplete_file(tempdir):
    # ARROW-1285
    # The 'i' column (nanosecond timestamps) makes the v1 write fail;
    # the partially-written file must then be cleaned up.
    df = pd.DataFrame({'a': list('abc'),
                       'b': list(range(1, 4)),
                       'c': np.arange(3, 6).astype('u1'),
                       'd': np.arange(4.0, 7.0, dtype='float64'),
                       'e': [True, False, True],
                       'f': pd.Categorical(list('abc')),
                       'g': pd.date_range('20130101', periods=3),
                       'h': pd.date_range('20130101', periods=3,
                                          tz='US/Eastern'),
                       'i': pd.date_range('20130101', periods=3, freq='ns')})
    pdf = pa.Table.from_pandas(df)
    filename = tempdir / 'tmp_file'
    try:
        _write_table(pdf, filename)
    except pa.ArrowException:
        pass
    # no partial file may be left behind after the failed write
    assert not filename.exists()
@pytest.mark.pandas
def test_noncoerced_nanoseconds_written_without_exception(tempdir):
    # ARROW-1957: the Parquet version 2.0 writer preserves Arrow
    # nanosecond timestamps by default
    n = 9
    # pd.DatetimeIndex(start=..., freq=..., periods=...) was deprecated
    # in pandas 0.24 and later removed; pd.date_range is the supported
    # constructor for the same index.
    df = pd.DataFrame({'x': range(n)},
                      index=pd.date_range(start='2017-01-01', freq='1n',
                                          periods=n))
    tb = pa.Table.from_pandas(df)
    filename = tempdir / 'written.parquet'
    try:
        pq.write_table(tb, filename, version='2.0')
    except Exception:
        pass
    assert filename.exists()
    recovered_table = pq.read_table(filename)
    assert tb.equals(recovered_table)
    # Loss of data thru coercion (without explicit override) still an error
    filename = tempdir / 'not_written.parquet'
    with pytest.raises(ValueError):
        pq.write_table(tb, filename, coerce_timestamps='ms', version='2.0')
def test_read_non_existent_file(tempdir):
    """Reading a missing file raises, and the error message names the
    offending path."""
    path = 'non-existent-file.parquet'
    # The original try/except passed silently if no exception was
    # raised at all; pytest.raises turns a missing error into a failure.
    with pytest.raises(Exception) as exc_info:
        pq.read_table(path)
    assert path in exc_info.value.args[0]
def test_read_table_doesnt_warn(datadir):
    """Reading a v0.7.1-era file must not emit any warnings."""
    # pytest.warns(None) records all warnings without asserting any
    with pytest.warns(None) as record:
        pq.read_table(datadir / 'v0.7.1.parquet')
    assert len(record) == 0
def _test_write_to_dataset_with_partitions(base_path,
                                           filesystem=None,
                                           schema=None,
                                           index_name=None):
    # ARROW-1400
    # Write a partitioned dataset and verify schema and contents
    # survive the round trip.
    # NOTE(review): *index_name* is accepted but never used in this
    # body — confirm whether the index-name scenario still needs real
    # coverage.
    output_df = pd.DataFrame({'group1': list('aaabbbbccc'),
                              'group2': list('eefeffgeee'),
                              'num': list(range(10)),
                              # pd.np was a deprecated alias removed in
                              # pandas 2.0; use numpy directly
                              'nan': [np.nan] * 10,
                              'date': np.arange('2017-01-01', '2017-01-11',
                                                dtype='datetime64[D]')})
    cols = output_df.columns.tolist()
    partition_by = ['group1', 'group2']
    output_table = pa.Table.from_pandas(output_df, schema=schema, safe=False,
                                        preserve_index=False)
    pq.write_to_dataset(output_table, base_path, partition_by,
                        filesystem=filesystem)
    metadata_path = os.path.join(base_path, '_common_metadata')
    if filesystem is not None:
        with filesystem.open(metadata_path, 'wb') as f:
            pq.write_metadata(output_table.schema, f)
    else:
        pq.write_metadata(output_table.schema, metadata_path)
    # ARROW-2891: Ensure the output_schema is preserved when writing a
    # partitioned dataset
    dataset = pq.ParquetDataset(base_path,
                                filesystem=filesystem,
                                validate_schema=True)
    # ARROW-2209: Ensure the dataset schema also includes the partition columns
    dataset_cols = set(dataset.schema.to_arrow_schema().names)
    assert dataset_cols == set(output_table.schema.names)
    input_table = dataset.read()
    input_df = input_table.to_pandas()
    # Read data back in and compare with original DataFrame
    # Partitioned columns added to the end of the DataFrame when read
    input_df_cols = input_df.columns.tolist()
    assert partition_by == input_df_cols[-1 * len(partition_by):]
    # Partitioned columns become 'categorical' dtypes
    input_df = input_df[cols]
    for col in partition_by:
        output_df[col] = output_df[col].astype('category')
    assert output_df.equals(input_df)
def _test_write_to_dataset_no_partitions(base_path, filesystem=None):
    # ARROW-1400
    # Without partition columns, repeated writes append new files under
    # the root; reading and deduplicating recovers the original frame.
    output_df = pd.DataFrame({'group1': list('aaabbbbccc'),
                              'group2': list('eefeffgeee'),
                              'num': list(range(10)),
                              'date': np.arange('2017-01-01', '2017-01-11',
                                                dtype='datetime64[D]')})
    cols = output_df.columns.tolist()
    output_table = pa.Table.from_pandas(output_df)
    if filesystem is None:
        filesystem = LocalFileSystem.get_instance()
    # Without partitions, append files to root_path
    n = 5
    for i in range(n):
        pq.write_to_dataset(output_table, base_path,
                            filesystem=filesystem)
    output_files = [file for file in filesystem.ls(base_path)
                    if file.endswith(".parquet")]
    assert len(output_files) == n
    # Deduplicated incoming DataFrame should match
    # original outgoing Dataframe
    input_table = pq.ParquetDataset(base_path,
                                    filesystem=filesystem).read()
    input_df = input_table.to_pandas()
    input_df = input_df.drop_duplicates()
    input_df = input_df[cols]
    assert output_df.equals(input_df)
@pytest.mark.pandas
def test_write_to_dataset_with_partitions(tempdir):
    """Partitioned write/read round trip on the local filesystem."""
    _test_write_to_dataset_with_partitions(str(tempdir))
@pytest.mark.pandas
def test_write_to_dataset_with_partitions_and_schema(tempdir):
    """Partitioned round trip with an explicitly supplied schema."""
    schema = pa.schema([pa.field('group1', type=pa.string()),
                        pa.field('group2', type=pa.string()),
                        pa.field('num', type=pa.int64()),
                        pa.field('nan', type=pa.int32()),
                        pa.field('date', type=pa.timestamp(unit='us'))])
    _test_write_to_dataset_with_partitions(str(tempdir), schema=schema)
@pytest.mark.pandas
def test_write_to_dataset_with_partitions_and_index_name(tempdir):
    """Partitioned round trip with a named index."""
    _test_write_to_dataset_with_partitions(str(tempdir),
                                           index_name='index_name')
@pytest.mark.pandas
def test_write_to_dataset_no_partitions(tempdir):
    """Unpartitioned append-style writes on the local filesystem."""
    _test_write_to_dataset_no_partitions(str(tempdir))
@pytest.mark.pandas
def test_write_to_dataset_with_partitions_and_custom_filenames(tempdir):
    """A partition_filename_cb controls the basename of each written
    partition file (ARROW-3538)."""
    output_df = pd.DataFrame({'group1': list('aaabbbbccc'),
                              'group2': list('eefeffgeee'),
                              'num': list(range(10)),
                              # pd.np was a deprecated alias removed in
                              # pandas 2.0; use numpy directly
                              'nan': [np.nan] * 10,
                              'date': np.arange('2017-01-01', '2017-01-11',
                                                dtype='datetime64[D]')})
    partition_by = ['group1', 'group2']
    output_table = pa.Table.from_pandas(output_df)
    path = str(tempdir)

    def partition_filename_callback(keys):
        # e.g. ('a', 'e') -> 'a-e.parquet'
        return "{0}-{1}.parquet".format(*keys)
    pq.write_to_dataset(output_table, path,
                        partition_by, partition_filename_callback)
    dataset = pq.ParquetDataset(path)
    # ARROW-3538: Ensure partition filenames match the given pattern
    # defined in the local function partition_filename_callback
    expected_basenames = [
        'a-e.parquet', 'a-f.parquet',
        'b-e.parquet', 'b-f.parquet',
        'b-g.parquet', 'c-e.parquet'
    ]
    output_basenames = [os.path.basename(p.path) for p in dataset.pieces]
    assert sorted(expected_basenames) == sorted(output_basenames)
@pytest.mark.large_memory
def test_large_table_int32_overflow():
    """Writing a column with more than INT32_MAX rows must not fail."""
    nrows = np.iinfo('int32').max + 1
    ones = pa.array(np.ones(nrows, dtype='uint8'), type=pa.uint8())
    big_table = pa.Table.from_arrays([ones], names=['one'])
    sink = io.BytesIO()
    _write_table(big_table, sink)
def _simple_table_roundtrip(table):
    """Serialize *table* to an in-memory Parquet buffer and read it
    back."""
    sink = pa.BufferOutputStream()
    _write_table(table, sink)
    return _read_table(sink.getvalue())
@pytest.mark.pandas
@pytest.mark.large_memory
def test_binary_array_overflow_to_chunked():
    # ARROW-3762
    # A binary column whose total size exceeds 2GB must come back as a
    # ChunkedArray split across multiple chunks.
    # 2^31 + 1 bytes
    values = [b'x'] + [
        b'x' * (1 << 20)
    ] * 2 * (1 << 10)
    df = pd.DataFrame({'byte_col': values})
    tbl = pa.Table.from_pandas(df, preserve_index=False)
    read_tbl = _simple_table_roundtrip(tbl)
    col0_data = read_tbl[0]
    assert isinstance(col0_data, pa.ChunkedArray)
    # Split up into 2GB chunks
    assert col0_data.num_chunks == 2
    assert tbl.equals(read_tbl)
@pytest.mark.pandas
@pytest.mark.large_memory
def test_list_of_binary_large_cell():
    # ARROW-4688
    # Round-trip a list-of-binary column whose cells approach 2GB total.
    data = []
    # TODO(wesm): handle chunked children
    # 2^31 - 1 bytes in a single cell
    # data.append([b'x' * (1 << 20)] * 2047 + [b'x' * ((1 << 20) - 1)])
    # A little under 2GB in cell each containing approximately 10MB each
    data.extend([[b'x' * 1000000] * 10] * 214)
    arr = pa.array(data)
    table = pa.Table.from_arrays([arr], ['chunky_cells'])
    read_table = _simple_table_roundtrip(table)
    assert table.equals(read_table)
@pytest.mark.pandas
def test_index_column_name_duplicate(tempdir):
    """A frame whose index duplicates a column ('time' kept via
    drop=False) round-trips correctly."""
    data = {
        'close': {
            pd.Timestamp('2017-06-30 01:31:00'): 154.99958999999998,
            pd.Timestamp('2017-06-30 01:32:00'): 154.99958999999998,
        },
        'time': {
            pd.Timestamp('2017-06-30 01:31:00'): pd.Timestamp(
                '2017-06-30 01:31:00'
            ),
            pd.Timestamp('2017-06-30 01:32:00'): pd.Timestamp(
                '2017-06-30 01:32:00'
            ),
        }
    }
    path = str(tempdir / 'data.parquet')
    # drop=False keeps 'time' both as index and as a regular column
    dfx = pd.DataFrame(data).set_index('time', drop=False)
    tdfx = pa.Table.from_pandas(dfx)
    _write_table(tdfx, path)
    arrow_table = _read_table(path)
    result_df = arrow_table.to_pandas()
    tm.assert_frame_equal(result_df, dfx)
@pytest.mark.pandas
def test_parquet_nested_convenience(tempdir):
    """ARROW-1684: column selection works for nested (list) columns."""
    frame = pd.DataFrame({
        'a': [[1, 2, 3], None, [4, 5], []],
        'b': [[1.], None, None, [6., 7.]],
    })

    target = str(tempdir / 'nested_convenience.parquet')
    _write_table(pa.Table.from_pandas(frame, preserve_index=False), target)

    only_a = pq.read_table(target, columns=['a'])
    tm.assert_frame_equal(only_a.to_pandas(), frame[['a']])

    both = pq.read_table(target, columns=['a', 'b'])
    tm.assert_frame_equal(both.to_pandas(), frame)
@pytest.mark.pandas
def test_backwards_compatible_index_naming(datadir):
    """A file written by pyarrow 0.7.1 with an unnamed default index
    reads back as a plain RangeIndex frame."""
    expected_string = b"""\
carat        cut  color  clarity  depth  table  price     x     y     z
 0.23      Ideal      E      SI2   61.5   55.0    326  3.95  3.98  2.43
 0.21    Premium      E      SI1   59.8   61.0    326  3.89  3.84  2.31
 0.23       Good      E      VS1   56.9   65.0    327  4.05  4.07  2.31
 0.29    Premium      I      VS2   62.4   58.0    334  4.20  4.23  2.63
 0.31       Good      J      SI2   63.3   58.0    335  4.34  4.35  2.75
 0.24  Very Good      J     VVS2   62.8   57.0    336  3.94  3.96  2.48
 0.24  Very Good      I     VVS1   62.3   57.0    336  3.95  3.98  2.47
 0.26  Very Good      H      SI1   61.9   55.0    337  4.07  4.11  2.53
 0.22       Fair      E      VS2   65.1   61.0    337  3.87  3.78  2.49
 0.23  Very Good      H      VS1   59.4   61.0    338  4.00  4.05  2.39"""
    expected = pd.read_csv(io.BytesIO(expected_string), sep=r'\s{2,}',
                           index_col=None, header=0, engine='python')
    table = _read_table(datadir / 'v0.7.1.parquet')
    result = table.to_pandas()
    tm.assert_frame_equal(result, expected)
@pytest.mark.pandas
def test_backwards_compatible_index_multi_level_named(datadir):
    """A 0.7.1-era file with a fully named MultiIndex reconstructs all
    index level names."""
    expected_string = b"""\
carat        cut  color  clarity  depth  table  price     x     y     z
 0.23      Ideal      E      SI2   61.5   55.0    326  3.95  3.98  2.43
 0.21    Premium      E      SI1   59.8   61.0    326  3.89  3.84  2.31
 0.23       Good      E      VS1   56.9   65.0    327  4.05  4.07  2.31
 0.29    Premium      I      VS2   62.4   58.0    334  4.20  4.23  2.63
 0.31       Good      J      SI2   63.3   58.0    335  4.34  4.35  2.75
 0.24  Very Good      J     VVS2   62.8   57.0    336  3.94  3.96  2.48
 0.24  Very Good      I     VVS1   62.3   57.0    336  3.95  3.98  2.47
 0.26  Very Good      H      SI1   61.9   55.0    337  4.07  4.11  2.53
 0.22       Fair      E      VS2   65.1   61.0    337  3.87  3.78  2.49
 0.23  Very Good      H      VS1   59.4   61.0    338  4.00  4.05  2.39"""
    expected = pd.read_csv(
        io.BytesIO(expected_string), sep=r'\s{2,}',
        index_col=['cut', 'color', 'clarity'],
        header=0, engine='python'
    ).sort_index()
    table = _read_table(datadir / 'v0.7.1.all-named-index.parquet')
    result = table.to_pandas()
    tm.assert_frame_equal(result, expected)
@pytest.mark.pandas
def test_backwards_compatible_index_multi_level_some_named(datadir):
    """A 0.7.1-era file with a partially named MultiIndex keeps the
    unnamed middle level as None."""
    expected_string = b"""\
carat        cut  color  clarity  depth  table  price     x     y     z
 0.23      Ideal      E      SI2   61.5   55.0    326  3.95  3.98  2.43
 0.21    Premium      E      SI1   59.8   61.0    326  3.89  3.84  2.31
 0.23       Good      E      VS1   56.9   65.0    327  4.05  4.07  2.31
 0.29    Premium      I      VS2   62.4   58.0    334  4.20  4.23  2.63
 0.31       Good      J      SI2   63.3   58.0    335  4.34  4.35  2.75
 0.24  Very Good      J     VVS2   62.8   57.0    336  3.94  3.96  2.48
 0.24  Very Good      I     VVS1   62.3   57.0    336  3.95  3.98  2.47
 0.26  Very Good      H      SI1   61.9   55.0    337  4.07  4.11  2.53
 0.22       Fair      E      VS2   65.1   61.0    337  3.87  3.78  2.49
 0.23  Very Good      H      VS1   59.4   61.0    338  4.00  4.05  2.39"""
    expected = pd.read_csv(
        io.BytesIO(expected_string),
        sep=r'\s{2,}', index_col=['cut', 'color', 'clarity'],
        header=0, engine='python'
    ).sort_index()
    # middle level is intentionally unnamed in the stored file
    expected.index = expected.index.set_names(['cut', None, 'clarity'])
    table = _read_table(datadir / 'v0.7.1.some-named-index.parquet')
    result = table.to_pandas()
    tm.assert_frame_equal(result, expected)
@pytest.mark.pandas
def test_backwards_compatible_column_metadata_handling(datadir):
    """A 0.7.1-era file with column metadata reconstructs the MultiIndex
    on a full read, and drops it on a column-subset read."""
    expected = pd.DataFrame(
        {'a': [1, 2, 3], 'b': [.1, .2, .3],
         'c': pd.date_range("2017-01-01", periods=3, tz='Europe/Brussels')})
    expected.index = pd.MultiIndex.from_arrays(
        [['a', 'b', 'c'],
         pd.date_range("2017-01-01", periods=3, tz='Europe/Brussels')],
        names=['index', None])
    path = datadir / 'v0.7.1.column-metadata-handling.parquet'
    table = _read_table(path)
    result = table.to_pandas()
    tm.assert_frame_equal(result, expected)
    # reading a subset that excludes the index columns drops the index
    table = _read_table(path, columns=['a'])
    result = table.to_pandas()
    tm.assert_frame_equal(result, expected[['a']].reset_index(drop=True))
def _make_dataset_for_pickling(tempdir, N=100):
    """Write a small three-row-group dataset plus a ``_metadata`` file and
    return the opened :class:`pq.ParquetDataset` for the pickling tests.

    :param tempdir: directory to create the dataset in
    :param N: number of rows per written table
    """
    path = tempdir / 'data.parquet'
    fs = LocalFileSystem.get_instance()
    df = pd.DataFrame({
        'index': np.arange(N),
        'values': np.random.randn(N)
    }, columns=['index', 'values'])
    table = pa.Table.from_pandas(df)
    num_groups = 3
    # Write the same table three times -> three row groups in one file.
    with pq.ParquetWriter(path, table.schema) as writer:
        for i in range(num_groups):
            writer.write_table(table)
    reader = pq.ParquetFile(path)
    assert reader.metadata.num_row_groups == num_groups
    # Add a schema-only _metadata sidecar so the dataset has a metadata_path.
    metadata_path = tempdir / '_metadata'
    with fs.open(metadata_path, 'wb') as f:
        pq.write_metadata(table.schema, f)
    dataset = pq.ParquetDataset(tempdir, filesystem=fs)
    assert dataset.metadata_path == str(metadata_path)
    return dataset
@pytest.mark.pandas
@pytest.mark.parametrize('pickler', [
    pytest.param(pickle, id='builtin'),
    pytest.param(pytest.importorskip('cloudpickle'), id='cloudpickle')
])
def test_pickle_dataset(tempdir, datadir, pickler):
    """Datasets, their metadata/schema objects and their pieces must survive
    a round trip through both pickle and cloudpickle."""
    def is_pickleable(obj):
        # Round-trip equality is the contract being checked.
        return obj == pickler.loads(pickler.dumps(obj))
    dataset = _make_dataset_for_pickling(tempdir)
    assert is_pickleable(dataset)
    assert is_pickleable(dataset.metadata)
    assert is_pickleable(dataset.metadata.schema)
    assert len(dataset.metadata.schema)
    for column in dataset.metadata.schema:
        assert is_pickleable(column)
    for piece in dataset.pieces:
        assert is_pickleable(piece)
        metadata = piece.get_metadata()
        assert metadata.num_row_groups
        for i in range(metadata.num_row_groups):
            assert is_pickleable(metadata.row_group(i))
@pytest.mark.pandas
def test_decimal_roundtrip(tempdir):
    """Decimals of every precision (1-38) and scale (0..precision) must
    round-trip through Parquet unchanged."""
    num_values = 10
    columns = {}
    for precision in range(1, 39):
        for scale in range(0, precision + 1):
            # Fixed seed keeps the generated values reproducible.
            with util.random_seed(0):
                random_decimal_values = [
                    util.randdecimal(precision, scale)
                    for _ in range(num_values)
                ]
            column_name = ('dec_precision_{:d}_scale_{:d}'
                           .format(precision, scale))
            columns[column_name] = random_decimal_values
    expected = pd.DataFrame(columns)
    filename = tempdir / 'decimals.parquet'
    string_filename = str(filename)
    table = pa.Table.from_pandas(expected)
    _write_table(table, string_filename)
    result_table = _read_table(string_filename)
    result = result_table.to_pandas()
    tm.assert_frame_equal(result, expected)
@pytest.mark.pandas
@pytest.mark.xfail(
    raises=pa.ArrowException, reason='Parquet does not support negative scale'
)
def test_decimal_roundtrip_negative_scale(tempdir):
    """A decimal with negative scale (1.23E4) is unsupported by Parquet and
    is expected to fail with ArrowException (hence the xfail marker)."""
    expected = pd.DataFrame({'decimal_num': [decimal.Decimal('1.23E4')]})
    filename = tempdir / 'decimals.parquet'
    string_filename = str(filename)
    t = pa.Table.from_pandas(expected)
    _write_table(t, string_filename)
    result_table = _read_table(string_filename)
    result = result_table.to_pandas()
    tm.assert_frame_equal(result, expected)
@pytest.mark.pandas
def test_parquet_writer_context_obj(tempdir):
    """ParquetWriter used as a context manager must close the file so that
    every table written inside the block is readable afterwards."""
    df = _test_dataframe(100)
    df['unique_id'] = 0
    arrow_table = pa.Table.from_pandas(df, preserve_index=False)
    out = pa.BufferOutputStream()
    with pq.ParquetWriter(out, arrow_table.schema, version='2.0') as writer:
        frames = []
        for i in range(10):
            # Vary a column per iteration so each written table differs.
            df['unique_id'] = i
            arrow_table = pa.Table.from_pandas(df, preserve_index=False)
            writer.write_table(arrow_table)
            frames.append(df.copy())
    buf = out.getvalue()
    result = _read_table(pa.BufferReader(buf))
    expected = pd.concat(frames, ignore_index=True)
    tm.assert_frame_equal(result.to_pandas(), expected)
@pytest.mark.pandas
def test_parquet_writer_context_obj_with_exception(tempdir):
    """An exception raised inside the ParquetWriter context must still leave
    the already-written tables readable (the writer closes on __exit__).

    BUGFIX: the original ``try/except`` swallowed the control flow — if the
    ValueError was never raised the test passed vacuously.  Using
    ``pytest.raises`` makes the raise itself part of the assertion.
    """
    df = _test_dataframe(100)
    df['unique_id'] = 0
    arrow_table = pa.Table.from_pandas(df, preserve_index=False)
    out = pa.BufferOutputStream()
    error_text = 'Artificial Error'
    frames = []
    with pytest.raises(ValueError, match=error_text):
        with pq.ParquetWriter(out,
                              arrow_table.schema,
                              version='2.0') as writer:
            for i in range(10):
                df['unique_id'] = i
                arrow_table = pa.Table.from_pandas(df, preserve_index=False)
                writer.write_table(arrow_table)
                frames.append(df.copy())
                if i == 5:
                    raise ValueError(error_text)
    # Only the tables written before the exception (i = 0..5) are expected.
    buf = out.getvalue()
    result = _read_table(pa.BufferReader(buf))
    expected = pd.concat(frames, ignore_index=True)
    tm.assert_frame_equal(result.to_pandas(), expected)
@pytest.mark.pandas
def test_zlib_compression_bug():
    """Regression test for ARROW-3514 ("zlib deflate failed, output buffer
    too small"): a tiny gzip-compressed table must round-trip cleanly."""
    source = pa.Table.from_arrays([pa.array(['abc', 'def'])], ['some_col'])
    buf = io.BytesIO()
    pq.write_table(source, buf, compression='gzip')
    buf.seek(0)
    restored = pq.read_table(buf)
    tm.assert_frame_equal(restored.to_pandas(), source.to_pandas())
@pytest.mark.pandas
def test_merging_parquet_tables_with_different_pandas_metadata(tempdir):
    """ARROW-3728: tables whose schemas differ only in their (pandas)
    metadata must be appendable to the same ParquetWriter.

    BUGFIX: the original never closed the writer, leaking the file handle
    and leaving the Parquet footer unwritten; the context manager closes it.
    """
    schema = pa.schema([
        pa.field('int', pa.int16()),
        pa.field('float', pa.float32()),
        pa.field('string', pa.string())
    ])
    df1 = pd.DataFrame({
        'int': np.arange(3, dtype=np.uint8),
        'float': np.arange(3, dtype=np.float32),
        'string': ['ABBA', 'EDDA', 'ACDC']
    })
    df2 = pd.DataFrame({
        'int': [4, 5],
        'float': [1.1, None],
        'string': [None, None]
    })
    table1 = pa.Table.from_pandas(df1, schema=schema, preserve_index=False)
    table2 = pa.Table.from_pandas(df2, schema=schema, preserve_index=False)
    # Schemas are equal except for the pandas metadata block.
    assert not table1.schema.equals(table2.schema)
    assert table1.schema.equals(table2.schema, check_metadata=False)
    with pq.ParquetWriter(tempdir / 'merged.parquet', schema=schema) as writer:
        writer.write_table(table1)
        writer.write_table(table2)
def test_empty_row_groups(tempdir):
    """ARROW-3020: a file consisting solely of zero-row row groups must be
    readable row group by row group."""
    table = pa.Table.from_arrays([pa.array([], type='int32')], ['f0'])
    path = tempdir / 'empty_row_groups.parquet'
    num_groups = 3
    with pq.ParquetWriter(path, table.schema) as writer:
        for i in range(num_groups):
            writer.write_table(table)
    reader = pq.ParquetFile(path)
    assert reader.metadata.num_row_groups == num_groups
    for i in range(num_groups):
        assert reader.read_row_group(i).equals(table)
@pytest.mark.pandas
def test_parquet_writer_with_caller_provided_filesystem():
    """ParquetWriter must open its target through a caller-supplied
    filesystem object, and must reject a filesystem when ``where`` is
    already a file-like object.

    BUGFIX: the final assertion compared ``str(err_info)`` — the repr of the
    ExceptionInfo (raise location), which never equals the message — instead
    of ``str(err_info.value)``.
    """
    out = pa.BufferOutputStream()
    class CustomFS(FileSystem):
        # Minimal stub: records the open() call and hands back the shared
        # in-memory output stream.
        def __init__(self):
            self.path = None
            self.mode = None
        def open(self, path, mode='rb'):
            self.path = path
            self.mode = mode
            return out
    fs = CustomFS()
    fname = 'expected_fname.parquet'
    df = _test_dataframe(100)
    table = pa.Table.from_pandas(df, preserve_index=False)
    with pq.ParquetWriter(fname, table.schema, filesystem=fs, version='2.0') \
            as writer:
        writer.write_table(table)
    assert fs.path == fname
    assert fs.mode == 'wb'
    assert out.closed
    buf = out.getvalue()
    table_read = _read_table(pa.BufferReader(buf))
    df_read = table_read.to_pandas()
    tm.assert_frame_equal(df_read, df)
    # Should raise ValueError when filesystem is passed with file-like object
    with pytest.raises(ValueError) as err_info:
        pq.ParquetWriter(pa.BufferOutputStream(), table.schema, filesystem=fs)
    expected_msg = ("filesystem passed but where is file-like, so"
                    " there is nothing to open with filesystem.")
    assert str(err_info.value) == expected_msg
def test_writing_empty_lists():
    """ARROW-2591: writing a table of all-empty list arrays used to
    segfault in pq.write_table."""
    arr1 = pa.array([[], []], pa.list_(pa.int32()))
    table = pa.Table.from_arrays([arr1], ['list(int32)'])
    _check_roundtrip(table)
def test_write_nested_zero_length_array_chunk_failure():
    """ARROW-3792: round-trip a table whose first chunk is zero-length and
    whose columns come from flattened struct arrays."""
    cols = OrderedDict(
        int32=pa.int32(),
        list_string=pa.list_(pa.string())
    )
    data = [[], [OrderedDict(int32=1, list_string=('G',)), ]]
    # This produces a table with a column like
    # <Column name='list_string' type=ListType(list<item: string>)>
    # [
    #   [],
    #   [
    #     [
    #       "G"
    #     ]
    #   ]
    # ]
    #
    # Each column is a ChunkedArray with 2 elements
    my_arrays = [pa.array(batch, type=pa.struct(cols)).flatten()
                 for batch in data]
    my_batches = [pa.RecordBatch.from_arrays(batch, pa.schema(cols))
                  for batch in my_arrays]
    tbl = pa.Table.from_batches(my_batches, pa.schema(cols))
    _check_roundtrip(tbl)
@pytest.mark.pandas
def test_partitioned_dataset(tempdir):
    """Reading a partitioned dataset and rewriting it as a single Parquet
    file must not crash (regression for the segfault below)."""
    # ARROW-3208: Segmentation fault when reading a Parquet partitioned dataset
    # to a Parquet file
    path = tempdir / "ARROW-3208"
    df = pd.DataFrame({
        'one': [-1, 10, 2.5, 100, 1000, 1, 29.2],
        'two': [-1, 10, 2, 100, 1000, 1, 11],
        'three': [0, 0, 0, 0, 0, 0, 0]
    })
    table = pa.Table.from_pandas(df)
    pq.write_to_dataset(table, root_path=str(path),
                        partition_cols=['one', 'two'])
    table = pq.ParquetDataset(path).read()
    pq.write_table(table, path / "output.parquet")
def test_read_column_invalid_index():
    """read_column must return the requested column and raise for any index
    outside [0, num_columns)."""
    table = pa.table([pa.array([4, 5]), pa.array(["foo", "bar"])],
                     names=['ints', 'strs'])
    bio = pa.BufferOutputStream()
    pq.write_table(table, bio)
    f = pq.ParquetFile(bio.getvalue())
    # Valid indices return the stored values.
    expected_columns = {0: [4, 5], 1: ["foo", "bar"]}
    for idx, values in expected_columns.items():
        assert f.reader.read_column(idx).to_pylist() == values
    # One below and one past the valid range must both be rejected.
    for bad_index in (-1, 2):
        with pytest.raises((ValueError, IndexError)):
            f.reader.read_column(bad_index)
def test_direct_read_dictionary():
    """ARROW-3325: read_dictionary=['f0'] must return the column already
    dictionary-encoded by the Parquet reader."""
    repeats = 10
    nunique = 5
    data = [
        [tm.rands(10) for i in range(nunique)] * repeats,
    ]
    table = pa.table(data, names=['f0'])
    bio = pa.BufferOutputStream()
    pq.write_table(table, bio)
    contents = bio.getvalue()
    result = pq.read_table(pa.BufferReader(contents),
                           read_dictionary=['f0'])
    # Compute dictionary-encoded subfield
    expected = pa.table([table[0].dictionary_encode()], names=['f0'])
    assert result.equals(expected)
def test_dataset_read_dictionary(tempdir):
    """read_dictionary must also apply when reading a multi-file dataset;
    chunk order across files is not guaranteed, so accept either order."""
    path = tempdir / "ARROW-3325-dataset"
    t1 = pa.table([[tm.rands(10) for i in range(5)] * 10], names=['f0'])
    t2 = pa.table([[tm.rands(10) for i in range(5)] * 10], names=['f0'])
    pq.write_to_dataset(t1, root_path=str(path))
    pq.write_to_dataset(t2, root_path=str(path))
    result = pq.ParquetDataset(path, read_dictionary=['f0']).read()
    # The order of the chunks is non-deterministic
    ex_chunks = [t1[0].chunk(0).dictionary_encode(),
                 t2[0].chunk(0).dictionary_encode()]
    assert result[0].num_chunks == 2
    c0, c1 = result[0].chunk(0), result[0].chunk(1)
    if c0.equals(ex_chunks[0]):
        assert c1.equals(ex_chunks[1])
    else:
        assert c0.equals(ex_chunks[1])
        assert c1.equals(ex_chunks[0])
def test_direct_read_dictionary_subfield():
    """Dictionary-encode a nested list item column via its flattened path
    ('f0.list.item') and compare against a manually rebuilt list array."""
    repeats = 10
    nunique = 5
    data = [
        [[tm.rands(10)] for i in range(nunique)] * repeats,
    ]
    table = pa.table(data, names=['f0'])
    bio = pa.BufferOutputStream()
    pq.write_table(table, bio)
    contents = bio.getvalue()
    result = pq.read_table(pa.BufferReader(contents),
                           read_dictionary=['f0.list.item'])
    arr = pa.array(data[0])
    values_as_dict = arr.values.dictionary_encode()
    # Parquet returns int32 indices, so cast before rebuilding the array.
    inner_indices = values_as_dict.indices.cast('int32')
    new_values = pa.DictionaryArray.from_arrays(inner_indices,
                                                values_as_dict.dictionary)
    # 50 single-element lists -> 51 offsets.
    offsets = pa.array(range(51), type='int32')
    expected_arr = pa.ListArray.from_arrays(offsets, new_values)
    expected = pa.table([expected_arr], names=['f0'])
    assert result.equals(expected)
    assert result[0].num_chunks == 1
@pytest.mark.pandas
def test_dataset_metadata(tempdir):
    """The metadata_collector entries from write_to_dataset must match the
    metadata read back from the dataset pieces (modulo serialized_size)."""
    path = tempdir / "ARROW-1983-dataset"
    # create and write a test dataset
    df = pd.DataFrame({
        'one': [1, 2, 3],
        'two': [-1, -2, -3],
        'three': [[1, 2], [2, 3], [3, 4]],
    })
    table = pa.Table.from_pandas(df)
    metadata_list = []
    pq.write_to_dataset(table, root_path=str(path),
                        partition_cols=['one', 'two'],
                        metadata_collector=metadata_list)
    # open the dataset and collect metadata from pieces:
    dataset = pq.ParquetDataset(path)
    metadata_list2 = [p.get_metadata() for p in dataset.pieces]
    # compare metadata list content:
    assert len(metadata_list) == len(metadata_list2)
    for md, md2 in zip(metadata_list, metadata_list2):
        d = md.to_dict()
        d2 = md2.to_dict()
        # serialized_size is initialized in the reader:
        assert d.pop('serialized_size') == 0
        assert d2.pop('serialized_size') > 0
        assert d == d2
def test_parquet_file_too_small(tempdir):
    """Files shorter than the Parquet footer (0 or 4 bytes) must raise
    ArrowIOError with a message mentioning the actual size."""
    path = str(tempdir / "test.parquet")
    with pytest.raises(pa.ArrowIOError,
                       match='size is 0 bytes'):
        # Create an empty file, then attempt to read it.
        with open(path, 'wb') as f:
            pass
        pq.read_table(path)
    with pytest.raises(pa.ArrowIOError,
                       match='size is 4 bytes'):
        # 4 bytes is still shorter than a valid footer + magic.
        with open(path, 'wb') as f:
            f.write(b'ffff')
        pq.read_table(path)
@pytest.mark.pandas
def test_categorical_index_survives_roundtrip():
    """A categorical DataFrame index must come back as a CategoricalIndex
    after a Parquet round trip."""
    # ARROW-3652, addressed by ARROW-3246
    df = pd.DataFrame([['a', 'b'], ['c', 'd']], columns=['c1', 'c2'])
    df['c1'] = df['c1'].astype('category')
    df = df.set_index(['c1'])
    table = pa.Table.from_pandas(df)
    bos = pa.BufferOutputStream()
    pq.write_table(table, bos)
    ref_df = pq.read_pandas(bos.getvalue()).to_pandas()
    assert isinstance(ref_df.index, pd.CategoricalIndex)
    assert ref_df.index.equals(df.index)
def test_dictionary_array_automatically_read():
    """A dictionary column whose dictionary exceeds the reader threshold is
    returned dictionary-encoded without asking via read_dictionary."""
    # ARROW-3246
    # Make a large dictionary, a little over 4MB of data
    dict_length = 4000
    dict_values = pa.array([('x' * 1000 + '_{}'.format(i))
                            for i in range(dict_length)])
    num_chunks = 10
    chunk_size = 100
    chunks = []
    for i in range(num_chunks):
        indices = np.random.randint(0, dict_length,
                                    size=chunk_size).astype(np.int32)
        chunks.append(pa.DictionaryArray.from_arrays(pa.array(indices),
                                                     dict_values))
    table = pa.table([pa.chunked_array(chunks)], names=['f0'])
    bio = pa.BufferOutputStream()
    pq.write_table(table, bio)
    contents = bio.getvalue()
    result = pq.read_table(pa.BufferReader(contents))
    assert result.equals(table)
    # The only key in the metadata was the Arrow schema key
    assert result.schema.metadata is None
@pytest.mark.pandas
def test_pandas_categorical_na_type_row_groups():
    """An all-null categorical column must write across multiple row groups
    without error; it reads back as its non-categorical equivalent."""
    # ARROW-5085
    df = pd.DataFrame({"col": [None] * 100, "int": [1.0] * 100})
    df_category = df.astype({"col": "category", "int": "category"})
    table = pa.Table.from_pandas(df)
    table_cat = pa.Table.from_pandas(df_category)
    buf = pa.BufferOutputStream()
    # it works
    pq.write_table(table_cat, buf, version="2.0", chunk_size=10)
    result = pq.read_table(buf.getvalue())
    # Result is non-categorical
    assert result[0].equals(table[0])
    assert result[1].equals(table[1])
@pytest.mark.pandas
def test_pandas_categorical_roundtrip():
    """A pandas Categorical (with an unobserved category and a null code)
    must round-trip back to the same dtype and categories."""
    # ARROW-5480, this was enabled by ARROW-3246
    # Have one of the categories unobserved and include a null (-1)
    codes = np.array([2, 0, 0, 2, 0, -1, 2], dtype='int32')
    categories = ['foo', 'bar', 'baz']
    df = pd.DataFrame({'x': pd.Categorical.from_codes(
        codes, categories=categories)})
    buf = pa.BufferOutputStream()
    pq.write_table(pa.table(df), buf)
    result = pq.read_table(buf.getvalue()).to_pandas()
    assert result.x.dtype == 'category'
    assert (result.x.cat.categories == categories).all()
    tm.assert_frame_equal(result, df)
@pytest.mark.pandas
def test_multi_dataset_metadata(tempdir):
    """Merge per-file metadata via append_row_groups into a single
    ``_metadata`` file and verify it reads back identically."""
    filenames = ["ARROW-1983-dataset.0", "ARROW-1983-dataset.1"]
    metapath = str(tempdir / "_metadata")
    # create a test dataset
    df = pd.DataFrame({
        'one': [1, 2, 3],
        'two': [-1, -2, -3],
        'three': [[1, 2], [2, 3], [3, 4]],
    })
    table = pa.Table.from_pandas(df)
    # write dataset twice and collect/merge metadata
    _meta = None
    for filename in filenames:
        meta = []
        pq.write_table(table, str(tempdir / filename),
                       metadata_collector=meta)
        meta[0].set_file_path(filename)
        if _meta is None:
            _meta = meta[0]
        else:
            _meta.append_row_groups(meta[0])
    # Write merged metadata-only file
    with open(metapath, "wb") as f:
        _meta.write_metadata_file(f)
    # Read back the metadata
    meta = pq.read_metadata(metapath)
    md = meta.to_dict()
    _md = _meta.to_dict()
    for key in _md:
        if key != 'serialized_size':
            assert _md[key] == md[key]
    assert _md['num_columns'] == 3
    assert _md['num_rows'] == 6
    assert _md['num_row_groups'] == 2
    # serialized_size is only populated when read from a file.
    assert _md['serialized_size'] == 0
    assert md['serialized_size'] > 0
@pytest.mark.pandas
def test_filter_before_validate_schema(tempdir):
    """Partition filters must be applied before schema validation so files
    excluded by the filter cannot cause a schema mismatch error."""
    # ARROW-4076 apply filter before schema validation
    # to avoid checking unneeded schemas
    # create partitioned dataset with mismatching schemas which would
    # otherwise raise if first validation all schemas
    dir1 = tempdir / 'A=0'
    dir1.mkdir()
    table1 = pa.Table.from_pandas(pd.DataFrame({'B': [1, 2, 3]}))
    pq.write_table(table1, dir1 / 'data.parquet')
    dir2 = tempdir / 'A=1'
    dir2.mkdir()
    table2 = pa.Table.from_pandas(pd.DataFrame({'B': ['a', 'b', 'c']}))
    pq.write_table(table2, dir2 / 'data.parquet')
    # read single file using filter
    table = pq.read_table(tempdir, filters=[[('A', '==', 0)]])
    assert table.column('B').equals(pa.chunked_array([[1, 2, 3]]))
| true
| true
|
f7175c38c11862b31ec72a419e525c555b34bcf3
| 16,570
|
py
|
Python
|
batch/processor_BondLedger_JP.py
|
BoostryJP/ibet-Issuer
|
efc599f8784be06588cf3ad8f239d36f24fdf3fa
|
[
"Apache-2.0"
] | 1
|
2021-06-16T03:38:07.000Z
|
2021-06-16T03:38:07.000Z
|
batch/processor_BondLedger_JP.py
|
BoostryJP/ibet-Issuer
|
efc599f8784be06588cf3ad8f239d36f24fdf3fa
|
[
"Apache-2.0"
] | 17
|
2021-04-26T03:28:40.000Z
|
2021-11-24T07:15:55.000Z
|
batch/processor_BondLedger_JP.py
|
BoostryJP/ibet-Issuer
|
efc599f8784be06588cf3ad8f239d36f24fdf3fa
|
[
"Apache-2.0"
] | 1
|
2021-05-30T14:09:11.000Z
|
2021-05-30T14:09:11.000Z
|
"""
Copyright BOOSTRY Co., Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
"""
import base64
from Crypto.Cipher import PKCS1_OAEP
from Crypto.PublicKey import RSA
from datetime import (
datetime,
timezone,
timedelta
)
import json
import os
import sys
import time
from eth_utils import to_checksum_address
from sqlalchemy import (
create_engine,
func
)
from sqlalchemy.orm import (
sessionmaker,
scoped_session
)
from web3 import Web3
from web3.middleware import geth_poa_middleware
path = os.path.join(os.path.dirname(__file__), '../')
sys.path.append(path)
from app.utils import ContractUtils
from app.models import (
Token,
UTXO,
BondLedger,
BondLedgerBlockNumber,
Issuer,
CorporateBondLedgerTemplate,
PersonalInfo as PersonalInfoModel
)
from config import Config
import log
# Batch process identifier used for log tagging.
process_name = "PROCESSOR-BondLedger"
LOG = log.get_logger(process_name=process_name)
# Web3 client; PoA middleware is injected for PoA-consensus chains.
web3 = Web3(Web3.HTTPProvider(Config.WEB3_HTTP_PROVIDER))
web3.middleware_onion.inject(geth_poa_middleware, layer=0)
# Scoped DB session bound to the configured engine.
engine = create_engine(Config.SQLALCHEMY_DATABASE_URI, echo=False)
db_session = scoped_session(sessionmaker())
db_session.configure(bind=engine)
# Japan Standard Time (UTC+9), used for ledger dates.
JST = timezone(timedelta(hours=+9), "JST")
class Sinks:
    """Fan-out dispatcher: every event is forwarded to each registered sink
    in registration order."""

    def __init__(self):
        # Downstream sinks; notified in the order they were registered.
        self.sinks = []

    def register(self, sink):
        """Add *sink* to the list of receivers."""
        self.sinks.append(sink)

    def _broadcast(self, method_name, *args, **kwargs):
        # Dispatch helper: call the named handler on every sink.
        for receiver in self.sinks:
            getattr(receiver, method_name)(*args, **kwargs)

    def on_utxo(self, *args, **kwargs):
        """Broadcast a UTXO update event."""
        self._broadcast("on_utxo", *args, **kwargs)

    def on_bond_ledger(self, *args, **kwargs):
        """Broadcast a bond-ledger creation event."""
        self._broadcast("on_bond_ledger", *args, **kwargs)

    def flush(self, *args, **kwargs):
        """Ask every sink to persist its pending changes."""
        self._broadcast("flush", *args, **kwargs)
class DBSink:
    """Persistence sink: writes UTXO movements and generated bond ledgers
    to the relational DB.  Changes are committed only in :meth:`flush`."""
    def __init__(self, db):
        # SQLAlchemy session (scoped) shared with the batch driver.
        self.db = db
    def on_utxo(self, spent: bool, transaction_hash: str,
                account_address: str, token_address: str, amount: int,
                block_timestamp: datetime, transaction_date_jst: str):
        """Record one side of a token transfer as a UTXO movement.

        :param spent: False appends a new UTXO for the receiver;
            True consumes the sender's existing UTXOs oldest-first.
        :param transaction_hash: hash of the originating transaction
        :param account_address: affected account
        :param token_address: token contract address
        :param amount: transferred amount
        :param block_timestamp: block timestamp (UTC)
        :param transaction_date_jst: block date formatted in JST
        """
        if spent is False:
            LOG.debug(f"Append UTXO: account_address={account_address}, token_address={token_address}, amount={amount}")
            # Look up by transaction hash so reprocessing the same event
            # does not insert a duplicate UTXO.
            utxo = self.db.query(UTXO). \
                filter(UTXO.transaction_hash == transaction_hash). \
                first()
            if utxo is None:
                utxo = UTXO()
                utxo.transaction_hash = transaction_hash
                utxo.account_address = account_address
                utxo.token_address = token_address
                utxo.amount = amount
                utxo.block_timestamp = block_timestamp
                utxo.transaction_date_jst = transaction_date_jst
                self.db.add(utxo)
        else:
            LOG.debug(f"Spend UTXO: account_address={account_address}, token_address={token_address}, amount={amount}")
            # Consume the sender's UTXOs in block-timestamp order (FIFO).
            utxo_list = self.db.query(UTXO). \
                filter(UTXO.account_address == account_address). \
                filter(UTXO.token_address == token_address). \
                filter(UTXO.amount > 0). \
                order_by(UTXO.block_timestamp). \
                all()
            spend_amount = amount
            for utxo in utxo_list:
                utxo_amount = utxo.amount
                if spend_amount <= 0:
                    # Nothing left to spend; remaining UTXOs are untouched.
                    pass
                elif utxo.amount <= spend_amount:
                    # This UTXO is fully consumed.
                    utxo.amount = 0
                    spend_amount = spend_amount - utxo_amount
                    self.db.merge(utxo)
                else:
                    # This UTXO is partially consumed; spending is done.
                    utxo.amount = utxo_amount - spend_amount
                    spend_amount = 0
                    self.db.merge(utxo)
    def on_bond_ledger(self, token):
        """Build a JP corporate bond ledger snapshot for *token* and store
        it as a BondLedger row (JSON with Japanese field names).

        :param token: web3 token contract
        """
        #########################################
        # Ledger creation date (JST)
        #########################################
        created_date = datetime.utcnow().replace(tzinfo=timezone.utc).astimezone(JST).strftime("%Y/%m/%d")
        #########################################
        # Bond details (from the issuer's ledger template, if registered)
        #########################################
        ledger_template = self.db.query(CorporateBondLedgerTemplate). \
            filter(CorporateBondLedgerTemplate.token_address == token.address). \
            first()
        if ledger_template is not None:
            bond_description = {
                "社債名称": ledger_template.bond_name,
                "社債の説明": ledger_template.bond_description,
                "社債の総額": ledger_template.total_amount,
                "各社債の金額": ledger_template.face_value,
                "払込情報": {
                    "払込金額": ledger_template.payment_amount,
                    "払込日": ledger_template.payment_date,
                    "払込状況": ledger_template.payment_status
                },
                "社債の種類": ledger_template.bond_type
            }
        else:
            # No template registered: emit empty/None placeholders.
            bond_description = {
                "社債名称": "",
                "社債の説明": "",
                "社債の総額": None,
                "各社債の金額": None,
                "払込情報": {
                    "払込金額": None,
                    "払込日": "",
                    "払込状況": None
                },
                "社債の種類": ""
            }
        #########################################
        # Ledger administrator
        #########################################
        if ledger_template is not None:
            ledger_admin = {
                "氏名または名称": ledger_template.ledger_admin_name,
                "住所": ledger_template.ledger_admin_address,
                "事務取扱場所": ledger_template.ledger_admin_location
            }
        else:
            ledger_admin = {
                "氏名または名称": "",
                "住所": "",
                "事務取扱場所": ""
            }
        #########################################
        # Creditor (bondholder) information
        #########################################
        issuer_address = token.functions.owner().call()
        face_value = token.functions.faceValue().call()
        # Aggregate unspent amounts per holder and acquisition date.
        utxo_list = self.db.query(UTXO.account_address, UTXO.token_address, func.sum(UTXO.amount),
                                  UTXO.transaction_date_jst). \
            filter(UTXO.token_address == token.address). \
            filter(UTXO.amount > 0). \
            group_by(UTXO.account_address, UTXO.token_address, UTXO.transaction_date_jst). \
            all()
        creditors = []
        for utxo in utxo_list:
            account_address = utxo[0]
            amount = utxo[2]
            transaction_date_jst = utxo[3]
            # Default entry for this holder; name/address filled in below.
            details = {
                "アカウントアドレス": account_address,
                "氏名または名称": "",
                "住所": "",
                "社債金額": face_value * amount,
                "取得日": transaction_date_jst,
                "金銭以外の財産給付情報": {
                    "財産の価格": "-",
                    "給付日": "-"
                },
                "債権相殺情報": {
                    "相殺する債権額": "-",
                    "相殺日": "-"
                },
                "質権情報": {
                    "質権者の氏名または名称": "-",
                    "質権者の住所": "-",
                    "質権の目的である債券": "-"
                },
                "備考": "-"
            }
            # Fetch personal information (DB first).
            personal_info_json = self.__get_personalinfo_from_db(
                account_address=account_address,
                issuer_address=issuer_address
            )
            if personal_info_json is None:  # Not in the DB: fall back to the on-chain PersonalInfo contract
                personal_info_contract_address = token.functions.personalInfoAddress().call()
                personal_info_json = self.__get_personalinfo_from_contract(
                    account_address=account_address,
                    issuer_address=issuer_address,
                    personal_info_contract_address=personal_info_contract_address
                )
            if personal_info_json is not None:
                name = personal_info_json.get("name", "")  # holder name
                address = personal_info_json.get("address", "")  # holder address
            else:
                name = ""
                address = ""
            # Set holder name/address on the entry.
            details["氏名または名称"] = name
            details["住所"] = address
            creditors.append(details)
        # Persist the assembled ledger as UTF-8 JSON.
        ledger = {
            "社債原簿作成日": created_date,
            "社債情報": bond_description,
            "社債原簿管理人": ledger_admin,
            "社債権者": creditors
        }
        bond_ledger = BondLedger(
            token_address=token.address,
            ledger=json.dumps(ledger, ensure_ascii=False).encode()
        )
        self.db.add(bond_ledger)
    def __get_personalinfo_from_db(self, account_address: str, issuer_address: str):
        """Fetch personal information from the DB.

        :param account_address: account address
        :param issuer_address: issuer address
        :return: personal information JSON, or None if not registered
        """
        personal_info_record = self.db.query(PersonalInfoModel). \
            filter(PersonalInfoModel.account_address == to_checksum_address(account_address)). \
            filter(PersonalInfoModel.issuer_address == to_checksum_address(issuer_address)). \
            first()
        if personal_info_record is not None:
            personal_info_json = personal_info_record.personal_info
        else:
            personal_info_json = None
        return personal_info_json
    def __get_personalinfo_from_contract(self, account_address: str, issuer_address: str,
                                         personal_info_contract_address: str):
        """Fetch and decrypt personal information from the contract.

        :param account_address: account address
        :param issuer_address: issuer address
        :param personal_info_contract_address: PersonalInfo contract address
        :return: personal information JSON, or None on any failure
        """
        personal_info_json = None
        try:
            issuer = self.db.query(Issuer).filter(Issuer.eth_account == issuer_address).first()
            personal_info_contract = ContractUtils.get_contract('PersonalInfo', personal_info_contract_address)
            cipher = None
            try:
                # Issuer's RSA private key decrypts the on-chain blob.
                key = RSA.importKey(issuer.encrypted_rsa_private_key, Config.RSA_PASSWORD)
                cipher = PKCS1_OAEP.new(key)
            except Exception as err:
                LOG.error(f"Cannot open the private key: {err}")
            # Fetch the encrypted personal information from the contract.
            personal_info = personal_info_contract.functions. \
                personal_info(account_address, issuer_address). \
                call()
            encrypted_personal_info = personal_info[2]
            if encrypted_personal_info != '' and cipher is not None:  # if empty, the defaults stand
                # Decrypt the personal information.
                ciphertext = base64.decodebytes(encrypted_personal_info.encode('utf-8'))
                # NOTE:
                # When encrypting with RSA in JavaScript, a leading 0x00 byte
                # is dropped before the data is handed over.  Decrypting that
                # as-is raises ValueError (Ciphertext with incorrect length),
                # so the leading 00 is re-added before decrypting.
                if len(ciphertext) == 1279:
                    hex_fixed = "00" + ciphertext.hex()
                    ciphertext = base64.b16decode(hex_fixed.upper())
                message = cipher.decrypt(ciphertext)
                personal_info_json = json.loads(message)
        except Exception as err:
            LOG.error(f"Failed to decrypt: {err} : account_address = {account_address}")
        return personal_info_json
    def flush(self):
        # Commit all pending UTXO / ledger changes in one transaction.
        self.db.commit()
class Processor:
    """Batch processor: scans new blocks for Transfer events on issued bond
    tokens, updates UTXOs and regenerates affected bond ledgers."""
    def __init__(self, db, sink):
        # Event sink (Sinks fan-out) receiving UTXO/ledger updates.
        self.sink = sink
        self.db = db
        # web3 contract objects for all issued bond tokens.
        self.token_list = []
    def process(self):
        """Run one sync cycle: process blocks newer than the stored
        checkpoint, then persist the new checkpoint and flush."""
        self.__refresh_token_list()
        ledger_block_number = self.__get_ledger_blocknumber()
        latest_block = web3.eth.blockNumber
        if ledger_block_number >= latest_block:
            LOG.debug("skip process")
            pass
        else:
            LOG.debug("syncing from={}, to={}".format(ledger_block_number + 1, latest_block))
            for token in self.token_list:
                event_triggered = self.__create_utxo(token, ledger_block_number + 1, latest_block)
                if event_triggered:  # a UTXO update event occurred for this token
                    self.__create_ledger(token)
            self.__set_ledger_blocknumber(latest_block)
            self.sink.flush()
    def __refresh_token_list(self):
        """Refresh the list of issued bond token contracts.

        :return: None
        """
        self.token_list = []
        issued_tokens = self.db.query(Token). \
            filter(Token.template_id == Config.TEMPLATE_ID_SB). \
            all()
        for issued_token in issued_tokens:
            if issued_token.token_address is not None:
                # The ABI is stored as a Python-repr string; normalize it
                # to valid JSON before parsing.
                abi = json.loads(issued_token.abi.replace("'", '"').replace('True', 'true').replace('False', 'false'))
                token_contract = web3.eth.contract(
                    address=issued_token.token_address,
                    abi=abi
                )
                self.token_list.append(token_contract)
    def __get_ledger_blocknumber(self):
        # Returns the last processed block number (0 before the first run).
        block_number = self.db.query(BondLedgerBlockNumber).first()
        if block_number is None:
            return 0
        else:
            return block_number.latest_block_number
    def __set_ledger_blocknumber(self, block_number: int):
        """Store the latest processed block number.

        :param block_number: block number to store
        :return: None
        """
        ledger_block = self.db.query(BondLedgerBlockNumber).first()
        if ledger_block is None:
            ledger_block = BondLedgerBlockNumber()
            ledger_block.latest_block_number = block_number
        else:
            ledger_block.latest_block_number = block_number
        self.db.merge(ledger_block)
    def __create_utxo(self, token, from_block: int, to_block: int) -> bool:
        """Create/spend UTXOs for Transfer events in the block range.

        :param token: token contract
        :param from_block: from block number
        :param to_block: to block number
        :return: True if at least one Transfer event was found
        """
        event_triggered = False
        events = token.events.Transfer.getLogs(
            fromBlock=from_block,
            toBlock=to_block
        )
        for event in events:
            event_triggered = True
            transaction_hash = event["transactionHash"].hex()
            args = event["args"]
            from_account = args.get("from", Config.ZERO_ADDRESS)
            to_account = args.get("to", Config.ZERO_ADDRESS)
            amount = args.get("value")
            block_timestamp = datetime.fromtimestamp(
                web3.eth.getBlock(event['blockNumber'])['timestamp']
            )
            block_timestamp_jst = block_timestamp.replace(tzinfo=timezone.utc). \
                astimezone(JST)
            transaction_date_jst = block_timestamp_jst.strftime("%Y/%m/%d")
            # Guard against missing/overflow-sized transfer values.
            if amount is not None and amount <= sys.maxsize:
                # Update UTXOs: sender side (spend)
                self.sink.on_utxo(
                    spent=True,
                    transaction_hash=transaction_hash,
                    token_address=token.address,
                    account_address=from_account,
                    amount=amount,
                    block_timestamp=block_timestamp,
                    transaction_date_jst=transaction_date_jst
                )
                # Update UTXOs: receiver side (append)
                self.sink.on_utxo(
                    spent=False,
                    transaction_hash=transaction_hash,
                    token_address=token.address,
                    account_address=to_account,
                    amount=amount,
                    block_timestamp=block_timestamp,
                    transaction_date_jst=transaction_date_jst
                )
        return event_triggered
    def __create_ledger(self, token):
        """Generate a bond ledger snapshot for *token*.

        :param token: token contract
        """
        self.sink.on_bond_ledger(token=token)
# Wire up the processing pipeline and run forever.
sinks = Sinks()
sinks.register(DBSink(db_session))
processor = Processor(db=db_session, sink=sinks)
LOG.info("Service started successfully")
while True:
    try:
        processor.process()
        LOG.debug("processed")
    except Exception as ex:
        # Log and keep running; the next cycle retries from the checkpoint.
        LOG.exception(ex)
    # Sleep between polling cycles (configured interval; default 1 minute)
    time.sleep(Config.INTERVAL_PROCESSOR_BOND_LEDGER_JP)
| 34.957806
| 120
| 0.559626
|
import base64
from Crypto.Cipher import PKCS1_OAEP
from Crypto.PublicKey import RSA
from datetime import (
datetime,
timezone,
timedelta
)
import json
import os
import sys
import time
from eth_utils import to_checksum_address
from sqlalchemy import (
create_engine,
func
)
from sqlalchemy.orm import (
sessionmaker,
scoped_session
)
from web3 import Web3
from web3.middleware import geth_poa_middleware
path = os.path.join(os.path.dirname(__file__), '../')
sys.path.append(path)
from app.utils import ContractUtils
from app.models import (
Token,
UTXO,
BondLedger,
BondLedgerBlockNumber,
Issuer,
CorporateBondLedgerTemplate,
PersonalInfo as PersonalInfoModel
)
from config import Config
import log
process_name = "PROCESSOR-BondLedger"
LOG = log.get_logger(process_name=process_name)
web3 = Web3(Web3.HTTPProvider(Config.WEB3_HTTP_PROVIDER))
web3.middleware_onion.inject(geth_poa_middleware, layer=0)
engine = create_engine(Config.SQLALCHEMY_DATABASE_URI, echo=False)
db_session = scoped_session(sessionmaker())
db_session.configure(bind=engine)
JST = timezone(timedelta(hours=+9), "JST")
class Sinks:
    """Fan-out dispatcher forwarding each event to all registered sinks.

    NOTE(review): this is a comment-stripped duplicate of the Sinks class
    defined earlier in this file — likely dataset residue, not live code.
    """
    def __init__(self):
        self.sinks = []
    def register(self, sink):
        # Add a receiver; events are dispatched in registration order.
        self.sinks.append(sink)
    def on_utxo(self, *args, **kwargs):
        # Broadcast a UTXO update event.
        for sink in self.sinks:
            sink.on_utxo(*args, **kwargs)
    def on_bond_ledger(self, *args, **kwargs):
        # Broadcast a bond-ledger creation event.
        for sink in self.sinks:
            sink.on_bond_ledger(*args, **kwargs)
    def flush(self, *args, **kwargs):
        # Ask every sink to persist pending changes.
        for sink in self.sinks:
            sink.flush(*args, **kwargs)
class DBSink:
    """DB persistence sink (comment-stripped duplicate of the DBSink class
    defined earlier in this file — likely dataset residue, not live code)."""
    def __init__(self, db):
        self.db = db
    def on_utxo(self, spent: bool, transaction_hash: str,
                account_address: str, token_address: str, amount: int,
                block_timestamp: datetime, transaction_date_jst: str):
        """Record a transfer side as a UTXO movement: append for the
        receiver (spent=False) or consume oldest-first for the sender."""
        if spent is False:
            LOG.debug(f"Append UTXO: account_address={account_address}, token_address={token_address}, amount={amount}")
            # Keyed by transaction hash so reprocessing is idempotent.
            utxo = self.db.query(UTXO). \
                filter(UTXO.transaction_hash == transaction_hash). \
                first()
            if utxo is None:
                utxo = UTXO()
                utxo.transaction_hash = transaction_hash
                utxo.account_address = account_address
                utxo.token_address = token_address
                utxo.amount = amount
                utxo.block_timestamp = block_timestamp
                utxo.transaction_date_jst = transaction_date_jst
                self.db.add(utxo)
        else:
            LOG.debug(f"Spend UTXO: account_address={account_address}, token_address={token_address}, amount={amount}")
            # Consume the sender's UTXOs in block-timestamp order (FIFO).
            utxo_list = self.db.query(UTXO). \
                filter(UTXO.account_address == account_address). \
                filter(UTXO.token_address == token_address). \
                filter(UTXO.amount > 0). \
                order_by(UTXO.block_timestamp). \
                all()
            spend_amount = amount
            for utxo in utxo_list:
                utxo_amount = utxo.amount
                if spend_amount <= 0:
                    pass
                elif utxo.amount <= spend_amount:
                    utxo.amount = 0
                    spend_amount = spend_amount - utxo_amount
                    self.db.merge(utxo)
                else:
                    utxo.amount = utxo_amount - spend_amount
                    spend_amount = 0
                    self.db.merge(utxo)
    def on_bond_ledger(self, token):
        # NOTE(review): this body appears truncated/corrupted by comment
        # stripping — the bare `info_json` expression below would raise
        # NameError if called.  Compare with the fully implemented
        # on_bond_ledger earlier in this file.
        info_json
    def flush(self):
        # Commit all pending changes in one transaction.
        self.db.commit()
class Processor:
    """Polls the chain for bond-token ``Transfer`` events, forwards them to
    the sink as UTXO updates, and asks the sink to (re)create the bond
    ledger for any token whose balances changed."""
    def __init__(self, db, sink):
        self.sink = sink
        self.db = db
        # Contract handles for all issued bond tokens; rebuilt each cycle.
        self.token_list = []
    def process(self):
        """Run one sync cycle: scan new blocks since the stored checkpoint."""
        self.__refresh_token_list()
        ledger_block_number = self.__get_ledger_blocknumber()
        latest_block = web3.eth.blockNumber
        if ledger_block_number >= latest_block:
            # Already caught up (or the chain reorged backwards); nothing to do.
            LOG.debug("skip process")
            pass
        else:
            LOG.debug("syncing from={}, to={}".format(ledger_block_number + 1, latest_block))
            for token in self.token_list:
                event_triggered = self.__create_utxo(token, ledger_block_number + 1, latest_block)
                if event_triggered:
                    # Only regenerate the ledger for tokens that actually moved.
                    self.__create_ledger(token)
            self.__set_ledger_blocknumber(latest_block)
            # Persist UTXO changes and checkpoint atomically.
            self.sink.flush()
    def __refresh_token_list(self):
        """Rebuild contract handles for every issued straight-bond token."""
        self.token_list = []
        issued_tokens = self.db.query(Token). \
            filter(Token.template_id == Config.TEMPLATE_ID_SB). \
            all()
        for issued_token in issued_tokens:
            if issued_token.token_address is not None:
                # The ABI is stored as a Python-repr-ish string; normalize
                # quotes and booleans so json.loads accepts it.
                abi = json.loads(issued_token.abi.replace("'", '"').replace('True', 'true').replace('False', 'false'))
                token_contract = web3.eth.contract(
                    address=issued_token.token_address,
                    abi=abi
                )
                self.token_list.append(token_contract)
    def __get_ledger_blocknumber(self):
        # Returns the last processed block, or 0 on first run.
        block_number = self.db.query(BondLedgerBlockNumber).first()
        if block_number is None:
            return 0
        else:
            return block_number.latest_block_number
    def __set_ledger_blocknumber(self, block_number: int):
        """Upsert the single checkpoint row with the latest processed block."""
        ledger_block = self.db.query(BondLedgerBlockNumber).first()
        if ledger_block is None:
            ledger_block = BondLedgerBlockNumber()
            ledger_block.latest_block_number = block_number
        else:
            ledger_block.latest_block_number = block_number
        self.db.merge(ledger_block)
    def __create_utxo(self, token, from_block: int, to_block: int) -> bool:
        """Scan Transfer events for one token and emit paired debit/credit
        UTXO updates.  Returns True if any event was seen."""
        event_triggered = False
        events = token.events.Transfer.getLogs(
            fromBlock=from_block,
            toBlock=to_block
        )
        for event in events:
            event_triggered = True
            transaction_hash = event["transactionHash"].hex()
            args = event["args"]
            # Mint/burn events may omit one side; fall back to the zero address.
            from_account = args.get("from", Config.ZERO_ADDRESS)
            to_account = args.get("to", Config.ZERO_ADDRESS)
            amount = args.get("value")
            block_timestamp = datetime.fromtimestamp(
                web3.eth.getBlock(event['blockNumber'])['timestamp']
            )
            # Block timestamps are UTC; convert to JST for the ledger date.
            block_timestamp_jst = block_timestamp.replace(tzinfo=timezone.utc). \
                astimezone(JST)
            transaction_date_jst = block_timestamp_jst.strftime("%Y/%m/%d")
            # Guard against missing/overflowing values before touching the DB.
            if amount is not None and amount <= sys.maxsize:
                # Update UTXO (from account): debit the sender.
                self.sink.on_utxo(
                    spent=True,
                    transaction_hash=transaction_hash,
                    token_address=token.address,
                    account_address=from_account,
                    amount=amount,
                    block_timestamp=block_timestamp,
                    transaction_date_jst=transaction_date_jst
                )
                # Update UTXO (to account): credit the receiver.
                self.sink.on_utxo(
                    spent=False,
                    transaction_hash=transaction_hash,
                    token_address=token.address,
                    account_address=to_account,
                    amount=amount,
                    block_timestamp=block_timestamp,
                    transaction_date_jst=transaction_date_jst
                )
        return event_triggered
    def __create_ledger(self, token):
        # Delegates ledger generation to the sink.
        self.sink.on_bond_ledger(token=token)
# Entry point: wire the DB sink into the processor and poll forever.
sinks = Sinks()
sinks.register(DBSink(db_session))
processor = Processor(db=db_session, sink=sinks)
LOG.info("Service started successfully")
while True:
    try:
        processor.process()
        LOG.debug("processed")
    except Exception as ex:
        # Keep the daemon alive on any unexpected error; the failure is
        # logged and the next cycle retries from the stored checkpoint.
        LOG.exception(ex)
    # Poll at a fixed interval (configured; originally noted as one minute).
    time.sleep(Config.INTERVAL_PROCESSOR_BOND_LEDGER_JP)
| true
| true
|
f7175d22337c8b28777d73b05a950345e75f3ce4
| 1,993
|
py
|
Python
|
app/forms.py
|
TimothyBenger/top_lists
|
b3b5895a3a3c525e81fe167eb7d7ba46cfcbd785
|
[
"MIT"
] | null | null | null |
app/forms.py
|
TimothyBenger/top_lists
|
b3b5895a3a3c525e81fe167eb7d7ba46cfcbd785
|
[
"MIT"
] | null | null | null |
app/forms.py
|
TimothyBenger/top_lists
|
b3b5895a3a3c525e81fe167eb7d7ba46cfcbd785
|
[
"MIT"
] | null | null | null |
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField, IntegerField
from wtforms.validators import ValidationError, DataRequired, Email, EqualTo
from app.models import User
class LoginForm(FlaskForm):
    """Sign-in form: username, password and a "remember me" flag."""
    username = StringField('Username', validators=[DataRequired()])
    password = PasswordField('Password', validators=[DataRequired()])
    remember_me = BooleanField('Remember Me')
    submit = SubmitField('Sign In')
class RegistrationForm(FlaskForm):
    """Account sign-up form with uniqueness checks on username and e-mail.

    WTForms calls ``validate_<field>`` methods automatically as inline
    validators during ``form.validate()``.
    """
    username = StringField('Username', validators=[DataRequired()])
    email = StringField('Email', validators=[DataRequired(), Email()])
    password = PasswordField('Password', validators=[DataRequired()])
    password2 = PasswordField(
        'Repeat Password', validators=[DataRequired(), EqualTo('password')])
    submit = SubmitField('Register')

    def validate_username(self, username):
        """Reject usernames that already exist in the database."""
        existing = User.query.filter_by(username=username.data).first()
        if existing is not None:
            raise ValidationError('Please use a different username.')

    def validate_email(self, email):
        """Reject e-mail addresses that are already registered."""
        existing = User.query.filter_by(email=email.data).first()
        if existing is not None:
            raise ValidationError('Please use a different email address.')
class EditForm(FlaskForm):
    """Edit form for a five-entry top list: one title/author pair per slot."""
    title1 = StringField('Title', validators=[DataRequired()])
    author1 = StringField('Author', validators=[DataRequired()])
    title2 = StringField('Title', validators=[DataRequired()])
    author2 = StringField('Author', validators=[DataRequired()])
    title3 = StringField('Title', validators=[DataRequired()])
    author3 = StringField('Author', validators=[DataRequired()])
    title4 = StringField('Title', validators=[DataRequired()])
    author4 = StringField('Author', validators=[DataRequired()])
    title5 = StringField('Title', validators=[DataRequired()])
    author5 = StringField('Author', validators=[DataRequired()])
    submit = SubmitField('Submit changes')
| 44.288889
| 87
| 0.714501
|
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField, IntegerField
from wtforms.validators import ValidationError, DataRequired, Email, EqualTo
from app.models import User
class LoginForm(FlaskForm):
username = StringField('Username', validators=[DataRequired()])
password = PasswordField('Password', validators=[DataRequired()])
remember_me = BooleanField('Remember Me')
submit = SubmitField('Sign In')
class RegistrationForm(FlaskForm):
username = StringField('Username', validators=[DataRequired()])
email = StringField('Email', validators=[DataRequired(), Email()])
password = PasswordField('Password', validators=[DataRequired()])
password2 = PasswordField(
'Repeat Password', validators=[DataRequired(), EqualTo('password')])
submit = SubmitField('Register')
def validate_username(self, username):
user = User.query.filter_by(username=username.data).first()
if user is not None:
raise ValidationError('Please use a different username.')
def validate_email(self, email):
user = User.query.filter_by(email=email.data).first()
if user is not None:
raise ValidationError('Please use a different email address.')
class EditForm(FlaskForm):
title1 = StringField('Title', validators=[DataRequired()])
author1 = StringField('Author', validators=[DataRequired()])
title2 = StringField('Title', validators=[DataRequired()])
author2 = StringField('Author', validators=[DataRequired()])
title3 = StringField('Title', validators=[DataRequired()])
author3 = StringField('Author', validators=[DataRequired()])
title4 = StringField('Title', validators=[DataRequired()])
author4 = StringField('Author', validators=[DataRequired()])
title5 = StringField('Title', validators=[DataRequired()])
author5 = StringField('Author', validators=[DataRequired()])
submit = SubmitField('Submit changes')
| true
| true
|
f7175d5c72e50e87ef42937e0544adccadf5efa8
| 452
|
py
|
Python
|
lidi/signup/migrations/0002_user_conf_link.py
|
campovski/lidi
|
9699e62e70e679970816e29ca7618c9ed0146c7e
|
[
"Apache-2.0"
] | null | null | null |
lidi/signup/migrations/0002_user_conf_link.py
|
campovski/lidi
|
9699e62e70e679970816e29ca7618c9ed0146c7e
|
[
"Apache-2.0"
] | 21
|
2017-06-03T14:16:14.000Z
|
2018-05-29T07:28:27.000Z
|
lidi/signup/migrations/0002_user_conf_link.py
|
campovski/lidi
|
9699e62e70e679970816e29ca7618c9ed0146c7e
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-02 18:17
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add ``User.conf_link`` — presumably an account-confirmation link;
    verify against the signup views."""

    dependencies = [
        ('signup', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='user',
            name='conf_link',
            # BUG FIX: the default was the bytes literal b'' (a Python-2
            # artifact).  On Python 3, Django stores its repr — the literal
            # string "b''" — in existing rows.  CharField defaults must be
            # text; use the empty string.
            field=models.CharField(default='', max_length=200),
        ),
    ]
| 21.52381
| 64
| 0.606195
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('signup', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='user',
name='conf_link',
field=models.CharField(default=b'', max_length=200),
),
]
| true
| true
|
f7175d75f291571b61c64c017b95e8592d28ea76
| 424
|
py
|
Python
|
app/moviestore/migrations/0007_alter_movie_user_charge.py
|
GeorgiosDolias/Movie-Store-REST-API
|
3a07301e4574071d6edb00d1a8b2c266c1fc8ff1
|
[
"MIT"
] | null | null | null |
app/moviestore/migrations/0007_alter_movie_user_charge.py
|
GeorgiosDolias/Movie-Store-REST-API
|
3a07301e4574071d6edb00d1a8b2c266c1fc8ff1
|
[
"MIT"
] | null | null | null |
app/moviestore/migrations/0007_alter_movie_user_charge.py
|
GeorgiosDolias/Movie-Store-REST-API
|
3a07301e4574071d6edb00d1a8b2c266c1fc8ff1
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.8 on 2021-11-05 00:25
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter ``Movie.user_charge`` so it may be left blank in forms."""

    dependencies = [
        ('moviestore', '0006_movie_user_charge'),
    ]

    operations = [
        migrations.AlterField(
            model_name='movie',
            name='user_charge',
            # blank=True is form-level only; null is unchanged, so the
            # database column still requires a value.
            field=models.DecimalField(blank=True, decimal_places=2, max_digits=5),
        ),
    ]
| 22.315789
| 82
| 0.620283
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('moviestore', '0006_movie_user_charge'),
]
operations = [
migrations.AlterField(
model_name='movie',
name='user_charge',
field=models.DecimalField(blank=True, decimal_places=2, max_digits=5),
),
]
| true
| true
|
f7175f33e2db2534a03ad6bfb8c47d7e1b04f568
| 486
|
py
|
Python
|
main/strings/slice/words.py
|
catalinprescure/python-pages
|
93df3b22df2cfa269127e803a1b6c6a34bae6745
|
[
"MIT"
] | null | null | null |
main/strings/slice/words.py
|
catalinprescure/python-pages
|
93df3b22df2cfa269127e803a1b6c6a34bae6745
|
[
"MIT"
] | null | null | null |
main/strings/slice/words.py
|
catalinprescure/python-pages
|
93df3b22df2cfa269127e803a1b6c6a34bae6745
|
[
"MIT"
] | 1
|
2021-12-24T15:58:32.000Z
|
2021-12-24T15:58:32.000Z
|
# Open file and search through words list
# Return number of words with no e in them
import os
# BUG FIX: the word list was opened and never closed (resource leak), and
# the name ``file`` shadowed the old builtin.  Read all lines inside a
# context manager; ``rows`` is now a list, which the loop below iterates
# exactly as it did the file object.
path = os.path.dirname(__file__) + "/words.txt"
with open(path) as f:
    rows = f.readlines()
def has_no_e(word):
    """Return True when *word* contains no lowercase letter "e"."""
    return "e" not in word
# Split the input into all words (W) and the e-free subset (E),
# then report both counts.
W = [line.strip() for line in rows]
E = [w for w in W if has_no_e(w)]
print("W: " + repr(len(W)))
print("E: " + repr(len(E)))
| 18.692308
| 47
| 0.584362
|
import os
file = os.path.dirname(__file__) + "/words.txt"
rows = open(file)
def has_no_e(word):
for letter in word:
if letter == "e":
return False
return True
W = []
E = []
for row in rows:
word = row.strip()
W.append(word)
if (has_no_e(word)):
E.append(word)
print("W: " + repr(len(W)))
print("E: " + repr(len(E)))
| true
| true
|
f717606f6302397f6adbcd4ea5ca8a1e1e665802
| 374
|
py
|
Python
|
setup.py
|
adamjoshuagray/PredictItPy
|
4006cb00f38c256765b556b3476c286470555533
|
[
"MIT"
] | null | null | null |
setup.py
|
adamjoshuagray/PredictItPy
|
4006cb00f38c256765b556b3476c286470555533
|
[
"MIT"
] | 1
|
2016-11-08T10:02:31.000Z
|
2016-11-08T10:02:31.000Z
|
setup.py
|
adamjoshuagray/PredictItPy
|
4006cb00f38c256765b556b3476c286470555533
|
[
"MIT"
] | 1
|
2019-07-04T10:53:41.000Z
|
2019-07-04T10:53:41.000Z
|
from setuptools import setup
# Package metadata for the predictitpy distribution (install with
# ``pip install .``).  The package ships as a single module.
setup(name='predictitpy',
      version='0.2',
      py_modules=['predictitpy'],
      description='A very light wrapper around the PredictIt.org market data api.',
      url='https://github.com/adamjoshuagray/predictitpy',
      author='Adam J. Gray',
      author_email='adam.joshua.gray@gmail.com',
      license='MIT',
      zip_safe=False)
| 34
| 83
| 0.671123
|
from setuptools import setup
setup(name='predictitpy',
version='0.2',
py_modules=['predictitpy'],
description='A very light wrapper around the PredictIt.org market data api.',
url='https://github.com/adamjoshuagray/predictitpy',
author='Adam J. Gray',
author_email='adam.joshua.gray@gmail.com',
license='MIT',
zip_safe=False)
| true
| true
|
f71763968b7433700606cc9fe3afcf0b874429db
| 129,676
|
py
|
Python
|
salt/ext/tornado/web.py
|
yuriks/salt
|
d2a5bd8adddb98ec1718d79384aa13b4f37e8028
|
[
"Apache-2.0",
"MIT"
] | 1
|
2020-03-31T22:51:16.000Z
|
2020-03-31T22:51:16.000Z
|
salt/ext/tornado/web.py
|
yuriks/salt
|
d2a5bd8adddb98ec1718d79384aa13b4f37e8028
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
salt/ext/tornado/web.py
|
yuriks/salt
|
d2a5bd8adddb98ec1718d79384aa13b4f37e8028
|
[
"Apache-2.0",
"MIT"
] | 1
|
2021-09-30T07:00:01.000Z
|
2021-09-30T07:00:01.000Z
|
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""``tornado.web`` provides a simple web framework with asynchronous
features that allow it to scale to large numbers of open connections,
making it ideal for `long polling
<http://en.wikipedia.org/wiki/Push_technology#Long_polling>`_.
Here is a simple "Hello, world" example app:
.. testcode::
import tornado.ioloop
import tornado.web
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.write("Hello, world")
if __name__ == "__main__":
application = tornado.web.Application([
(r"/", MainHandler),
])
application.listen(8888)
tornado.ioloop.IOLoop.current().start()
.. testoutput::
:hide:
See the :doc:`guide` for additional information.
Thread-safety notes
-------------------
In general, methods on `RequestHandler` and elsewhere in Tornado are
not thread-safe. In particular, methods such as
`~RequestHandler.write()`, `~RequestHandler.finish()`, and
`~RequestHandler.flush()` must only be called from the main thread. If
you use multiple threads it is important to use `.IOLoop.add_callback`
to transfer control back to the main thread before finishing the
request.
"""
# pylint: skip-file
from __future__ import absolute_import, division, print_function
import base64
import binascii
import datetime
import email.utils
import functools
import gzip
import hashlib
import hmac
import mimetypes
import numbers
import os.path
import re
import stat
import sys
import threading
import time
import salt.ext.tornado as tornado
import traceback
import types
from inspect import isclass
from io import BytesIO
from salt.ext.tornado.concurrent import Future
from salt.ext.tornado import escape
from salt.ext.tornado import gen
from salt.ext.tornado import httputil
from salt.ext.tornado import iostream
from salt.ext.tornado import locale
from salt.ext.tornado.log import access_log, app_log, gen_log
from salt.ext.tornado import stack_context
from salt.ext.tornado import template
from salt.ext.tornado.escape import utf8, _unicode
from salt.ext.tornado.routing import (AnyMatches, DefaultHostMatches, HostMatches,
ReversibleRouter, Rule, ReversibleRuleRouter,
URLSpec)
from salt.ext.tornado.util import (ObjectDict, raise_exc_info,
unicode_type, _websocket_mask, PY3)
url = URLSpec
if PY3:
import http.cookies as Cookie
import urllib.parse as urlparse
from urllib.parse import urlencode
else:
import Cookie
import urlparse
from urllib import urlencode
try:
import typing # noqa
# The following types are accepted by RequestHandler.set_header
# and related methods.
_HeaderTypes = typing.Union[bytes, unicode_type,
numbers.Integral, datetime.datetime]
except ImportError:
pass
MIN_SUPPORTED_SIGNED_VALUE_VERSION = 1
"""The oldest signed value version supported by this version of Tornado.
Signed values older than this version cannot be decoded.
.. versionadded:: 3.2.1
"""
MAX_SUPPORTED_SIGNED_VALUE_VERSION = 2
"""The newest signed value version supported by this version of Tornado.
Signed values newer than this version cannot be decoded.
.. versionadded:: 3.2.1
"""
DEFAULT_SIGNED_VALUE_VERSION = 2
"""The signed value version produced by `.RequestHandler.create_signed_value`.
May be overridden by passing a ``version`` keyword argument.
.. versionadded:: 3.2.1
"""
DEFAULT_SIGNED_VALUE_MIN_VERSION = 1
"""The oldest signed value accepted by `.RequestHandler.get_secure_cookie`.
May be overridden by passing a ``min_version`` keyword argument.
.. versionadded:: 3.2.1
"""
class RequestHandler(object):
"""Base class for HTTP request handlers.
Subclasses must define at least one of the methods defined in the
"Entry points" section below.
"""
SUPPORTED_METHODS = ("GET", "HEAD", "POST", "DELETE", "PATCH", "PUT",
"OPTIONS")
_template_loaders = {} # type: typing.Dict[str, template.BaseLoader]
_template_loader_lock = threading.Lock()
_remove_control_chars_regex = re.compile(r"[\x00-\x08\x0e-\x1f]")
    def __init__(self, application, request, **kwargs):
        """Bind the handler to its application and request and run
        per-request setup; extra kwargs are forwarded to ``initialize``."""
        super(RequestHandler, self).__init__()
        self.application = application
        self.request = request
        self._headers_written = False
        self._finished = False
        self._auto_finish = True
        self._transforms = None  # will be set in _execute
        self._prepared_future = None
        self._headers = None  # type: httputil.HTTPHeaders
        self.path_args = None
        self.path_kwargs = None
        self.ui = ObjectDict((n, self._ui_method(m)) for n, m in
                             application.ui_methods.items())
        # UIModules are available as both `modules` and `_tt_modules` in the
        # template namespace.  Historically only `modules` was available
        # but could be clobbered by user additions to the namespace.
        # The template {% module %} directive looks in `_tt_modules` to avoid
        # possible conflicts.
        self.ui["_tt_modules"] = _UIModuleNamespace(self,
                                                    application.ui_modules)
        self.ui["modules"] = self.ui["_tt_modules"]
        self.clear()
        self.request.connection.set_close_callback(self.on_connection_close)
        self.initialize(**kwargs)
    def initialize(self):
        """Hook for subclass initialization. Called for each request.

        A dictionary passed as the third argument of a url spec will be
        supplied as keyword arguments to initialize().

        Example::

            class ProfileHandler(RequestHandler):
                def initialize(self, database):
                    self.database = database

                def get(self, username):
                    ...

            app = Application([
                (r'/user/(.*)', ProfileHandler, dict(database=database)),
                ])
        """
        pass

    @property
    def settings(self):
        """An alias for `self.application.settings <Application.settings>`."""
        return self.application.settings

    # Default verb implementations: every HTTP method answers
    # 405 Method Not Allowed until a subclass overrides it.
    def head(self, *args, **kwargs):
        raise HTTPError(405)

    def get(self, *args, **kwargs):
        raise HTTPError(405)

    def post(self, *args, **kwargs):
        raise HTTPError(405)

    def delete(self, *args, **kwargs):
        raise HTTPError(405)

    def patch(self, *args, **kwargs):
        raise HTTPError(405)

    def put(self, *args, **kwargs):
        raise HTTPError(405)

    def options(self, *args, **kwargs):
        raise HTTPError(405)
    def prepare(self):
        """Called at the beginning of a request before `get`/`post`/etc.

        Override this method to perform common initialization regardless
        of the request method.

        Asynchronous support: Decorate this method with `.gen.coroutine`
        or `.return_future` to make it asynchronous (the
        `asynchronous` decorator cannot be used on `prepare`).
        If this method returns a `.Future` execution will not proceed
        until the `.Future` is done.

        .. versionadded:: 3.1
           Asynchronous support.
        """
        pass

    def on_finish(self):
        """Called after the end of a request.

        Override this method to perform cleanup, logging, etc.
        This method is a counterpart to `prepare`.  ``on_finish`` may
        not produce any output, as it is called after the response
        has been sent to the client.
        """
        pass

    def on_connection_close(self):
        """Called in async handlers if the client closed the connection.

        Override this to clean up resources associated with
        long-lived connections.  Note that this method is called only if
        the connection was closed during asynchronous processing; if you
        need to do cleanup after every request override `on_finish`
        instead.

        Proxies may keep a connection open for a time (perhaps
        indefinitely) after the client has gone away, so this method
        may not be called promptly after the end user closes their
        connection.
        """
        if _has_stream_request_body(self.__class__):
            # Abort any pending body read so a streaming handler's
            # coroutine is woken with StreamClosedError.
            if not self.request.body.done():
                self.request.body.set_exception(iostream.StreamClosedError())
                self.request.body.exception()

    def clear(self):
        """Resets all headers and content for this response."""
        self._headers = httputil.HTTPHeaders({
            "Server": "TornadoServer/%s" % tornado.version,
            "Content-Type": "text/html; charset=UTF-8",
            "Date": httputil.format_timestamp(time.time()),
        })
        self.set_default_headers()
        self._write_buffer = []
        self._status_code = 200
        self._reason = httputil.responses[200]

    def set_default_headers(self):
        """Override this to set HTTP headers at the beginning of the request.

        For example, this is the place to set a custom ``Server`` header.
        Note that setting such headers in the normal flow of request
        processing may not do what you want, since headers may be reset
        during error handling.
        """
        pass
    def set_status(self, status_code, reason=None):
        """Sets the status code for our response.

        :arg int status_code: Response status code. If ``reason`` is ``None``,
            it must be present in `httplib.responses <http.client.responses>`.
        :arg string reason: Human-readable reason phrase describing the status
            code. If ``None``, it will be filled in from
            `httplib.responses <http.client.responses>`.
        """
        self._status_code = status_code
        if reason is not None:
            self._reason = escape.native_str(reason)
        else:
            try:
                self._reason = httputil.responses[status_code]
            except KeyError:
                raise ValueError("unknown status code %d" % status_code)

    def get_status(self):
        """Returns the status code for our response."""
        return self._status_code

    def set_header(self, name, value):
        # type: (str, _HeaderTypes) -> None
        """Sets the given response header name and value.

        If a datetime is given, we automatically format it according to the
        HTTP specification. If the value is not a string, we convert it to
        a string. All header values are then encoded as UTF-8.
        """
        self._headers[name] = self._convert_header_value(value)

    def add_header(self, name, value):
        # type: (str, _HeaderTypes) -> None
        """Adds the given response header and value.

        Unlike `set_header`, `add_header` may be called multiple times
        to return multiple values for the same header.
        """
        self._headers.add(name, self._convert_header_value(value))

    def clear_header(self, name):
        """Clears an outgoing header, undoing a previous `set_header` call.

        Note that this method does not apply to multi-valued headers
        set by `add_header`.
        """
        if name in self._headers:
            del self._headers[name]

    # Control characters are never legal in a header value (response
    # splitting); see _convert_header_value below.
    _INVALID_HEADER_CHAR_RE = re.compile(r"[\x00-\x1f]")
    def _convert_header_value(self, value):
        # type: (_HeaderTypes) -> str
        """Normalize a header value to ``str`` and reject unsafe values."""
        # Convert the input value to a str. This type check is a bit
        # subtle: The bytes case only executes on python 3, and the
        # unicode case only executes on python 2, because the other
        # cases are covered by the first match for str.
        if isinstance(value, str):
            retval = value
        elif isinstance(value, bytes):  # py3
            # Non-ascii characters in headers are not well supported,
            # but if you pass bytes, use latin1 so they pass through as-is.
            retval = value.decode('latin1')
        elif isinstance(value, unicode_type):  # py2
            # TODO: This is inconsistent with the use of latin1 above,
            # but it's been that way for a long time. Should it change?
            retval = escape.utf8(value)
        elif isinstance(value, numbers.Integral):
            # return immediately since we know the converted value will be safe
            return str(value)
        elif isinstance(value, datetime.datetime):
            return httputil.format_timestamp(value)
        else:
            raise TypeError("Unsupported header value %r" % value)
        # If \n is allowed into the header, it is possible to inject
        # additional headers or split the request.
        if RequestHandler._INVALID_HEADER_CHAR_RE.search(retval):
            raise ValueError("Unsafe header value %r", retval)
        return retval
    # Sentinel distinguishing "no default supplied" from an explicit
    # default of None.
    _ARG_DEFAULT = object()

    def get_argument(self, name, default=_ARG_DEFAULT, strip=True):
        """Returns the value of the argument with the given name.

        If default is not provided, the argument is considered to be
        required, and we raise a `MissingArgumentError` if it is missing.

        If the argument appears in the url more than once, we return the
        last value.

        The returned value is always unicode.
        """
        return self._get_argument(name, default, self.request.arguments, strip)

    def get_arguments(self, name, strip=True):
        """Returns a list of the arguments with the given name.

        If the argument is not present, returns an empty list.

        The returned values are always unicode.
        """
        # Make sure `get_arguments` isn't accidentally being called with a
        # positional argument that's assumed to be a default (like in
        # `get_argument`.)
        assert isinstance(strip, bool)
        return self._get_arguments(name, self.request.arguments, strip)

    def get_body_argument(self, name, default=_ARG_DEFAULT, strip=True):
        """Returns the value of the argument with the given name
        from the request body.

        If default is not provided, the argument is considered to be
        required, and we raise a `MissingArgumentError` if it is missing.

        If the argument appears in the url more than once, we return the
        last value.

        The returned value is always unicode.

        .. versionadded:: 3.2
        """
        return self._get_argument(name, default, self.request.body_arguments,
                                  strip)

    def get_body_arguments(self, name, strip=True):
        """Returns a list of the body arguments with the given name.

        If the argument is not present, returns an empty list.

        The returned values are always unicode.

        .. versionadded:: 3.2
        """
        return self._get_arguments(name, self.request.body_arguments, strip)

    def get_query_argument(self, name, default=_ARG_DEFAULT, strip=True):
        """Returns the value of the argument with the given name
        from the request query string.

        If default is not provided, the argument is considered to be
        required, and we raise a `MissingArgumentError` if it is missing.

        If the argument appears in the url more than once, we return the
        last value.

        The returned value is always unicode.

        .. versionadded:: 3.2
        """
        return self._get_argument(name, default,
                                  self.request.query_arguments, strip)

    def get_query_arguments(self, name, strip=True):
        """Returns a list of the query arguments with the given name.

        If the argument is not present, returns an empty list.

        The returned values are always unicode.

        .. versionadded:: 3.2
        """
        return self._get_arguments(name, self.request.query_arguments, strip)

    def _get_argument(self, name, default, source, strip=True):
        # Shared implementation: last occurrence wins; missing + no
        # default raises MissingArgumentError.
        args = self._get_arguments(name, source, strip=strip)
        if not args:
            if default is self._ARG_DEFAULT:
                raise MissingArgumentError(name)
            return default
        return args[-1]

    def _get_arguments(self, name, source, strip=True):
        values = []
        for v in source.get(name, []):
            v = self.decode_argument(v, name=name)
            if isinstance(v, unicode_type):
                # Get rid of any weird control chars (unless decoding gave
                # us bytes, in which case leave it alone)
                v = RequestHandler._remove_control_chars_regex.sub(" ", v)
            if strip:
                v = v.strip()
            values.append(v)
        return values
    def decode_argument(self, value, name=None):
        """Decodes an argument from the request.

        The argument has been percent-decoded and is now a byte string.
        By default, this method decodes the argument as utf-8 and returns
        a unicode string, but this may be overridden in subclasses.

        This method is used as a filter for both `get_argument()` and for
        values extracted from the url and passed to `get()`/`post()`/etc.

        The name of the argument is provided if known, but may be None
        (e.g. for unnamed groups in the url regex).
        """
        try:
            return _unicode(value)
        except UnicodeDecodeError:
            # Truncate the offending value in the message to keep it readable.
            raise HTTPError(400, "Invalid unicode in %s: %r" %
                            (name or "url", value[:40]))

    @property
    def cookies(self):
        """An alias for
        `self.request.cookies <.httputil.HTTPServerRequest.cookies>`."""
        return self.request.cookies

    def get_cookie(self, name, default=None):
        """Gets the value of the cookie with the given name, else default."""
        if self.request.cookies is not None and name in self.request.cookies:
            return self.request.cookies[name].value
        return default
    def set_cookie(self, name, value, domain=None, expires=None, path="/",
                   expires_days=None, **kwargs):
        """Sets the given cookie name/value with the given options.

        Additional keyword arguments are set on the Cookie.Morsel
        directly.
        See https://docs.python.org/2/library/cookie.html#Cookie.Morsel
        for available attributes.
        """
        # The cookie library only accepts type str, in both python 2 and 3
        name = escape.native_str(name)
        value = escape.native_str(value)
        if re.search(r"[\x00-\x20]", name + value):
            # Don't let us accidentally inject bad stuff
            raise ValueError("Invalid cookie %r: %r" % (name, value))
        if not hasattr(self, "_new_cookie"):
            self._new_cookie = Cookie.SimpleCookie()
        if name in self._new_cookie:
            del self._new_cookie[name]
        self._new_cookie[name] = value
        morsel = self._new_cookie[name]
        if domain:
            morsel["domain"] = domain
        if expires_days is not None and not expires:
            # expires_days is a convenience shortcut; an explicit expires wins.
            expires = datetime.datetime.utcnow() + datetime.timedelta(
                days=expires_days)
        if expires:
            morsel["expires"] = httputil.format_timestamp(expires)
        if path:
            morsel["path"] = path
        for k, v in kwargs.items():
            if k == 'max_age':
                # Allow the pythonic spelling for the max-age attribute.
                k = 'max-age'

            # skip falsy values for httponly and secure flags because
            # SimpleCookie sets them regardless
            if k in ['httponly', 'secure'] and not v:
                continue

            morsel[k] = v

    def clear_cookie(self, name, path="/", domain=None):
        """Deletes the cookie with the given name.

        Due to limitations of the cookie protocol, you must pass the same
        path and domain to clear a cookie as were used when that cookie
        was set (but there is no way to find out on the server side
        which values were used for a given cookie).
        """
        # A cookie is "deleted" by re-setting it with an expiry in the past.
        expires = datetime.datetime.utcnow() - datetime.timedelta(days=365)
        self.set_cookie(name, value="", path=path, expires=expires,
                        domain=domain)

    def clear_all_cookies(self, path="/", domain=None):
        """Deletes all the cookies the user sent with this request.

        See `clear_cookie` for more information on the path and domain
        parameters.

        .. versionchanged:: 3.2
           Added the ``path`` and ``domain`` parameters.
        """
        for name in self.request.cookies:
            self.clear_cookie(name, path=path, domain=domain)
    def set_secure_cookie(self, name, value, expires_days=30, version=None,
                          **kwargs):
        """Signs and timestamps a cookie so it cannot be forged.

        You must specify the ``cookie_secret`` setting in your Application
        to use this method. It should be a long, random sequence of bytes
        to be used as the HMAC secret for the signature.

        To read a cookie set with this method, use `get_secure_cookie()`.

        Note that the ``expires_days`` parameter sets the lifetime of the
        cookie in the browser, but is independent of the ``max_age_days``
        parameter to `get_secure_cookie`.

        Secure cookies may contain arbitrary byte values, not just unicode
        strings (unlike regular cookies)

        .. versionchanged:: 3.2.1
           Added the ``version`` argument.  Introduced cookie version 2
           and made it the default.
        """
        self.set_cookie(name, self.create_signed_value(name, value,
                                                       version=version),
                        expires_days=expires_days, **kwargs)

    def create_signed_value(self, name, value, version=None):
        """Signs and timestamps a string so it cannot be forged.

        Normally used via set_secure_cookie, but provided as a separate
        method for non-cookie uses.  To decode a value not stored
        as a cookie use the optional value argument to get_secure_cookie.

        .. versionchanged:: 3.2.1
           Added the ``version`` argument.  Introduced cookie version 2
           and made it the default.
        """
        self.require_setting("cookie_secret", "secure cookies")
        secret = self.application.settings["cookie_secret"]
        key_version = None
        if isinstance(secret, dict):
            # Key-rotation mode: the secret is a dict of version -> key.
            if self.application.settings.get("key_version") is None:
                raise Exception("key_version setting must be used for secret_key dicts")
            key_version = self.application.settings["key_version"]
        return create_signed_value(secret, name, value, version=version,
                                   key_version=key_version)

    def get_secure_cookie(self, name, value=None, max_age_days=31,
                          min_version=None):
        """Returns the given signed cookie if it validates, or None.

        The decoded cookie value is returned as a byte string (unlike
        `get_cookie`).

        .. versionchanged:: 3.2.1
           Added the ``min_version`` argument.  Introduced cookie version 2;
           both versions 1 and 2 are accepted by default.
        """
        self.require_setting("cookie_secret", "secure cookies")
        if value is None:
            value = self.get_cookie(name)
        return decode_signed_value(self.application.settings["cookie_secret"],
                                   name, value, max_age_days=max_age_days,
                                   min_version=min_version)

    def get_secure_cookie_key_version(self, name, value=None):
        """Returns the signing key version of the secure cookie.

        The version is returned as int.
        """
        self.require_setting("cookie_secret", "secure cookies")
        if value is None:
            value = self.get_cookie(name)
        return get_signature_key_version(value)
def redirect(self, url, permanent=False, status=None):
"""Sends a redirect to the given (optionally relative) URL.
If the ``status`` argument is specified, that value is used as the
HTTP status code; otherwise either 301 (permanent) or 302
(temporary) is chosen based on the ``permanent`` argument.
The default is 302 (temporary).
"""
if self._headers_written:
raise Exception("Cannot redirect after headers have been written")
if status is None:
status = 301 if permanent else 302
else:
assert isinstance(status, int) and 300 <= status <= 399
self.set_status(status)
self.set_header("Location", utf8(url))
self.finish()
def write(self, chunk):
"""Writes the given chunk to the output buffer.
To write the output to the network, use the flush() method below.
If the given chunk is a dictionary, we write it as JSON and set
the Content-Type of the response to be ``application/json``.
(if you want to send JSON as a different ``Content-Type``, call
set_header *after* calling write()).
Note that lists are not converted to JSON because of a potential
cross-site security vulnerability. All JSON output should be
wrapped in a dictionary. More details at
http://haacked.com/archive/2009/06/25/json-hijacking.aspx/ and
https://github.com/facebook/tornado/issues/1009
"""
if self._finished:
raise RuntimeError("Cannot write() after finish()")
if not isinstance(chunk, (bytes, unicode_type, dict)):
message = "write() only accepts bytes, unicode, and dict objects"
if isinstance(chunk, list):
message += ". Lists not accepted for security reasons; see http://www.tornadoweb.org/en/stable/web.html#tornado.web.RequestHandler.write"
raise TypeError(message)
if isinstance(chunk, dict):
chunk = escape.json_encode(chunk)
self.set_header("Content-Type", "application/json; charset=UTF-8")
chunk = utf8(chunk)
self._write_buffer.append(chunk)
    def render(self, template_name, **kwargs):
        """Renders the template with the given arguments as the response.

        The rendered output is passed to `finish`, so ``render`` must be
        the last output-producing call for this request.
        """
        if self._finished:
            raise RuntimeError("Cannot render() after finish()")
        html = self.render_string(template_name, **kwargs)

        # Insert the additional JS and CSS added by the modules on the page
        js_embed = []
        js_files = []
        css_embed = []
        css_files = []
        html_heads = []
        html_bodies = []
        for module in getattr(self, "_active_modules", {}).values():
            embed_part = module.embedded_javascript()
            if embed_part:
                js_embed.append(utf8(embed_part))
            file_part = module.javascript_files()
            if file_part:
                # A module may return either a single path or a list of paths.
                if isinstance(file_part, (unicode_type, bytes)):
                    js_files.append(file_part)
                else:
                    js_files.extend(file_part)
            embed_part = module.embedded_css()
            if embed_part:
                css_embed.append(utf8(embed_part))
            file_part = module.css_files()
            if file_part:
                if isinstance(file_part, (unicode_type, bytes)):
                    css_files.append(file_part)
                else:
                    css_files.extend(file_part)
            head_part = module.html_head()
            if head_part:
                html_heads.append(utf8(head_part))
            body_part = module.html_body()
            if body_part:
                html_bodies.append(utf8(body_part))

        if js_files:
            # Maintain order of JavaScript files given by modules
            js = self.render_linked_js(js_files)
            # Scripts are spliced in just before </body> so the DOM is
            # parsed before they run.
            sloc = html.rindex(b'</body>')
            html = html[:sloc] + utf8(js) + b'\n' + html[sloc:]
        if js_embed:
            js = self.render_embed_js(js_embed)
            sloc = html.rindex(b'</body>')
            html = html[:sloc] + js + b'\n' + html[sloc:]
        if css_files:
            # CSS (linked and embedded) goes at the end of <head>.
            css = self.render_linked_css(css_files)
            hloc = html.index(b'</head>')
            html = html[:hloc] + utf8(css) + b'\n' + html[hloc:]
        if css_embed:
            css = self.render_embed_css(css_embed)
            hloc = html.index(b'</head>')
            html = html[:hloc] + css + b'\n' + html[hloc:]
        if html_heads:
            hloc = html.index(b'</head>')
            html = html[:hloc] + b''.join(html_heads) + b'\n' + html[hloc:]
        if html_bodies:
            hloc = html.index(b'</body>')
            html = html[:hloc] + b''.join(html_bodies) + b'\n' + html[hloc:]
        self.finish(html)
def render_linked_js(self, js_files):
"""Default method used to render the final js links for the
rendered webpage.
Override this method in a sub-classed controller to change the output.
"""
paths = []
unique_paths = set()
for path in js_files:
if not is_absolute(path):
path = self.static_url(path)
if path not in unique_paths:
paths.append(path)
unique_paths.add(path)
return ''.join('<script src="' + escape.xhtml_escape(p) +
'" type="text/javascript"></script>'
for p in paths)
def render_embed_js(self, js_embed):
"""Default method used to render the final embedded js for the
rendered webpage.
Override this method in a sub-classed controller to change the output.
"""
return b'<script type="text/javascript">\n//<![CDATA[\n' + \
b'\n'.join(js_embed) + b'\n//]]>\n</script>'
def render_linked_css(self, css_files):
"""Default method used to render the final css links for the
rendered webpage.
Override this method in a sub-classed controller to change the output.
"""
paths = []
unique_paths = set()
for path in css_files:
if not is_absolute(path):
path = self.static_url(path)
if path not in unique_paths:
paths.append(path)
unique_paths.add(path)
return ''.join('<link href="' + escape.xhtml_escape(p) + '" '
'type="text/css" rel="stylesheet"/>'
for p in paths)
def render_embed_css(self, css_embed):
"""Default method used to render the final embedded css for the
rendered webpage.
Override this method in a sub-classed controller to change the output.
"""
return b'<style type="text/css">\n' + b'\n'.join(css_embed) + \
b'\n</style>'
    def render_string(self, template_name, **kwargs):
        """Generate the given template with the given arguments.

        We return the generated byte string (in utf8). To generate and
        write a template as a response, use render() above.
        """
        # If no template_path is specified, use the path of the calling file
        template_path = self.get_template_path()
        if not template_path:
            # Walk up the call stack past all frames belonging to this
            # module and use the directory of the first outside caller.
            frame = sys._getframe(0)
            web_file = frame.f_code.co_filename
            while frame.f_code.co_filename == web_file:
                frame = frame.f_back
            template_path = os.path.dirname(frame.f_code.co_filename)
        # Template loaders are cached per directory and shared by all
        # handlers in the process; the lock guards concurrent creation.
        with RequestHandler._template_loader_lock:
            if template_path not in RequestHandler._template_loaders:
                loader = self.create_template_loader(template_path)
                RequestHandler._template_loaders[template_path] = loader
            else:
                loader = RequestHandler._template_loaders[template_path]
        t = loader.load(template_name)
        # Explicit kwargs override the default namespace entries.
        namespace = self.get_template_namespace()
        namespace.update(kwargs)
        return t.generate(**namespace)
def get_template_namespace(self):
"""Returns a dictionary to be used as the default template namespace.
May be overridden by subclasses to add or modify values.
The results of this method will be combined with additional
defaults in the `tornado.template` module and keyword arguments
to `render` or `render_string`.
"""
namespace = dict(
handler=self,
request=self.request,
current_user=self.current_user,
locale=self.locale,
_=self.locale.translate,
pgettext=self.locale.pgettext,
static_url=self.static_url,
xsrf_form_html=self.xsrf_form_html,
reverse_url=self.reverse_url
)
namespace.update(self.ui)
return namespace
def create_template_loader(self, template_path):
"""Returns a new template loader for the given path.
May be overridden by subclasses. By default returns a
directory-based loader on the given path, using the
``autoescape`` and ``template_whitespace`` application
settings. If a ``template_loader`` application setting is
supplied, uses that instead.
"""
settings = self.application.settings
if "template_loader" in settings:
return settings["template_loader"]
kwargs = {}
if "autoescape" in settings:
# autoescape=None means "no escaping", so we have to be sure
# to only pass this kwarg if the user asked for it.
kwargs["autoescape"] = settings["autoescape"]
if "template_whitespace" in settings:
kwargs["whitespace"] = settings["template_whitespace"]
return template.Loader(template_path, **kwargs)
    def flush(self, include_footers=False, callback=None):
        """Flushes the current output buffer to the network.

        The ``callback`` argument, if given, can be used for flow control:
        it will be run when all flushed data has been written to the socket.
        Note that only one flush callback can be outstanding at a time;
        if another flush occurs before the previous flush's callback
        has been run, the previous callback will be discarded.

        .. versionchanged:: 4.0
           Now returns a `.Future` if no callback is given.
        """
        chunk = b"".join(self._write_buffer)
        self._write_buffer = []
        if not self._headers_written:
            self._headers_written = True
            # First flush: output transforms (e.g. gzip, chunked) may
            # rewrite both headers and the first body chunk together.
            for transform in self._transforms:
                self._status_code, self._headers, chunk = \
                    transform.transform_first_chunk(
                        self._status_code, self._headers,
                        chunk, include_footers)
            # Ignore the chunk and only write the headers for HEAD requests
            if self.request.method == "HEAD":
                chunk = None

            # Finalize the cookie headers (which have been stored in a side
            # object so an outgoing cookie could be overwritten before it
            # is sent).
            if hasattr(self, "_new_cookie"):
                for cookie in self._new_cookie.values():
                    self.add_header("Set-Cookie", cookie.OutputString(None))

            start_line = httputil.ResponseStartLine('',
                                                    self._status_code,
                                                    self._reason)
            return self.request.connection.write_headers(
                start_line, self._headers, chunk, callback=callback)
        else:
            # Subsequent flushes: transforms only see the body chunk.
            for transform in self._transforms:
                chunk = transform.transform_chunk(chunk, include_footers)
            # Ignore the chunk and only write the headers for HEAD requests
            if self.request.method != "HEAD":
                return self.request.connection.write(chunk, callback=callback)
            else:
                # Nothing to write; return an already-resolved Future so
                # callers can still yield on the result.
                future = Future()
                future.set_result(None)
                return future
    def finish(self, chunk=None):
        """Finishes this response, ending the HTTP request.

        May only be called once per request; an optional final ``chunk``
        is written before the response is flushed and the connection's
        request cycle is completed.
        """
        if self._finished:
            raise RuntimeError("finish() called twice")

        if chunk is not None:
            self.write(chunk)

        # Automatically support ETags and add the Content-Length header if
        # we have not flushed any content yet.
        if not self._headers_written:
            if (self._status_code == 200 and
                self.request.method in ("GET", "HEAD") and
                    "Etag" not in self._headers):
                self.set_etag_header()
                # If the client already has a matching entity, downgrade
                # to 304 Not Modified and drop the body.
                if self.check_etag_header():
                    self._write_buffer = []
                    self.set_status(304)
            if (self._status_code in (204, 304) or
                    (self._status_code >= 100 and self._status_code < 200)):
                # These statuses forbid a message body (RFC 7230/7232).
                assert not self._write_buffer, "Cannot send body with %s" % self._status_code
                self._clear_headers_for_304()
            elif "Content-Length" not in self._headers:
                content_length = sum(len(part) for part in self._write_buffer)
                self.set_header("Content-Length", content_length)

        if hasattr(self.request, "connection"):
            # Now that the request is finished, clear the callback we
            # set on the HTTPConnection (which would otherwise prevent the
            # garbage collection of the RequestHandler when there
            # are keepalive connections)
            self.request.connection.set_close_callback(None)

        self.flush(include_footers=True)
        self.request.finish()
        self._log()
        self._finished = True
        self.on_finish()
        self._break_cycles()
def _break_cycles(self):
# Break up a reference cycle between this handler and the
# _ui_module closures to allow for faster GC on CPython.
self.ui = None
    def send_error(self, status_code=500, **kwargs):
        """Sends the given HTTP error code to the browser.

        If `flush()` has already been called, it is not possible to send
        an error, so this method will simply terminate the response.
        If output has been written but not yet flushed, it will be discarded
        and replaced with the error page.

        Override `write_error()` to customize the error page that is returned.
        Additional keyword arguments are passed through to `write_error`.
        """
        if self._headers_written:
            gen_log.error("Cannot send error response after headers written")
            if not self._finished:
                # If we get an error between writing headers and finishing,
                # we are unlikely to be able to finish due to a
                # Content-Length mismatch. Try anyway to release the
                # socket.
                try:
                    self.finish()
                except Exception:
                    gen_log.error("Failed to flush partial response",
                                  exc_info=True)
            return
        # Discard any buffered (unflushed) output and start over with the
        # error page.
        self.clear()

        reason = kwargs.get('reason')
        if 'exc_info' in kwargs:
            # Prefer the HTTPError's own reason phrase when available.
            exception = kwargs['exc_info'][1]
            if isinstance(exception, HTTPError) and exception.reason:
                reason = exception.reason
        self.set_status(status_code, reason=reason)
        try:
            self.write_error(status_code, **kwargs)
        except Exception:
            # write_error itself failed; log and fall through so the
            # response is still terminated below.
            app_log.error("Uncaught exception in write_error", exc_info=True)
        if not self._finished:
            self.finish()
def write_error(self, status_code, **kwargs):
"""Override to implement custom error pages.
``write_error`` may call `write`, `render`, `set_header`, etc
to produce output as usual.
If this error was caused by an uncaught exception (including
HTTPError), an ``exc_info`` triple will be available as
``kwargs["exc_info"]``. Note that this exception may not be
the "current" exception for purposes of methods like
``sys.exc_info()`` or ``traceback.format_exc``.
"""
if self.settings.get("serve_traceback") and "exc_info" in kwargs:
# in debug mode, try to send a traceback
self.set_header('Content-Type', 'text/plain')
for line in traceback.format_exception(*kwargs["exc_info"]):
self.write(line)
self.finish()
else:
self.finish("<html><title>%(code)d: %(message)s</title>"
"<body>%(code)d: %(message)s</body></html>" % {
"code": status_code,
"message": self._reason,
})
@property
def locale(self):
"""The locale for the current session.
Determined by either `get_user_locale`, which you can override to
set the locale based on, e.g., a user preference stored in a
database, or `get_browser_locale`, which uses the ``Accept-Language``
header.
.. versionchanged: 4.1
Added a property setter.
"""
if not hasattr(self, "_locale"):
self._locale = self.get_user_locale()
if not self._locale:
self._locale = self.get_browser_locale()
assert self._locale
return self._locale
@locale.setter
def locale(self, value):
self._locale = value
def get_user_locale(self):
"""Override to determine the locale from the authenticated user.
If None is returned, we fall back to `get_browser_locale()`.
This method should return a `tornado.locale.Locale` object,
most likely obtained via a call like ``tornado.locale.get("en")``
"""
return None
def get_browser_locale(self, default="en_US"):
"""Determines the user's locale from ``Accept-Language`` header.
See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.4
"""
if "Accept-Language" in self.request.headers:
languages = self.request.headers["Accept-Language"].split(",")
locales = []
for language in languages:
parts = language.strip().split(";")
if len(parts) > 1 and parts[1].startswith("q="):
try:
score = float(parts[1][2:])
except (ValueError, TypeError):
score = 0.0
else:
score = 1.0
locales.append((parts[0], score))
if locales:
locales.sort(key=lambda pair: pair[1], reverse=True)
codes = [l[0] for l in locales]
return locale.get(*codes)
return locale.get(default)
@property
def current_user(self):
"""The authenticated user for this request.
This is set in one of two ways:
* A subclass may override `get_current_user()`, which will be called
automatically the first time ``self.current_user`` is accessed.
`get_current_user()` will only be called once per request,
and is cached for future access::
def get_current_user(self):
user_cookie = self.get_secure_cookie("user")
if user_cookie:
return json.loads(user_cookie)
return None
* It may be set as a normal variable, typically from an overridden
`prepare()`::
@gen.coroutine
def prepare(self):
user_id_cookie = self.get_secure_cookie("user_id")
if user_id_cookie:
self.current_user = yield load_user(user_id_cookie)
Note that `prepare()` may be a coroutine while `get_current_user()`
may not, so the latter form is necessary if loading the user requires
asynchronous operations.
The user object may be any type of the application's choosing.
"""
if not hasattr(self, "_current_user"):
self._current_user = self.get_current_user()
return self._current_user
@current_user.setter
def current_user(self, value):
self._current_user = value
def get_current_user(self):
"""Override to determine the current user from, e.g., a cookie.
This method may not be a coroutine.
"""
return None
def get_login_url(self):
"""Override to customize the login URL based on the request.
By default, we use the ``login_url`` application setting.
"""
self.require_setting("login_url", "@tornado.web.authenticated")
return self.application.settings["login_url"]
def get_template_path(self):
"""Override to customize template path for each handler.
By default, we use the ``template_path`` application setting.
Return None to load templates relative to the calling file.
"""
return self.application.settings.get("template_path")
@property
def xsrf_token(self):
"""The XSRF-prevention token for the current user/session.
To prevent cross-site request forgery, we set an '_xsrf' cookie
and include the same '_xsrf' value as an argument with all POST
requests. If the two do not match, we reject the form submission
as a potential forgery.
See http://en.wikipedia.org/wiki/Cross-site_request_forgery
.. versionchanged:: 3.2.2
The xsrf token will now be have a random mask applied in every
request, which makes it safe to include the token in pages
that are compressed. See http://breachattack.com for more
information on the issue fixed by this change. Old (version 1)
cookies will be converted to version 2 when this method is called
unless the ``xsrf_cookie_version`` `Application` setting is
set to 1.
.. versionchanged:: 4.3
The ``xsrf_cookie_kwargs`` `Application` setting may be
used to supply additional cookie options (which will be
passed directly to `set_cookie`). For example,
``xsrf_cookie_kwargs=dict(httponly=True, secure=True)``
will set the ``secure`` and ``httponly`` flags on the
``_xsrf`` cookie.
"""
if not hasattr(self, "_xsrf_token"):
version, token, timestamp = self._get_raw_xsrf_token()
output_version = self.settings.get("xsrf_cookie_version", 2)
cookie_kwargs = self.settings.get("xsrf_cookie_kwargs", {})
if output_version == 1:
self._xsrf_token = binascii.b2a_hex(token)
elif output_version == 2:
mask = os.urandom(4)
self._xsrf_token = b"|".join([
b"2",
binascii.b2a_hex(mask),
binascii.b2a_hex(_websocket_mask(mask, token)),
utf8(str(int(timestamp)))])
else:
raise ValueError("unknown xsrf cookie version %d",
output_version)
if version is None:
expires_days = 30 if self.current_user else None
self.set_cookie("_xsrf", self._xsrf_token,
expires_days=expires_days,
**cookie_kwargs)
return self._xsrf_token
def _get_raw_xsrf_token(self):
"""Read or generate the xsrf token in its raw form.
The raw_xsrf_token is a tuple containing:
* version: the version of the cookie from which this token was read,
or None if we generated a new token in this request.
* token: the raw token data; random (non-ascii) bytes.
* timestamp: the time this token was generated (will not be accurate
for version 1 cookies)
"""
if not hasattr(self, '_raw_xsrf_token'):
cookie = self.get_cookie("_xsrf")
if cookie:
version, token, timestamp = self._decode_xsrf_token(cookie)
else:
version, token, timestamp = None, None, None
if token is None:
version = None
token = os.urandom(16)
timestamp = time.time()
self._raw_xsrf_token = (version, token, timestamp)
return self._raw_xsrf_token
    def _decode_xsrf_token(self, cookie):
        """Convert a cookie string into the tuple form returned by
        _get_raw_xsrf_token.

        Returns ``(None, None, None)`` for any malformed or
        unrecognized cookie instead of raising.
        """
        try:
            m = _signed_value_version_re.match(utf8(cookie))

            if m:
                version = int(m.group(1))
                if version == 2:
                    # Format: "2|<hex mask>|<hex masked token>|<timestamp>"
                    _, mask, masked_token, timestamp = cookie.split("|")

                    mask = binascii.a2b_hex(utf8(mask))
                    # The token is stored XOR-masked (BREACH mitigation);
                    # unmask it to recover the raw token bytes.
                    token = _websocket_mask(
                        mask, binascii.a2b_hex(utf8(masked_token)))
                    timestamp = int(timestamp)
                    return version, token, timestamp
                else:
                    # Treat unknown versions as not present instead of failing.
                    raise Exception("Unknown xsrf cookie version")
            else:
                # No version prefix: legacy (version 1) cookie, either
                # hex-encoded or raw.
                version = 1
                try:
                    token = binascii.a2b_hex(utf8(cookie))
                except (binascii.Error, TypeError):
                    token = utf8(cookie)
                # We don't have a usable timestamp in older versions.
                timestamp = int(time.time())
                return (version, token, timestamp)
        except Exception:
            # Catch exceptions and return nothing instead of failing.
            gen_log.debug("Uncaught exception in _decode_xsrf_token",
                          exc_info=True)
            return None, None, None
    def check_xsrf_cookie(self):
        """Verifies that the ``_xsrf`` cookie matches the ``_xsrf`` argument.

        To prevent cross-site request forgery, we set an ``_xsrf``
        cookie and include the same value as a non-cookie
        field with all ``POST`` requests. If the two do not match, we
        reject the form submission as a potential forgery.

        The ``_xsrf`` value may be set as either a form field named ``_xsrf``
        or in a custom HTTP header named ``X-XSRFToken`` or ``X-CSRFToken``
        (the latter is accepted for compatibility with Django).

        See http://en.wikipedia.org/wiki/Cross-site_request_forgery

        Prior to release 1.1.1, this check was ignored if the HTTP header
        ``X-Requested-With: XMLHTTPRequest`` was present.  This exception
        has been shown to be insecure and has been removed.  For more
        information please see
        http://www.djangoproject.com/weblog/2011/feb/08/security/
        http://weblog.rubyonrails.org/2011/2/8/csrf-protection-bypass-in-ruby-on-rails

        .. versionchanged:: 3.2.2
           Added support for cookie version 2.  Both versions 1 and 2 are
           supported.
        """
        # The token may arrive as a form field or in either header spelling.
        token = (self.get_argument("_xsrf", None) or
                 self.request.headers.get("X-Xsrftoken") or
                 self.request.headers.get("X-Csrftoken"))
        if not token:
            raise HTTPError(403, "'_xsrf' argument missing from POST")
        # Decode both sides to raw token bytes so version-1 and version-2
        # encodings of the same token compare equal.
        _, token, _ = self._decode_xsrf_token(token)
        _, expected_token, _ = self._get_raw_xsrf_token()
        if not token:
            raise HTTPError(403, "'_xsrf' argument has invalid format")
        # Constant-time comparison to avoid timing side channels.
        if not _time_independent_equals(utf8(token), utf8(expected_token)):
            raise HTTPError(403, "XSRF cookie does not match POST argument")
def xsrf_form_html(self):
"""An HTML ``<input/>`` element to be included with all POST forms.
It defines the ``_xsrf`` input value, which we check on all POST
requests to prevent cross-site request forgery. If you have set
the ``xsrf_cookies`` application setting, you must include this
HTML within all of your HTML forms.
In a template, this method should be called with ``{% module
xsrf_form_html() %}``
See `check_xsrf_cookie()` above for more information.
"""
return '<input type="hidden" name="_xsrf" value="' + \
escape.xhtml_escape(self.xsrf_token) + '"/>'
def static_url(self, path, include_host=None, **kwargs):
"""Returns a static URL for the given relative static file path.
This method requires you set the ``static_path`` setting in your
application (which specifies the root directory of your static
files).
This method returns a versioned url (by default appending
``?v=<signature>``), which allows the static files to be
cached indefinitely. This can be disabled by passing
``include_version=False`` (in the default implementation;
other static file implementations are not required to support
this, but they may support other options).
By default this method returns URLs relative to the current
host, but if ``include_host`` is true the URL returned will be
absolute. If this handler has an ``include_host`` attribute,
that value will be used as the default for all `static_url`
calls that do not pass ``include_host`` as a keyword argument.
"""
self.require_setting("static_path", "static_url")
get_url = self.settings.get("static_handler_class",
StaticFileHandler).make_static_url
if include_host is None:
include_host = getattr(self, "include_host", False)
if include_host:
base = self.request.protocol + "://" + self.request.host
else:
base = ""
return base + get_url(self.settings, path, **kwargs)
def require_setting(self, name, feature="this feature"):
"""Raises an exception if the given app setting is not defined."""
if not self.application.settings.get(name):
raise Exception("You must define the '%s' setting in your "
"application to use %s" % (name, feature))
def reverse_url(self, name, *args):
"""Alias for `Application.reverse_url`."""
return self.application.reverse_url(name, *args)
def compute_etag(self):
"""Computes the etag header to be used for this request.
By default uses a hash of the content written so far.
May be overridden to provide custom etag implementations,
or may return None to disable tornado's default etag support.
"""
hasher = hashlib.sha1()
for part in self._write_buffer:
hasher.update(part)
return '"%s"' % hasher.hexdigest()
def set_etag_header(self):
"""Sets the response's Etag header using ``self.compute_etag()``.
Note: no header will be set if ``compute_etag()`` returns ``None``.
This method is called automatically when the request is finished.
"""
etag = self.compute_etag()
if etag is not None:
self.set_header("Etag", etag)
def check_etag_header(self):
"""Checks the ``Etag`` header against requests's ``If-None-Match``.
Returns ``True`` if the request's Etag matches and a 304 should be
returned. For example::
self.set_etag_header()
if self.check_etag_header():
self.set_status(304)
return
This method is called automatically when the request is finished,
but may be called earlier for applications that override
`compute_etag` and want to do an early check for ``If-None-Match``
before completing the request. The ``Etag`` header should be set
(perhaps with `set_etag_header`) before calling this method.
"""
computed_etag = utf8(self._headers.get("Etag", ""))
# Find all weak and strong etag values from If-None-Match header
# because RFC 7232 allows multiple etag values in a single header.
etags = re.findall(
br'\*|(?:W/)?"[^"]*"',
utf8(self.request.headers.get("If-None-Match", ""))
)
if not computed_etag or not etags:
return False
match = False
if etags[0] == b'*':
match = True
else:
# Use a weak comparison when comparing entity-tags.
def val(x):
return x[2:] if x.startswith(b'W/') else x
for etag in etags:
if val(etag) == val(computed_etag):
match = True
break
return match
    def _stack_context_handle_exception(self, type, value, traceback):
        # Exception handler installed on the handler's ExceptionStackContext;
        # returning True tells stack_context that the exception was consumed.
        try:
            # For historical reasons _handle_request_exception only takes
            # the exception value instead of the full triple,
            # so re-raise the exception to ensure that it's in
            # sys.exc_info()
            raise_exc_info((type, value, traceback))
        except Exception:
            self._handle_request_exception(value)
        return True
    @gen.coroutine
    def _execute(self, transforms, *args, **kwargs):
        """Executes this request with the given output transforms.

        This is the main entry point for a request: it validates the
        method, decodes path arguments, enforces XSRF protection, runs
        `prepare`, dispatches to the HTTP-verb method, and finishes the
        response unless auto-finish is disabled.
        """
        self._transforms = transforms
        try:
            if self.request.method not in self.SUPPORTED_METHODS:
                raise HTTPError(405)
            # Decode the positional and named groups captured by the
            # URL pattern before handing them to the verb method.
            self.path_args = [self.decode_argument(arg) for arg in args]
            self.path_kwargs = dict((k, self.decode_argument(v, name=k))
                                    for (k, v) in kwargs.items())
            # If XSRF cookies are turned on, reject form submissions without
            # the proper cookie
            if self.request.method not in ("GET", "HEAD", "OPTIONS") and \
                    self.application.settings.get("xsrf_cookies"):
                self.check_xsrf_cookie()

            # prepare() may be a plain method or a coroutine; yield only
            # when it returned something awaitable.
            result = self.prepare()
            if result is not None:
                result = yield result
            if self._prepared_future is not None:
                # Tell the Application we've finished with prepare()
                # and are ready for the body to arrive.
                self._prepared_future.set_result(None)
            if self._finished:
                return

            if _has_stream_request_body(self.__class__):
                # In streaming mode request.body is a Future that signals
                # the body has been completely received.  The Future has no
                # result; the data has been passed to self.data_received
                # instead.
                try:
                    yield self.request.body
                except iostream.StreamClosedError:
                    return

            # Dispatch to get()/post()/..., which may also be a coroutine.
            method = getattr(self, self.request.method.lower())
            result = method(*self.path_args, **self.path_kwargs)
            if result is not None:
                result = yield result
            if self._auto_finish and not self._finished:
                self.finish()
        except Exception as e:
            try:
                self._handle_request_exception(e)
            except Exception:
                app_log.error("Exception in exception handler", exc_info=True)
            if (self._prepared_future is not None and
                    not self._prepared_future.done()):
                # In case we failed before setting _prepared_future, do it
                # now (to unblock the HTTP server).  Note that this is not
                # in a finally block to avoid GC issues prior to Python 3.4.
                self._prepared_future.set_result(None)
def data_received(self, chunk):
"""Implement this method to handle streamed request data.
Requires the `.stream_request_body` decorator.
"""
raise NotImplementedError()
def _log(self):
"""Logs the current request.
Sort of deprecated since this functionality was moved to the
Application, but left in place for the benefit of existing apps
that have overridden this method.
"""
self.application.log_request(self)
def _request_summary(self):
return "%s %s (%s)" % (self.request.method, self.request.uri,
self.request.remote_ip)
    def _handle_request_exception(self, e):
        """Central handler for exceptions escaping the request pipeline.

        `Finish` ends the request quietly; `HTTPError` maps to its own
        status code; anything else becomes a logged 500.
        """
        if isinstance(e, Finish):
            # Not an error; just finish the request without logging.
            if not self._finished:
                self.finish(*e.args)
            return
        try:
            self.log_exception(*sys.exc_info())
        except Exception:
            # An error here should still get a best-effort send_error()
            # to avoid leaking the connection.
            app_log.error("Error in exception logger", exc_info=True)
        if self._finished:
            # Extra errors after the request has been finished should
            # be logged, but there is no reason to continue to try and
            # send a response.
            return
        if isinstance(e, HTTPError):
            if e.status_code not in httputil.responses and not e.reason:
                # Unknown status without a reason phrase cannot be
                # serialized in the status line; degrade to a 500.
                gen_log.error("Bad HTTP status code: %d", e.status_code)
                self.send_error(500, exc_info=sys.exc_info())
            else:
                self.send_error(e.status_code, exc_info=sys.exc_info())
        else:
            self.send_error(500, exc_info=sys.exc_info())
def log_exception(self, typ, value, tb):
"""Override to customize logging of uncaught exceptions.
By default logs instances of `HTTPError` as warnings without
stack traces (on the ``tornado.general`` logger), and all
other exceptions as errors with stack traces (on the
``tornado.application`` logger).
.. versionadded:: 3.1
"""
if isinstance(value, HTTPError):
if value.log_message:
format = "%d %s: " + value.log_message
args = ([value.status_code, self._request_summary()] +
list(value.args))
gen_log.warning(format, *args)
else:
app_log.error("Uncaught exception %s\n%r", self._request_summary(),
self.request, exc_info=(typ, value, tb))
def _ui_module(self, name, module):
def render(*args, **kwargs):
if not hasattr(self, "_active_modules"):
self._active_modules = {}
if name not in self._active_modules:
self._active_modules[name] = module(self)
rendered = self._active_modules[name].render(*args, **kwargs)
return rendered
return render
def _ui_method(self, method):
return lambda *args, **kwargs: method(self, *args, **kwargs)
def _clear_headers_for_304(self):
# 304 responses should not contain entity headers (defined in
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec7.html#sec7.1)
# not explicitly allowed by
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.5
headers = ["Allow", "Content-Encoding", "Content-Language",
"Content-Length", "Content-MD5", "Content-Range",
"Content-Type", "Last-Modified"]
for h in headers:
self.clear_header(h)
def asynchronous(method):
    """Wrap request handler methods with this if they are asynchronous.

    This decorator is for callback-style asynchronous methods; for
    coroutines, use the ``@gen.coroutine`` decorator without
    ``@asynchronous``. (It is legal for legacy reasons to use the two
    decorators together provided ``@asynchronous`` is first, but
    ``@asynchronous`` will be ignored in this case)

    This decorator should only be applied to the :ref:`HTTP verb
    methods <verbs>`; its behavior is undefined for any other method.
    This decorator does not *make* a method asynchronous; it tells
    the framework that the method *is* asynchronous.  For this decorator
    to be useful the method must (at least sometimes) do something
    asynchronous.

    If this decorator is given, the response is not finished when the
    method returns. It is up to the request handler to call
    `self.finish() <RequestHandler.finish>` to finish the HTTP
    request. Without this decorator, the request is automatically
    finished when the ``get()`` or ``post()`` method returns. Example:

    .. testcode::

       class MyRequestHandler(RequestHandler):
           @asynchronous
           def get(self):
              http = httpclient.AsyncHTTPClient()
              http.fetch("http://friendfeed.com/", self._on_download)

           def _on_download(self, response):
              self.write("Downloaded!")
              self.finish()

    .. testoutput::
       :hide:

    .. versionchanged:: 3.1
       The ability to use ``@gen.coroutine`` without ``@asynchronous``.

    .. versionchanged:: 4.3 Returning anything but ``None`` or a
       yieldable object from a method decorated with ``@asynchronous``
       is an error. Such return values were previously ignored silently.
    """
    # Delay the IOLoop import because it's not available on app engine.
    from salt.ext.tornado.ioloop import IOLoop

    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        # Disable the automatic finish(); the handler is now responsible
        # for ending the request itself.
        self._auto_finish = False
        with stack_context.ExceptionStackContext(
                self._stack_context_handle_exception):
            result = method(self, *args, **kwargs)
            if result is not None:
                result = gen.convert_yielded(result)

                # If @asynchronous is used with @gen.coroutine, (but
                # not @gen.engine), we can automatically finish the
                # request when the future resolves.  Additionally,
                # the Future will swallow any exceptions so we need
                # to throw them back out to the stack context to finish
                # the request.
                def future_complete(f):
                    f.result()
                    if not self._finished:
                        self.finish()
                IOLoop.current().add_future(result, future_complete)
                # Once we have done this, hide the Future from our
                # caller (i.e. RequestHandler._when_complete), which
                # would otherwise set up its own callback and
                # exception handler (resulting in exceptions being
                # logged twice).
                return None
            return result
    return wrapper
def stream_request_body(cls):
    """Apply to `RequestHandler` subclasses to enable streaming body support.

    This decorator implies the following changes:

    * `.HTTPServerRequest.body` is undefined, and body arguments will not
      be included in `RequestHandler.get_argument`.
    * `RequestHandler.prepare` is called when the request headers have been
      read instead of after the entire body has been read.
    * The subclass must define a method ``data_received(self, data):``, which
      will be called zero or more times as data is available.  Note that
      if the request has an empty body, ``data_received`` may not be called.
    * ``prepare`` and ``data_received`` may return Futures (such as via
      ``@gen.coroutine``, in which case the next method will not be called
      until those futures have completed.
    * The regular HTTP method (``post``, ``put``, etc) will be called after
      the entire body has been read.

    See the `file receiver demo <https://github.com/tornadoweb/tornado/tree/master/demos/file_upload/>`_
    for example usage.

    :raises TypeError: if ``cls`` is not a `RequestHandler` subclass.
    """
    if not issubclass(cls, RequestHandler):
        # Bug fix: TypeError does not %-format its arguments the way
        # logging calls do, so the message must be interpolated eagerly.
        # Previously the class was passed as a separate tuple element and
        # never substituted into the message.
        raise TypeError("expected subclass of RequestHandler, got %r" % cls)
    cls._stream_request_body = True
    return cls
def _has_stream_request_body(cls):
    """Return True if `stream_request_body` has been applied to ``cls``.

    :raises TypeError: if ``cls`` is not a `RequestHandler` subclass.
    """
    if not issubclass(cls, RequestHandler):
        # Bug fix: interpolate eagerly -- TypeError does not %-format its
        # arguments, so "(msg, cls)" would surface as an unformatted tuple.
        raise TypeError("expected subclass of RequestHandler, got %r" % cls)
    return getattr(cls, '_stream_request_body', False)
def removeslash(method):
    """Use this decorator to remove trailing slashes from the request path.

    For example, a request to ``/foo/`` would redirect to ``/foo`` with this
    decorator. Your request handler mapping should use a regular expression
    like ``r'/foo/*'`` in conjunction with using the decorator.
    """
    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        path = self.request.path
        if not path.endswith("/"):
            # Already canonical; run the handler as-is.
            return method(self, *args, **kwargs)
        if self.request.method not in ("GET", "HEAD"):
            raise HTTPError(404)
        uri = path.rstrip("/")
        if uri:  # a bare '/' strips to '' -- nothing to redirect to
            query = self.request.query
            if query:
                uri = "%s?%s" % (uri, query)
            self.redirect(uri, permanent=True)
        return
    return wrapper
def addslash(method):
    """Use this decorator to add a missing trailing slash to the request path.

    For example, a request to ``/foo`` would redirect to ``/foo/`` with this
    decorator. Your request handler mapping should use a regular expression
    like ``r'/foo/?'`` in conjunction with using the decorator.
    """
    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        if self.request.path.endswith("/"):
            # Already canonical; run the handler as-is.
            return method(self, *args, **kwargs)
        if self.request.method not in ("GET", "HEAD"):
            raise HTTPError(404)
        target = self.request.path + "/"
        if self.request.query:
            target = "%s?%s" % (target, self.request.query)
        self.redirect(target, permanent=True)
    return wrapper
class _ApplicationRouter(ReversibleRuleRouter):
    """Internal router binding `Application` to `RequestHandler` targets.

    Extends `~.routing.ReversibleRuleRouter` so that a `~.routing.Rule`
    target may be a `RequestHandler` subclass, or a list/tuple of rules
    (which is replaced by a nested `_ApplicationRouter`).
    """

    def __init__(self, application, rules=None):
        assert isinstance(application, Application)
        self.application = application
        super(_ApplicationRouter, self).__init__(rules)

    def process_rule(self, rule):
        processed = super(_ApplicationRouter, self).process_rule(rule)
        if isinstance(processed.target, (list, tuple)):
            # A nested rule list becomes its own sub-router.
            processed.target = _ApplicationRouter(
                self.application, processed.target)
        return processed

    def get_target_delegate(self, target, request, **target_params):
        if isclass(target) and issubclass(target, RequestHandler):
            # RequestHandler subclasses are dispatched through the
            # application so handler kwargs/path args are applied.
            return self.application.get_handler_delegate(
                request, target, **target_params)
        return super(_ApplicationRouter, self).get_target_delegate(
            target, request, **target_params)
class Application(ReversibleRouter):
    """A collection of request handlers that make up a web application.

    Instances of this class are callable and can be passed directly to
    HTTPServer to serve the application::

        application = web.Application([
            (r"/", MainPageHandler),
        ])
        http_server = httpserver.HTTPServer(application)
        http_server.listen(8080)
        ioloop.IOLoop.current().start()

    The constructor for this class takes in a list of `~.routing.Rule`
    objects or tuples of values corresponding to the arguments of
    `~.routing.Rule` constructor: ``(matcher, target, [target_kwargs], [name])``,
    the values in square brackets being optional. The default matcher is
    `~.routing.PathMatches`, so ``(regexp, target)`` tuples can also be used
    instead of ``(PathMatches(regexp), target)``.

    A common routing target is a `RequestHandler` subclass, but you can also
    use lists of rules as a target, which create a nested routing configuration::

        application = web.Application([
            (HostMatches("example.com"), [
                (r"/", MainPageHandler),
                (r"/feed", FeedHandler),
            ]),
        ])

    In addition to this you can use nested `~.routing.Router` instances,
    `~.httputil.HTTPMessageDelegate` subclasses and callables as routing targets
    (see `~.routing` module docs for more information).

    When we receive requests, we iterate over the list in order and
    instantiate an instance of the first request class whose regexp
    matches the request path. The request class can be specified as
    either a class object or a (fully-qualified) name.

    A dictionary may be passed as the third element (``target_kwargs``)
    of the tuple, which will be used as keyword arguments to the handler's
    constructor and `~RequestHandler.initialize` method. This pattern
    is used for the `StaticFileHandler` in this example (note that a
    `StaticFileHandler` can be installed automatically with the
    static_path setting described below)::

        application = web.Application([
            (r"/static/(.*)", web.StaticFileHandler, {"path": "/var/www"}),
        ])

    We support virtual hosts with the `add_handlers` method, which takes in
    a host regular expression as the first argument::

        application.add_handlers(r"www\.myhost\.com", [
            (r"/article/([0-9]+)", ArticleHandler),
        ])

    If there's no match for the current request's host, then ``default_host``
    parameter value is matched against host regular expressions.

    You can serve static files by sending the ``static_path`` setting
    as a keyword argument. We will serve those files from the
    ``/static/`` URI (this is configurable with the
    ``static_url_prefix`` setting), and we will serve ``/favicon.ico``
    and ``/robots.txt`` from the same directory. A custom subclass of
    `StaticFileHandler` can be specified with the
    ``static_handler_class`` setting.

    .. versionchanged:: 4.5
       Integration with the new `tornado.routing` module.
    """
    def __init__(self, handlers=None, default_host=None, transforms=None,
                 **settings):
        if transforms is None:
            self.transforms = []
            # Legacy "gzip" setting is an alias for "compress_response".
            if settings.get("compress_response") or settings.get("gzip"):
                self.transforms.append(GZipContentEncoding)
        else:
            self.transforms = transforms
        self.default_host = default_host
        self.settings = settings
        # Built-in UI modules/methods available to every template, extended
        # below by the ``ui_modules``/``ui_methods`` settings.
        self.ui_modules = {'linkify': _linkify,
                           'xsrf_form_html': _xsrf_form_html,
                           'Template': TemplateModule,
                           }
        self.ui_methods = {}
        self._load_ui_modules(settings.get("ui_modules", {}))
        self._load_ui_methods(settings.get("ui_methods", {}))
        if self.settings.get("static_path"):
            # Install static-file routes: the configured prefix plus the
            # conventional /favicon.ico and /robots.txt, prepended so they
            # take precedence over user handlers.
            path = self.settings["static_path"]
            handlers = list(handlers or [])
            static_url_prefix = settings.get("static_url_prefix",
                                             "/static/")
            static_handler_class = settings.get("static_handler_class",
                                                StaticFileHandler)
            static_handler_args = settings.get("static_handler_args", {})
            static_handler_args['path'] = path
            for pattern in [re.escape(static_url_prefix) + r"(.*)",
                            r"/(favicon\.ico)", r"/(robots\.txt)"]:
                handlers.insert(0, (pattern, static_handler_class,
                                    static_handler_args))
        if self.settings.get('debug'):
            # Debug mode implies several development conveniences unless
            # each has been configured explicitly.
            self.settings.setdefault('autoreload', True)
            self.settings.setdefault('compiled_template_cache', False)
            self.settings.setdefault('static_hash_cache', False)
            self.settings.setdefault('serve_traceback', True)
        # The wildcard router holds the user's handlers; the default router
        # wraps it behind an AnyMatches rule so add_handlers can insert
        # host-specific rules ahead of it.
        self.wildcard_router = _ApplicationRouter(self, handlers)
        self.default_router = _ApplicationRouter(self, [
            Rule(AnyMatches(), self.wildcard_router)
        ])
        # Automatically reload modified modules
        if self.settings.get('autoreload'):
            from salt.ext.tornado import autoreload
            autoreload.start()
    def listen(self, port, address="", **kwargs):
        """Starts an HTTP server for this application on the given port.

        This is a convenience alias for creating an `.HTTPServer`
        object and calling its listen method.  Keyword arguments not
        supported by `HTTPServer.listen <.TCPServer.listen>` are passed to the
        `.HTTPServer` constructor.  For advanced uses
        (e.g. multi-process mode), do not use this method; create an
        `.HTTPServer` and call its
        `.TCPServer.bind`/`.TCPServer.start` methods directly.

        Note that after calling this method you still need to call
        ``IOLoop.current().start()`` to start the server.

        Returns the `.HTTPServer` object.

        .. versionchanged:: 4.3
           Now returns the `.HTTPServer` object.
        """
        # import is here rather than top level because HTTPServer
        # is not importable on appengine
        from salt.ext.tornado.httpserver import HTTPServer
        server = HTTPServer(self, **kwargs)
        server.listen(port, address)
        return server
    def add_handlers(self, host_pattern, host_handlers):
        """Appends the given handlers to our handler list.

        Host patterns are processed sequentially in the order they were
        added. All matching patterns will be considered.
        """
        host_matcher = HostMatches(host_pattern)
        rule = Rule(host_matcher, _ApplicationRouter(self, host_handlers))
        # Insert before the final catch-all rule so the wildcard router
        # keeps matching last.
        self.default_router.rules.insert(-1, rule)
        if self.default_host is not None:
            self.wildcard_router.add_rules([(
                DefaultHostMatches(self, host_matcher.host_pattern),
                host_handlers
            )])
    def add_transform(self, transform_class):
        """Registers an additional output transform class."""
        self.transforms.append(transform_class)
    def _load_ui_methods(self, methods):
        # Accepts a module, a list (of modules/dicts), or a dict of callables.
        if isinstance(methods, types.ModuleType):
            self._load_ui_methods(dict((n, getattr(methods, n))
                                       for n in dir(methods)))
        elif isinstance(methods, list):
            for m in methods:
                self._load_ui_methods(m)
        else:
            for name, fn in methods.items():
                # Only public, lowercase-initial callables qualify.
                if not name.startswith("_") and hasattr(fn, "__call__") \
                        and name[0].lower() == name[0]:
                    self.ui_methods[name] = fn
    def _load_ui_modules(self, modules):
        # Accepts a module, a list (of modules/dicts), or a dict of classes.
        if isinstance(modules, types.ModuleType):
            self._load_ui_modules(dict((n, getattr(modules, n))
                                       for n in dir(modules)))
        elif isinstance(modules, list):
            for m in modules:
                self._load_ui_modules(m)
        else:
            assert isinstance(modules, dict)
            for name, cls in modules.items():
                try:
                    if issubclass(cls, UIModule):
                        self.ui_modules[name] = cls
                except TypeError:
                    # issubclass raises for non-class entries; skip them.
                    pass
    def __call__(self, request):
        # Legacy HTTPServer interface
        dispatcher = self.find_handler(request)
        return dispatcher.execute()
    def find_handler(self, request, **kwargs):
        # Route lookup order: configured rules, then the optional
        # default_handler_class setting, finally a built-in 404.
        route = self.default_router.find_handler(request)
        if route is not None:
            return route
        if self.settings.get('default_handler_class'):
            return self.get_handler_delegate(
                request,
                self.settings['default_handler_class'],
                self.settings.get('default_handler_args', {}))
        return self.get_handler_delegate(
            request, ErrorHandler, {'status_code': 404})
    def get_handler_delegate(self, request, target_class, target_kwargs=None,
                             path_args=None, path_kwargs=None):
        """Returns `~.httputil.HTTPMessageDelegate` that can serve a request
        for application and `RequestHandler` subclass.

        :arg httputil.HTTPServerRequest request: current HTTP request.
        :arg RequestHandler target_class: a `RequestHandler` class.
        :arg dict target_kwargs: keyword arguments for ``target_class`` constructor.
        :arg list path_args: positional arguments for ``target_class`` HTTP method that
            will be executed while handling a request (``get``, ``post`` or any other).
        :arg dict path_kwargs: keyword arguments for ``target_class`` HTTP method.
        """
        return _HandlerDelegate(
            self, request, target_class, target_kwargs, path_args, path_kwargs)
    def reverse_url(self, name, *args):
        """Returns a URL path for handler named ``name``

        The handler must be added to the application as a named `URLSpec`.

        Args will be substituted for capturing groups in the `URLSpec` regex.
        They will be converted to strings if necessary, encoded as utf8,
        and url-escaped.
        """
        reversed_url = self.default_router.reverse_url(name, *args)
        if reversed_url is not None:
            return reversed_url
        raise KeyError("%s not found in named urls" % name)
    def log_request(self, handler):
        """Writes a completed HTTP request to the logs.

        By default writes to the python root logger.  To change
        this behavior either subclass Application and override this method,
        or pass a function in the application settings dictionary as
        ``log_function``.
        """
        if "log_function" in self.settings:
            self.settings["log_function"](handler)
            return
        # Log level is keyed to the response status class: <400 info,
        # 4xx warning, 5xx error.
        if handler.get_status() < 400:
            log_method = access_log.info
        elif handler.get_status() < 500:
            log_method = access_log.warning
        else:
            log_method = access_log.error
        request_time = 1000.0 * handler.request.request_time()
        log_method("%d %s %.2fms", handler.get_status(),
                   handler._request_summary(), request_time)
class _HandlerDelegate(httputil.HTTPMessageDelegate):
    """Per-request adapter that lets the HTTP server drive a `RequestHandler`.

    Created by `Application.get_handler_delegate`; either buffers the
    request body or streams it (when the handler class is decorated with
    `stream_request_body`), then executes the handler.
    """
    def __init__(self, application, request, handler_class, handler_kwargs,
                 path_args, path_kwargs):
        self.application = application
        self.connection = request.connection
        self.request = request
        self.handler_class = handler_class
        self.handler_kwargs = handler_kwargs or {}
        self.path_args = path_args or []
        self.path_kwargs = path_kwargs or {}
        # Accumulates body chunks when the handler does not stream the body.
        self.chunks = []
        self.stream_request_body = _has_stream_request_body(self.handler_class)
    def headers_received(self, start_line, headers):
        if self.stream_request_body:
            # Streaming mode: start the handler immediately; request.body
            # becomes a Future resolved in finish() after the last chunk.
            self.request.body = Future()
            return self.execute()
    def data_received(self, data):
        if self.stream_request_body:
            # Forward each chunk to the handler as it arrives.
            return self.handler.data_received(data)
        else:
            self.chunks.append(data)
    def finish(self):
        if self.stream_request_body:
            self.request.body.set_result(None)
        else:
            # Join buffered chunks into the final body, parse arguments,
            # and only then run the handler.
            self.request.body = b''.join(self.chunks)
            self.request._parse_body()
            self.execute()
    def on_connection_close(self):
        if self.stream_request_body:
            self.handler.on_connection_close()
        else:
            # Drop the partially-buffered body; the request is abandoned.
            self.chunks = None
    def execute(self):
        # If template cache is disabled (usually in the debug mode),
        # re-compile templates and reload static files on every
        # request so you don't need to restart to see changes
        if not self.application.settings.get("compiled_template_cache", True):
            with RequestHandler._template_loader_lock:
                for loader in RequestHandler._template_loaders.values():
                    loader.reset()
        if not self.application.settings.get('static_hash_cache', True):
            StaticFileHandler.reset()
        self.handler = self.handler_class(self.application, self.request,
                                          **self.handler_kwargs)
        transforms = [t(self.request) for t in self.application.transforms]
        if self.stream_request_body:
            self.handler._prepared_future = Future()
        # Note that if an exception escapes handler._execute it will be
        # trapped in the Future it returns (which we are ignoring here,
        # leaving it to be logged when the Future is GC'd).
        # However, that shouldn't happen because _execute has a blanket
        # except handler, and we cannot easily access the IOLoop here to
        # call add_future (because of the requirement to remain compatible
        # with WSGI)
        self.handler._execute(transforms, *self.path_args,
                              **self.path_kwargs)
        # If we are streaming the request body, then execute() is finished
        # when the handler has prepared to receive the body. If not,
        # it doesn't matter when execute() finishes (so we return None)
        return self.handler._prepared_future
class HTTPError(Exception):
    """An exception that will turn into an HTTP error response.

    Raising an `HTTPError` is a convenient alternative to calling
    `RequestHandler.send_error` since it automatically ends the
    current function.

    To customize the response sent with an `HTTPError`, override
    `RequestHandler.write_error`.

    :arg int status_code: HTTP status code.  Must be listed in
        `httplib.responses <http.client.responses>` unless the ``reason``
        keyword argument is given.
    :arg string log_message: Message to be written to the log for this error
        (will not be shown to the user unless the `Application` is in debug
        mode).  May contain ``%s``-style placeholders, which will be filled
        in with remaining positional parameters.
    :arg string reason: Keyword-only argument.  The HTTP "reason" phrase
        to pass in the status line along with ``status_code``.  Normally
        determined automatically from ``status_code``, but can be used
        to use a non-standard numeric code.
    """
    def __init__(self, status_code=500, log_message=None, *args, **kwargs):
        self.status_code = status_code
        self.log_message = log_message
        self.args = args
        self.reason = kwargs.get('reason', None)
        if log_message and not args:
            # With no format args, escape any literal '%' so later
            # %-formatting of the bare message cannot fail.
            self.log_message = log_message.replace('%', '%%')

    def __str__(self):
        reason = self.reason or httputil.responses.get(
            self.status_code, 'Unknown')
        message = "HTTP %d: %s" % (self.status_code, reason)
        if not self.log_message:
            return message
        return "%s (%s)" % (message, self.log_message % self.args)
class Finish(Exception):
    """An exception that ends the request without producing an error response.

    When `Finish` is raised in a `RequestHandler`, the request will
    end (calling `RequestHandler.finish` if it hasn't already been
    called), but the error-handling methods (including
    `RequestHandler.write_error`) will not be called.

    If `Finish()` was created with no arguments, the pending response
    will be sent as-is. If `Finish()` was given an argument, that
    argument will be passed to `RequestHandler.finish()`.

    This can be a more convenient way to implement custom error pages
    than overriding ``write_error`` (especially in library code)::

        if self.current_user is None:
            self.set_status(401)
            self.set_header('WWW-Authenticate', 'Basic realm="something"')
            raise Finish()

    .. versionchanged:: 4.3
       Arguments passed to ``Finish()`` will be passed on to
       `RequestHandler.finish`.
    """
    # Marker exception only; all behavior lives in RequestHandler._execute.
    pass
class MissingArgumentError(HTTPError):
    """Exception raised by `RequestHandler.get_argument` for absent arguments.

    This is a subclass of `HTTPError`, so if it is uncaught a 400 response
    code will be used instead of 500 (and a stack trace will not be logged).

    .. versionadded:: 3.1
    """
    def __init__(self, arg_name):
        # Keep the missing argument's name available to callers.
        self.arg_name = arg_name
        super(MissingArgumentError, self).__init__(
            400, 'Missing argument %s' % arg_name)
class ErrorHandler(RequestHandler):
    """Generates an error response with ``status_code`` for all requests."""

    def initialize(self, status_code):
        """Record the status code this handler will always respond with."""
        self.set_status(status_code)

    def prepare(self):
        # Fail every request before any verb method runs.
        raise HTTPError(self._status_code)

    def check_xsrf_cookie(self):
        # POSTs to an ErrorHandler have no side effects, so skipping the
        # XSRF check lets a POST to a wrong url yield 404 instead of 403.
        pass
class RedirectHandler(RequestHandler):
    """Redirects the client to the given URL for all GET requests.

    You should provide the keyword argument ``url`` to the handler, e.g.::

        application = web.Application([
            (r"/oldpath", web.RedirectHandler, {"url": "/newpath"}),
        ])

    `RedirectHandler` supports regular expression substitutions. E.g., to
    swap the first and second parts of a path while preserving the remainder::

        application = web.Application([
            (r"/(.*?)/(.*?)/(.*)", web.RedirectHandler, {"url": "/{1}/{0}/{2}"}),
        ])

    The final URL is formatted with `str.format` and the substrings that match
    the capturing groups. In the above example, a request to "/a/b/c" would be
    formatted like::

        str.format("/{1}/{0}/{2}", "a", "b", "c")  # -> "/b/a/c"

    Use Python's :ref:`format string syntax <formatstrings>` to customize how
    values are substituted.

    .. versionchanged:: 4.5
       Added support for substitutions into the destination URL.
    """
    def initialize(self, url, permanent=True):
        self._url = url
        self._permanent = permanent

    def get(self, *args):
        # Substitute captured path groups into the destination template.
        target = self._url.format(*args)
        self.redirect(target, permanent=self._permanent)
class StaticFileHandler(RequestHandler):
"""A simple handler that can serve static content from a directory.
A `StaticFileHandler` is configured automatically if you pass the
``static_path`` keyword argument to `Application`. This handler
can be customized with the ``static_url_prefix``, ``static_handler_class``,
and ``static_handler_args`` settings.
To map an additional path to this handler for a static data directory
you would add a line to your application like::
application = web.Application([
(r"/content/(.*)", web.StaticFileHandler, {"path": "/var/www"}),
])
The handler constructor requires a ``path`` argument, which specifies the
local root directory of the content to be served.
Note that a capture group in the regex is required to parse the value for
the ``path`` argument to the get() method (different than the constructor
argument above); see `URLSpec` for details.
To serve a file like ``index.html`` automatically when a directory is
requested, set ``static_handler_args=dict(default_filename="index.html")``
in your application settings, or add ``default_filename`` as an initializer
argument for your ``StaticFileHandler``.
To maximize the effectiveness of browser caching, this class supports
versioned urls (by default using the argument ``?v=``). If a version
is given, we instruct the browser to cache this file indefinitely.
`make_static_url` (also available as `RequestHandler.static_url`) can
be used to construct a versioned url.
This handler is intended primarily for use in development and light-duty
file serving; for heavy traffic it will be more efficient to use
a dedicated static file server (such as nginx or Apache). We support
the HTTP ``Accept-Ranges`` mechanism to return partial content (because
some browsers require this functionality to be present to seek in
HTML5 audio or video).
**Subclassing notes**
This class is designed to be extensible by subclassing, but because
of the way static urls are generated with class methods rather than
instance methods, the inheritance patterns are somewhat unusual.
Be sure to use the ``@classmethod`` decorator when overriding a
class method. Instance methods may use the attributes ``self.path``
``self.absolute_path``, and ``self.modified``.
Subclasses should only override methods discussed in this section;
overriding other methods is error-prone. Overriding
``StaticFileHandler.get`` is particularly problematic due to the
tight coupling with ``compute_etag`` and other methods.
To change the way static urls are generated (e.g. to match the behavior
of another server or CDN), override `make_static_url`, `parse_url_path`,
`get_cache_time`, and/or `get_version`.
To replace all interaction with the filesystem (e.g. to serve
static content from a database), override `get_content`,
`get_content_size`, `get_modified_time`, `get_absolute_path`, and
`validate_absolute_path`.
.. versionchanged:: 3.1
Many of the methods for subclasses were added in Tornado 3.1.
"""
CACHE_MAX_AGE = 86400 * 365 * 10 # 10 years
_static_hashes = {} # type: typing.Dict
_lock = threading.Lock() # protects _static_hashes
def initialize(self, path, default_filename=None):
    """Record the filesystem root and the optional directory-index filename."""
    self.default_filename = default_filename
    self.root = path
@classmethod
def reset(cls):
    """Clear the class-wide static-file hash cache.

    Called on every request when the ``static_hash_cache`` setting is
    disabled (debug mode), so changed files get fresh version hashes.
    """
    # _lock guards _static_hashes, which is shared across threads.
    with cls._lock:
        cls._static_hashes = {}
def head(self, path):
    """Answer a HEAD request by running ``get`` without sending the body."""
    return self.get(path, include_body=False)
@gen.coroutine
def get(self, path, include_body=True):
    """Serve the file at ``path`` (resolved relative to ``self.root``).

    Handles conditional requests (ETag / If-Modified-Since -> 304) and
    HTTP Range requests (206 / 416).  ``include_body=False`` is used by
    ``head()`` to send headers only.
    """
    # Set up our path instance variables.
    self.path = self.parse_url_path(path)
    del path  # make sure we don't refer to path instead of self.path again
    absolute_path = self.get_absolute_path(self.root, self.path)
    self.absolute_path = self.validate_absolute_path(
        self.root, absolute_path)
    if self.absolute_path is None:
        # validate_absolute_path already redirected (or raised).
        return

    self.modified = self.get_modified_time()
    self.set_headers()

    if self.should_return_304():
        self.set_status(304)
        return

    request_range = None
    range_header = self.request.headers.get("Range")
    if range_header:
        # As per RFC 2616 14.16, if an invalid Range header is specified,
        # the request will be treated as if the header didn't exist.
        request_range = httputil._parse_request_range(range_header)

    size = self.get_content_size()
    if request_range:
        start, end = request_range
        if (start is not None and start >= size) or end == 0:
            # As per RFC 2616 14.35.1, a range is not satisfiable only: if
            # the first requested byte is equal to or greater than the
            # content, or when a suffix with length 0 is specified
            self.set_status(416)  # Range Not Satisfiable
            self.set_header("Content-Type", "text/plain")
            self.set_header("Content-Range", "bytes */%s" % (size, ))
            return
        if start is not None and start < 0:
            # Negative start means a suffix range; make it absolute.
            start += size
        if end is not None and end > size:
            # Clients sometimes blindly use a large range to limit their
            # download size; cap the endpoint at the actual file size.
            end = size
        # Note: only return HTTP 206 if less than the entire range has been
        # requested. Not only is this semantically correct, but Chrome
        # refuses to play audio if it gets an HTTP 206 in response to
        # ``Range: bytes=0-``.
        if size != (end or size) - (start or 0):
            self.set_status(206)  # Partial Content
            self.set_header("Content-Range",
                            httputil._get_content_range(start, end, size))
    else:
        start = end = None

    # Compute Content-Length from whichever range bounds are set.
    if start is not None and end is not None:
        content_length = end - start
    elif end is not None:
        content_length = end
    elif start is not None:
        content_length = size - start
    else:
        content_length = size
    self.set_header("Content-Length", content_length)

    if include_body:
        content = self.get_content(self.absolute_path, start, end)
        if isinstance(content, bytes):
            content = [content]
        for chunk in content:
            try:
                self.write(chunk)
                yield self.flush()
            except iostream.StreamClosedError:
                # Client disconnected mid-download; nothing more to do.
                return
    else:
        assert self.request.method == "HEAD"
def compute_etag(self):
    """Sets the ``Etag`` header based on static url version.

    This allows efficient ``If-None-Match`` checks against cached
    versions, and sends the correct ``Etag`` for a partial response
    (i.e. the same ``Etag`` as the full file).

    .. versionadded:: 3.1
    """
    version_hash = self._get_cached_version(self.absolute_path)
    if version_hash:
        return '"%s"' % (version_hash, )
    return None
def set_headers(self):
    """Sets the content and caching headers on the response.

    .. versionadded:: 3.1
    """
    self.set_header("Accept-Ranges", "bytes")
    self.set_etag_header()

    if self.modified is not None:
        self.set_header("Last-Modified", self.modified)

    content_type = self.get_content_type()
    if content_type:
        self.set_header("Content-Type", content_type)

    cache_time = self.get_cache_time(self.path, self.modified, content_type)
    if cache_time > 0:
        # A positive cache time yields both an absolute Expires stamp and
        # a relative max-age directive.
        expiry = (datetime.datetime.utcnow() +
                  datetime.timedelta(seconds=cache_time))
        self.set_header("Expires", expiry)
        self.set_header("Cache-Control", "max-age=" + str(cache_time))

    self.set_extra_headers(self.path)
def should_return_304(self):
    """Returns True if the headers indicate that we should return 304.

    .. versionadded:: 3.1
    """
    if self.check_etag_header():
        return True
    # The ETag did not settle it; fall back to If-Modified-Since.
    ims_value = self.request.headers.get("If-Modified-Since")
    if ims_value is None:
        return False
    date_tuple = email.utils.parsedate(ims_value)
    if date_tuple is None:
        return False
    if_since = datetime.datetime(*date_tuple[:6])
    return if_since >= self.modified
@classmethod
def get_absolute_path(cls, root, path):
    """Returns the absolute location of ``path`` relative to ``root``.

    ``root`` is the path configured for this `StaticFileHandler`
    (in most cases the ``static_path`` `Application` setting).

    This class method may be overridden in subclasses.  By default
    it returns a filesystem path, but other strings may be used
    as long as they are unique and understood by the subclass's
    overridden `get_content`.

    .. versionadded:: 3.1
    """
    return os.path.abspath(os.path.join(root, path))
def validate_absolute_path(self, root, absolute_path):
    """Validate and return the absolute path.

    ``root`` is the configured path for the `StaticFileHandler`,
    and ``path`` is the result of `get_absolute_path`

    This is an instance method called during request processing,
    so it may raise `HTTPError` or use methods like
    `RequestHandler.redirect` (return None after redirecting to
    halt further processing).  This is where 404 errors for missing files
    are generated.

    This method may modify the path before returning it, but note that
    any such modifications will not be understood by `make_static_url`.

    In instance methods, this method's result is available as
    ``self.absolute_path``.

    .. versionadded:: 3.1
    """
    # os.path.abspath strips a trailing /.
    # We must add it back to `root` so that we only match files
    # in a directory named `root` instead of files starting with
    # that prefix.
    root = os.path.abspath(root)
    if not root.endswith(os.path.sep):
        # abspath always removes a trailing slash, except when
        # root is '/'. This is an unusual case, but several projects
        # have independently discovered this technique to disable
        # Tornado's path validation and (hopefully) do their own,
        # so we need to support it.
        root += os.path.sep
    # The trailing slash also needs to be temporarily added back
    # the requested path so a request to root/ will match.
    if not (absolute_path + os.path.sep).startswith(root):
        # Path-traversal guard: anything resolving outside root is refused.
        raise HTTPError(403, "%s is not in root static directory",
                        self.path)
    if (os.path.isdir(absolute_path) and
            self.default_filename is not None):
        # need to look at the request.path here for when path is empty
        # but there is some prefix to the path that was already
        # trimmed by the routing
        if not self.request.path.endswith("/"):
            # Canonicalize directory URLs to the trailing-slash form so
            # relative links inside the index document resolve correctly.
            self.redirect(self.request.path + "/", permanent=True)
            return
        absolute_path = os.path.join(absolute_path, self.default_filename)
    if not os.path.exists(absolute_path):
        raise HTTPError(404)
    if not os.path.isfile(absolute_path):
        # Directories (without a default_filename) and special files are
        # forbidden rather than not-found.
        raise HTTPError(403, "%s is not a file", self.path)
    return absolute_path
@classmethod
def get_content(cls, abspath, start=None, end=None):
    """Retrieve the content of the requested resource which is located
    at the given absolute path.

    This class method may be overridden by subclasses.  Note that its
    signature is different from other overridable class methods
    (no ``settings`` argument); this is deliberate to ensure that
    ``abspath`` is able to stand on its own as a cache key.

    This method should either return a byte string or an iterator
    of byte strings.  The latter is preferred for large files
    as it helps reduce memory fragmentation.

    .. versionadded:: 3.1
    """
    with open(abspath, "rb") as fp:
        if start is not None:
            fp.seek(start)
        # remaining is None for open-ended reads (stream to EOF).
        remaining = None if end is None else end - (start or 0)
        while True:
            chunk_size = 64 * 1024
            if remaining is not None and remaining < chunk_size:
                chunk_size = remaining
            chunk = fp.read(chunk_size)
            if not chunk:
                # EOF (or a zero-length final read when the bounded
                # range has been fully delivered).
                if remaining is not None:
                    assert remaining == 0
                return
            if remaining is not None:
                remaining -= len(chunk)
            yield chunk
@classmethod
def get_content_version(cls, abspath):
    """Returns a version string for the resource at the given path.

    This class method may be overridden by subclasses.  The
    default implementation is a hash of the file's contents.

    .. versionadded:: 3.1
    """
    data = cls.get_content(abspath)
    hasher = hashlib.md5()
    # get_content may return either a single bytes object or an
    # iterator of chunks; normalize to an iterable of chunks.
    chunks = [data] if isinstance(data, bytes) else data
    for chunk in chunks:
        hasher.update(chunk)
    return hasher.hexdigest()
    def _stat(self):
        # Cache the os.stat result so repeated size/mtime queries during
        # a single request only hit the filesystem once.
        if not hasattr(self, '_stat_result'):
            self._stat_result = os.stat(self.absolute_path)
        return self._stat_result
    def get_content_size(self):
        """Retrieve the total size of the resource at the given path.
        This method may be overridden by subclasses.
        .. versionadded:: 3.1
        .. versionchanged:: 4.0
        This method is now always called, instead of only when
        partial results are requested.
        """
        # Size in bytes, taken from the cached stat result.
        stat_result = self._stat()
        return stat_result[stat.ST_SIZE]
    def get_modified_time(self):
        """Returns the time that ``self.absolute_path`` was last modified.
        May be overridden in subclasses. Should return a `~datetime.datetime`
        object or None.
        .. versionadded:: 3.1
        """
        stat_result = self._stat()
        # Naive datetime in UTC, derived from the filesystem mtime.
        modified = datetime.datetime.utcfromtimestamp(
            stat_result[stat.ST_MTIME])
        return modified
    def get_content_type(self):
        """Returns the ``Content-Type`` header to be used for this request.
        .. versionadded:: 3.1
        """
        # guess_type returns a (mime_type, encoding) pair; either element
        # may be None.
        mime_type, encoding = mimetypes.guess_type(self.absolute_path)
        # per RFC 6713, use the appropriate type for a gzip compressed file
        if encoding == "gzip":
            return "application/gzip"
        # As of 2015-07-21 there is no bzip2 encoding defined at
        # http://www.iana.org/assignments/media-types/media-types.xhtml
        # So for that (and any other encoding), use octet-stream.
        elif encoding is not None:
            return "application/octet-stream"
        elif mime_type is not None:
            return mime_type
        # if mime_type not detected, use application/octet-stream
        else:
            return "application/octet-stream"
    def set_extra_headers(self, path):
        """For subclass to add extra headers to the response"""
        # Default implementation intentionally adds nothing.
        pass
    def get_cache_time(self, path, modified, mime_type):
        """Override to customize cache control behavior.
        Return a positive number of seconds to make the result
        cacheable for that amount of time or 0 to mark resource as
        cacheable for an unspecified amount of time (subject to
        browser heuristics).
        By default returns cache expiry of 10 years for resources requested
        with ``v`` argument.
        """
        # CACHE_MAX_AGE is a class attribute defined outside this view
        # (10 years per the docstring above) — only applied to requests
        # that carry an explicit version query argument.
        return self.CACHE_MAX_AGE if "v" in self.request.arguments else 0
    @classmethod
    def make_static_url(cls, settings, path, include_version=True):
        """Constructs a versioned url for the given path.
        This method may be overridden in subclasses (but note that it
        is a class method rather than an instance method). Subclasses
        are only required to implement the signature
        ``make_static_url(cls, settings, path)``; other keyword
        arguments may be passed through `~RequestHandler.static_url`
        but are not standard.
        ``settings`` is the `Application.settings` dictionary. ``path``
        is the static path being requested. The url returned should be
        relative to the current host.
        ``include_version`` determines whether the generated URL should
        include the query string containing the version hash of the
        file corresponding to the given ``path``.
        """
        url = settings.get('static_url_prefix', '/static/') + path
        if not include_version:
            return url
        version_hash = cls.get_version(settings, path)
        if not version_hash:
            # Hash unavailable (e.g. file unreadable): fall back to the
            # unversioned URL.
            return url
        return '%s?v=%s' % (url, version_hash)
    def parse_url_path(self, url_path):
        """Converts a static URL path into a filesystem path.
        ``url_path`` is the path component of the URL with
        ``static_url_prefix`` removed. The return value should be
        filesystem path relative to ``static_path``.
        This is the inverse of `make_static_url`.
        """
        # On platforms where the filesystem separator is not "/" (i.e.
        # Windows), translate URL separators to native ones.
        if os.path.sep != "/":
            url_path = url_path.replace("/", os.path.sep)
        return url_path
    @classmethod
    def get_version(cls, settings, path):
        """Generate the version string to be used in static URLs.
        ``settings`` is the `Application.settings` dictionary and ``path``
        is the relative location of the requested asset on the filesystem.
        The returned value should be a string, or ``None`` if no version
        could be determined.
        .. versionchanged:: 3.1
        This method was previously recommended for subclasses to override;
        `get_content_version` is now preferred as it allows the base
        class to handle caching of the result.
        """
        abs_path = cls.get_absolute_path(settings['static_path'], path)
        # Delegate to the class-level cache of content hashes.
        return cls._get_cached_version(abs_path)
    @classmethod
    def _get_cached_version(cls, abs_path):
        # Compute (and memoize) the content hash for ``abs_path``.  The
        # lock guards the shared class-level cache against concurrent
        # access; ``_lock`` and ``_static_hashes`` are class attributes
        # defined outside this view.
        with cls._lock:
            hashes = cls._static_hashes
            if abs_path not in hashes:
                try:
                    hashes[abs_path] = cls.get_content_version(abs_path)
                except Exception:
                    gen_log.error("Could not open static file %r", abs_path)
                    # Cache the failure too, so every request doesn't retry
                    # an unreadable file.
                    hashes[abs_path] = None
            hsh = hashes.get(abs_path)
            if hsh:
                return hsh
        return None
class FallbackHandler(RequestHandler):
    """A `RequestHandler` that wraps another HTTP server callback.
    The fallback is a callable object that accepts an
    `~.httputil.HTTPServerRequest`, such as an `Application` or
    `tornado.wsgi.WSGIContainer`. This is most useful to use both
    Tornado ``RequestHandlers`` and WSGI in the same server. Typical
    usage::
    wsgi_app = tornado.wsgi.WSGIContainer(
    django.core.handlers.wsgi.WSGIHandler())
    application = tornado.web.Application([
    (r"/foo", FooHandler),
    (r".*", FallbackHandler, dict(fallback=wsgi_app),
    ])
    """
    def initialize(self, fallback):
        # ``fallback`` is any callable that accepts an HTTPServerRequest.
        self.fallback = fallback
    def prepare(self):
        # Hand the entire request to the wrapped application, then mark
        # this handler as finished so Tornado does not try to produce a
        # response of its own.
        self.fallback(self.request)
        self._finished = True
class OutputTransform(object):
    """Base class for response transforms (e.g. gzip content encoding).

    A transform may rewrite the status code, headers, and body chunks of
    an HTTP response as they are written out.  The framework instantiates
    and applies transforms itself; applications are not expected to
    create their own OutputTransforms or interact with them directly.
    """

    def __init__(self, request):
        # The base transform is stateless and ignores the request.
        pass

    def transform_first_chunk(self, status_code, headers, chunk, finishing):
        # type: (int, httputil.HTTPHeaders, bytes, bool) -> typing.Tuple[int, httputil.HTTPHeaders, bytes]
        # Identity transform: pass the status line, headers, and first
        # body chunk through untouched.
        return status_code, headers, chunk

    def transform_chunk(self, chunk, finishing):
        # Identity transform for all subsequent body chunks.
        return chunk
class GZipContentEncoding(OutputTransform):
    """Applies the gzip content encoding to the response.
    See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11
    .. versionchanged:: 4.0
    Now compresses all mime types beginning with ``text/``, instead
    of just a whitelist. (the whitelist is still used for certain
    non-text mime types).
    """
    # Whitelist of compressible mime types (in addition to any types
    # beginning with "text/").
    CONTENT_TYPES = set(["application/javascript", "application/x-javascript",
                         "application/xml", "application/atom+xml",
                         "application/json", "application/xhtml+xml",
                         "image/svg+xml"])
    # Python's GzipFile defaults to level 9, while most other gzip
    # tools (including gzip itself) default to 6, which is probably a
    # better CPU/size tradeoff.
    GZIP_LEVEL = 6
    # Responses that are too short are unlikely to benefit from gzipping
    # after considering the "Content-Encoding: gzip" header and the header
    # inside the gzip encoding.
    # Note that responses written in multiple chunks will be compressed
    # regardless of size.
    MIN_LENGTH = 1024
    def __init__(self, request):
        # Only compress if the client advertised gzip support.
        self._gzipping = "gzip" in request.headers.get("Accept-Encoding", "")
    def _compressible_type(self, ctype):
        # All text/* types are compressible, plus the explicit whitelist.
        return ctype.startswith('text/') or ctype in self.CONTENT_TYPES
    def transform_first_chunk(self, status_code, headers, chunk, finishing):
        # type: (int, httputil.HTTPHeaders, bytes, bool) -> typing.Tuple[int, httputil.HTTPHeaders, bytes]
        # TODO: can/should this type be inherited from the superclass?
        # Always advertise that the response varies by Accept-Encoding,
        # even when not compressing, so caches behave correctly.
        if 'Vary' in headers:
            headers['Vary'] += ', Accept-Encoding'
        else:
            headers['Vary'] = 'Accept-Encoding'
        if self._gzipping:
            # Strip any "; charset=..." suffix before the type check.
            ctype = _unicode(headers.get("Content-Type", "")).split(";")[0]
            self._gzipping = self._compressible_type(ctype) and \
                (not finishing or len(chunk) >= self.MIN_LENGTH) and \
                ("Content-Encoding" not in headers)
        if self._gzipping:
            headers["Content-Encoding"] = "gzip"
            # Compressed bytes are accumulated in _gzip_value and drained
            # by transform_chunk.
            self._gzip_value = BytesIO()
            self._gzip_file = gzip.GzipFile(mode="w", fileobj=self._gzip_value,
                                            compresslevel=self.GZIP_LEVEL)
            chunk = self.transform_chunk(chunk, finishing)
            if "Content-Length" in headers:
                # The original content length is no longer correct.
                # If this is the last (and only) chunk, we can set the new
                # content-length; otherwise we remove it and fall back to
                # chunked encoding.
                if finishing:
                    headers["Content-Length"] = str(len(chunk))
                else:
                    del headers["Content-Length"]
        return status_code, headers, chunk
    def transform_chunk(self, chunk, finishing):
        if self._gzipping:
            self._gzip_file.write(chunk)
            if finishing:
                # close() flushes the final gzip trailer.
                self._gzip_file.close()
            else:
                self._gzip_file.flush()
            # Drain and reset the buffer so each call yields only the
            # newly-compressed bytes.
            chunk = self._gzip_value.getvalue()
            self._gzip_value.truncate(0)
            self._gzip_value.seek(0)
        return chunk
def authenticated(method):
    """Decorate methods with this to require that the user be logged in.
    If the user is not logged in, they will be redirected to the configured
    `login url <RequestHandler.get_login_url>`.
    If you configure a login url with a query parameter, Tornado will
    assume you know what you're doing and use it as-is. If not, it
    will add a `next` parameter so the login page knows where to send
    you once you're logged in.
    """
    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        if not self.current_user:
            if self.request.method in ("GET", "HEAD"):
                url = self.get_login_url()
                if "?" not in url:
                    if urlparse.urlsplit(url).scheme:
                        # if login url is absolute, make next absolute too
                        next_url = self.request.full_url()
                    else:
                        next_url = self.request.uri
                    url += "?" + urlencode(dict(next=next_url))
                self.redirect(url)
                return
            # Non-GET/HEAD requests cannot be usefully redirected through
            # a login page, so fail outright.
            raise HTTPError(403)
        return method(self, *args, **kwargs)
    return wrapper
class UIModule(object):
    """A re-usable, modular UI unit on a page.

    UI modules often execute additional queries, and they can include
    additional CSS and JavaScript that will be included in the output
    page, which is automatically inserted on page render.

    Subclasses of UIModule must override the `render` method.
    """

    def __init__(self, handler):
        # Bind the module to the handler rendering the current page so
        # render() implementations can access request state.
        self.handler = handler
        self.request = handler.request
        self.ui = handler.ui
        self.locale = handler.locale

    @property
    def current_user(self):
        # Delegates to the owning handler's authenticated user, if any.
        return self.handler.current_user

    def render(self, *args, **kwargs):
        """Override in subclasses to return this module's output."""
        raise NotImplementedError()

    def embedded_javascript(self):
        """Override to return a JavaScript string
        to be embedded in the page."""
        return None

    def javascript_files(self):
        """Override to return a list of JavaScript files needed by this module.
        If the return values are relative paths, they will be passed to
        `RequestHandler.static_url`; otherwise they will be used as-is.
        """
        return None

    def embedded_css(self):
        """Override to return a CSS string
        that will be embedded in the page."""
        return None

    def css_files(self):
        """Override to returns a list of CSS files required by this module.
        If the return values are relative paths, they will be passed to
        `RequestHandler.static_url`; otherwise they will be used as-is.
        """
        return None

    def html_head(self):
        """Override to return an HTML string that will be put in the <head/>
        element.
        """
        return None

    def html_body(self):
        """Override to return an HTML string that will be put at the end of
        the <body/> element.
        """
        return None

    def render_string(self, path, **kwargs):
        """Renders a template and returns it as a string."""
        return self.handler.render_string(path, **kwargs)
class _linkify(UIModule):
    # Built-in UI module: wraps escape.linkify, which converts plain-text
    # URLs in ``text`` into HTML anchor tags.
    def render(self, text, **kwargs):
        return escape.linkify(text, **kwargs)
class _xsrf_form_html(UIModule):
    # Built-in UI module: emits the hidden XSRF-token form field for the
    # current handler.
    def render(self):
        return self.handler.xsrf_form_html()
class TemplateModule(UIModule):
    """UIModule that simply renders the given template.
    {% module Template("foo.html") %} is similar to {% include "foo.html" %},
    but the module version gets its own namespace (with kwargs passed to
    Template()) instead of inheriting the outer template's namespace.
    Templates rendered through this module also get access to UIModule's
    automatic javascript/css features. Simply call set_resources
    inside the template and give it keyword arguments corresponding to
    the methods on UIModule: {{ set_resources(js_files=static_url("my.js")) }}
    Note that these resources are output once per template file, not once
    per instantiation of the template, so they must not depend on
    any arguments to the template.
    """
    def __init__(self, handler):
        super(TemplateModule, self).__init__(handler)
        # keep resources in both a list and a dict to preserve order
        self._resource_list = []
        self._resource_dict = {}
    def render(self, path, **kwargs):
        def set_resources(**kwargs):
            # Resources are recorded once per template path; a second call
            # for the same path must supply identical resources.
            if path not in self._resource_dict:
                self._resource_list.append(kwargs)
                self._resource_dict[path] = kwargs
            else:
                if self._resource_dict[path] != kwargs:
                    raise ValueError("set_resources called with different "
                                     "resources for the same template")
            # Return "" so the call can be embedded in template output.
            return ""
        return self.render_string(path, set_resources=set_resources,
                                  **kwargs)
    def _get_resources(self, key):
        # Yield the ``key`` entry from each recorded resource dict, in
        # registration order, skipping dicts that lack it.
        return (r[key] for r in self._resource_list if key in r)
    def embedded_javascript(self):
        return "\n".join(self._get_resources("embedded_javascript"))
    def javascript_files(self):
        result = []
        for f in self._get_resources("javascript_files"):
            if isinstance(f, (unicode_type, bytes)):
                # A single filename: append it rather than iterating
                # its characters.
                result.append(f)
            else:
                result.extend(f)
        return result
    def embedded_css(self):
        return "\n".join(self._get_resources("embedded_css"))
    def css_files(self):
        result = []
        for f in self._get_resources("css_files"):
            if isinstance(f, (unicode_type, bytes)):
                result.append(f)
            else:
                result.extend(f)
        return result
    def html_head(self):
        return "".join(self._get_resources("html_head"))
    def html_body(self):
        return "".join(self._get_resources("html_body"))
class _UIModuleNamespace(object):
"""Lazy namespace which creates UIModule proxies bound to a handler."""
def __init__(self, handler, ui_modules):
self.handler = handler
self.ui_modules = ui_modules
def __getitem__(self, key):
return self.handler._ui_module(key, self.ui_modules[key])
def __getattr__(self, key):
try:
return self[key]
except KeyError as e:
raise AttributeError(str(e))
if hasattr(hmac, 'compare_digest'): # python 3.3
_time_independent_equals = hmac.compare_digest
else:
def _time_independent_equals(a, b):
if len(a) != len(b):
return False
result = 0
if isinstance(a[0], int): # python3 byte strings
for x, y in zip(a, b):
result |= x ^ y
else: # python2
for x, y in zip(a, b):
result |= ord(x) ^ ord(y)
return result == 0
def create_signed_value(secret, name, value, version=None, clock=None,
                        key_version=None):
    """Sign ``value`` with ``secret`` for use as a secure cookie.

    Returns a byte string containing the encoded value, timestamp, and
    HMAC signature in the requested format version (1 or 2; defaults to
    DEFAULT_SIGNED_VALUE_VERSION).  ``clock`` may override time.time for
    testing.  ``secret`` may be a dict of versioned keys, in which case
    ``key_version`` selects the signing key (v2 only).
    """
    if version is None:
        version = DEFAULT_SIGNED_VALUE_VERSION
    if clock is None:
        clock = time.time
    timestamp = utf8(str(int(clock())))
    value = base64.b64encode(utf8(value))
    if version == 1:
        signature = _create_signature_v1(secret, name, value, timestamp)
        value = b"|".join([value, timestamp, signature])
        return value
    elif version == 2:
        # The v2 format consists of a version number and a series of
        # length-prefixed fields "%d:%s", the last of which is a
        # signature, all separated by pipes. All numbers are in
        # decimal format with no leading zeros. The signature is an
        # HMAC-SHA256 of the whole string up to that point, including
        # the final pipe.
        #
        # The fields are:
        # - format version (i.e. 2; no length prefix)
        # - key version (integer, default is 0)
        # - timestamp (integer seconds since epoch)
        # - name (not encoded; assumed to be ~alphanumeric)
        # - value (base64-encoded)
        # - signature (hex-encoded; no length prefix)
        def format_field(s):
            return utf8("%d:" % len(s)) + utf8(s)
        to_sign = b"|".join([
            b"2",
            format_field(str(key_version or 0)),
            format_field(timestamp),
            format_field(name),
            format_field(value),
            b''])
        if isinstance(secret, dict):
            assert key_version is not None, 'Key version must be set when sign key dict is used'
            assert version >= 2, 'Version must be at least 2 for key version support'
            secret = secret[key_version]
        signature = _create_signature_v2(secret, to_sign)
        return to_sign + signature
    else:
        raise ValueError("Unsupported version %d" % version)
# A leading version number in decimal
# with no leading zeros, followed by a pipe.
_signed_value_version_re = re.compile(br"^([1-9][0-9]*)\|(.*)$")
def _get_version(value):
# Figures out what version value is. Version 1 did not include an
# explicit version field and started with arbitrary base64 data,
# which makes this tricky.
m = _signed_value_version_re.match(value)
if m is None:
version = 1
else:
try:
version = int(m.group(1))
if version > 999:
# Certain payloads from the version-less v1 format may
# be parsed as valid integers. Due to base64 padding
# restrictions, this can only happen for numbers whose
# length is a multiple of 4, so we can treat all
# numbers up to 999 as versions, and for the rest we
# fall back to v1 format.
version = 1
except ValueError:
version = 1
return version
def decode_signed_value(secret, name, value, max_age_days=31,
                        clock=None, min_version=None):
    """Verify and decode a value produced by `create_signed_value`.

    Returns the original byte string, or None if the value is missing,
    below ``min_version``, tampered with, or older than ``max_age_days``.
    """
    if clock is None:
        clock = time.time
    if min_version is None:
        min_version = DEFAULT_SIGNED_VALUE_MIN_VERSION
    if min_version > 2:
        raise ValueError("Unsupported min_version %d" % min_version)
    if not value:
        return None
    value = utf8(value)
    version = _get_version(value)
    if version < min_version:
        return None
    if version == 1:
        return _decode_signed_value_v1(secret, name, value,
                                       max_age_days, clock)
    elif version == 2:
        return _decode_signed_value_v2(secret, name, value,
                                       max_age_days, clock)
    else:
        # Unknown future version: treat as invalid.
        return None
def _decode_signed_value_v1(secret, name, value, max_age_days, clock):
    """Verify a legacy v1 signed value (``payload|timestamp|signature``).

    Returns the decoded payload bytes, or None on any validation failure.
    """
    parts = utf8(value).split(b"|")
    if len(parts) != 3:
        return None
    signature = _create_signature_v1(secret, name, parts[0], parts[1])
    if not _time_independent_equals(parts[2], signature):
        gen_log.warning("Invalid cookie signature %r", value)
        return None
    timestamp = int(parts[1])
    if timestamp < clock() - max_age_days * 86400:
        gen_log.warning("Expired cookie %r", value)
        return None
    if timestamp > clock() + 31 * 86400:
        # _cookie_signature does not hash a delimiter between the
        # parts of the cookie, so an attacker could transfer trailing
        # digits from the payload to the timestamp without altering the
        # signature. For backwards compatibility, sanity-check timestamp
        # here instead of modifying _cookie_signature.
        gen_log.warning("Cookie timestamp in future; possible tampering %r",
                        value)
        return None
    if parts[1].startswith(b"0"):
        # Leading zeros are another vector for the digit-shifting attack
        # described above.
        gen_log.warning("Tampered cookie %r", value)
        return None
    try:
        return base64.b64decode(parts[0])
    except Exception:
        # Malformed base64 payload.
        return None
def _decode_fields_v2(value):
def _consume_field(s):
length, _, rest = s.partition(b':')
n = int(length)
field_value = rest[:n]
# In python 3, indexing bytes returns small integers; we must
# use a slice to get a byte string as in python 2.
if rest[n:n + 1] != b'|':
raise ValueError("malformed v2 signed value field")
rest = rest[n + 1:]
return field_value, rest
rest = value[2:] # remove version number
key_version, rest = _consume_field(rest)
timestamp, rest = _consume_field(rest)
name_field, rest = _consume_field(rest)
value_field, passed_sig = _consume_field(rest)
return int(key_version), timestamp, name_field, value_field, passed_sig
def _decode_signed_value_v2(secret, name, value, max_age_days, clock):
    """Verify a v2 signed value (length-prefixed, HMAC-SHA256).

    Returns the decoded payload bytes, or None on any validation failure.
    """
    try:
        key_version, timestamp, name_field, value_field, passed_sig = _decode_fields_v2(value)
    except ValueError:
        return None
    # Everything up to (but not including) the signature was signed.
    signed_string = value[:-len(passed_sig)]
    if isinstance(secret, dict):
        try:
            secret = secret[key_version]
        except KeyError:
            # Unknown signing-key version.
            return None
    expected_sig = _create_signature_v2(secret, signed_string)
    if not _time_independent_equals(passed_sig, expected_sig):
        return None
    if name_field != utf8(name):
        return None
    timestamp = int(timestamp)
    if timestamp < clock() - max_age_days * 86400:
        # The signature has expired.
        return None
    try:
        return base64.b64decode(value_field)
    except Exception:
        # Malformed base64 payload.
        return None
def get_signature_key_version(value):
    """Return the signing-key version of a v2 signed value.

    Returns None for v1 values (which have no key version) or for
    malformed input.
    """
    value = utf8(value)
    version = _get_version(value)
    if version < 2:
        return None
    try:
        key_version, _, _, _, _ = _decode_fields_v2(value)
    except ValueError:
        return None
    return key_version
def _create_signature_v1(secret, *parts):
    """Return the hex-encoded HMAC-SHA1 of ``parts`` (legacy v1 format).

    Note: parts are concatenated without a delimiter; see the
    timestamp sanity checks in _decode_signed_value_v1.
    """
    hasher = hmac.new(utf8(secret), digestmod=hashlib.sha1)
    for part in parts:
        hasher.update(utf8(part))
    return utf8(hasher.hexdigest())
def _create_signature_v2(secret, s):
    """Return the hex-encoded HMAC-SHA256 of ``s`` (v2 signed values)."""
    hasher = hmac.new(utf8(secret), digestmod=hashlib.sha256)
    hasher.update(utf8(s))
    return utf8(hasher.hexdigest())
def is_absolute(path):
    """Return True if ``path`` is host-absolute ("/...") or a full
    http(s) URL, i.e. should not be routed through static_url()."""
    return path.startswith(("/", "http:", "https:"))
| 39.439173
| 153
| 0.61757
|
from __future__ import absolute_import, division, print_function
import base64
import binascii
import datetime
import email.utils
import functools
import gzip
import hashlib
import hmac
import mimetypes
import numbers
import os.path
import re
import stat
import sys
import threading
import time
import salt.ext.tornado as tornado
import traceback
import types
from inspect import isclass
from io import BytesIO
from salt.ext.tornado.concurrent import Future
from salt.ext.tornado import escape
from salt.ext.tornado import gen
from salt.ext.tornado import httputil
from salt.ext.tornado import iostream
from salt.ext.tornado import locale
from salt.ext.tornado.log import access_log, app_log, gen_log
from salt.ext.tornado import stack_context
from salt.ext.tornado import template
from salt.ext.tornado.escape import utf8, _unicode
from salt.ext.tornado.routing import (AnyMatches, DefaultHostMatches, HostMatches,
ReversibleRouter, Rule, ReversibleRuleRouter,
URLSpec)
from salt.ext.tornado.util import (ObjectDict, raise_exc_info,
unicode_type, _websocket_mask, PY3)
url = URLSpec
if PY3:
import http.cookies as Cookie
import urllib.parse as urlparse
from urllib.parse import urlencode
else:
import Cookie
import urlparse
from urllib import urlencode
try:
import typing
_HeaderTypes = typing.Union[bytes, unicode_type,
numbers.Integral, datetime.datetime]
except ImportError:
pass
MIN_SUPPORTED_SIGNED_VALUE_VERSION = 1
MAX_SUPPORTED_SIGNED_VALUE_VERSION = 2
DEFAULT_SIGNED_VALUE_VERSION = 2
DEFAULT_SIGNED_VALUE_MIN_VERSION = 1
class RequestHandler(object):
SUPPORTED_METHODS = ("GET", "HEAD", "POST", "DELETE", "PATCH", "PUT",
"OPTIONS")
_template_loaders = {}
_template_loader_lock = threading.Lock()
_remove_control_chars_regex = re.compile(r"[\x00-\x08\x0e-\x1f]")
def __init__(self, application, request, **kwargs):
super(RequestHandler, self).__init__()
self.application = application
self.request = request
self._headers_written = False
self._finished = False
self._auto_finish = True
self._transforms = None
self._prepared_future = None
self._headers = None
self.path_args = None
self.path_kwargs = None
self.ui = ObjectDict((n, self._ui_method(m)) for n, m in
application.ui_methods.items())
self.ui["_tt_modules"] = _UIModuleNamespace(self,
application.ui_modules)
self.ui["modules"] = self.ui["_tt_modules"]
self.clear()
self.request.connection.set_close_callback(self.on_connection_close)
self.initialize(**kwargs)
def initialize(self):
pass
@property
def settings(self):
return self.application.settings
def head(self, *args, **kwargs):
raise HTTPError(405)
def get(self, *args, **kwargs):
raise HTTPError(405)
def post(self, *args, **kwargs):
raise HTTPError(405)
def delete(self, *args, **kwargs):
raise HTTPError(405)
def patch(self, *args, **kwargs):
raise HTTPError(405)
def put(self, *args, **kwargs):
raise HTTPError(405)
def options(self, *args, **kwargs):
raise HTTPError(405)
def prepare(self):
pass
def on_finish(self):
pass
def on_connection_close(self):
if _has_stream_request_body(self.__class__):
if not self.request.body.done():
self.request.body.set_exception(iostream.StreamClosedError())
self.request.body.exception()
def clear(self):
self._headers = httputil.HTTPHeaders({
"Server": "TornadoServer/%s" % tornado.version,
"Content-Type": "text/html; charset=UTF-8",
"Date": httputil.format_timestamp(time.time()),
})
self.set_default_headers()
self._write_buffer = []
self._status_code = 200
self._reason = httputil.responses[200]
def set_default_headers(self):
pass
def set_status(self, status_code, reason=None):
self._status_code = status_code
if reason is not None:
self._reason = escape.native_str(reason)
else:
try:
self._reason = httputil.responses[status_code]
except KeyError:
raise ValueError("unknown status code %d" % status_code)
def get_status(self):
return self._status_code
def set_header(self, name, value):
self._headers[name] = self._convert_header_value(value)
def add_header(self, name, value):
self._headers.add(name, self._convert_header_value(value))
def clear_header(self, name):
if name in self._headers:
del self._headers[name]
_INVALID_HEADER_CHAR_RE = re.compile(r"[\x00-\x1f]")
def _convert_header_value(self, value):
if isinstance(value, str):
retval = value
elif isinstance(value, bytes):
retval = value.decode('latin1')
elif isinstance(value, unicode_type):
retval = escape.utf8(value)
elif isinstance(value, numbers.Integral):
# return immediately since we know the converted value will be safe
return str(value)
elif isinstance(value, datetime.datetime):
return httputil.format_timestamp(value)
else:
raise TypeError("Unsupported header value %r" % value)
# If \n is allowed into the header, it is possible to inject
# additional headers or split the request.
if RequestHandler._INVALID_HEADER_CHAR_RE.search(retval):
raise ValueError("Unsafe header value %r", retval)
return retval
_ARG_DEFAULT = object()
def get_argument(self, name, default=_ARG_DEFAULT, strip=True):
return self._get_argument(name, default, self.request.arguments, strip)
def get_arguments(self, name, strip=True):
# Make sure `get_arguments` isn't accidentally being called with a
# `get_argument`.)
assert isinstance(strip, bool)
return self._get_arguments(name, self.request.arguments, strip)
def get_body_argument(self, name, default=_ARG_DEFAULT, strip=True):
return self._get_argument(name, default, self.request.body_arguments,
strip)
def get_body_arguments(self, name, strip=True):
return self._get_arguments(name, self.request.body_arguments, strip)
def get_query_argument(self, name, default=_ARG_DEFAULT, strip=True):
return self._get_argument(name, default,
self.request.query_arguments, strip)
def get_query_arguments(self, name, strip=True):
return self._get_arguments(name, self.request.query_arguments, strip)
def _get_argument(self, name, default, source, strip=True):
args = self._get_arguments(name, source, strip=strip)
if not args:
if default is self._ARG_DEFAULT:
raise MissingArgumentError(name)
return default
return args[-1]
def _get_arguments(self, name, source, strip=True):
values = []
for v in source.get(name, []):
v = self.decode_argument(v, name=name)
if isinstance(v, unicode_type):
# Get rid of any weird control chars (unless decoding gave
# us bytes, in which case leave it alone)
v = RequestHandler._remove_control_chars_regex.sub(" ", v)
if strip:
v = v.strip()
values.append(v)
return values
def decode_argument(self, value, name=None):
try:
return _unicode(value)
except UnicodeDecodeError:
raise HTTPError(400, "Invalid unicode in %s: %r" %
(name or "url", value[:40]))
@property
def cookies(self):
return self.request.cookies
def get_cookie(self, name, default=None):
if self.request.cookies is not None and name in self.request.cookies:
return self.request.cookies[name].value
return default
def set_cookie(self, name, value, domain=None, expires=None, path="/",
expires_days=None, **kwargs):
# The cookie library only accepts type str, in both python 2 and 3
name = escape.native_str(name)
value = escape.native_str(value)
if re.search(r"[\x00-\x20]", name + value):
# Don't let us accidentally inject bad stuff
raise ValueError("Invalid cookie %r: %r" % (name, value))
if not hasattr(self, "_new_cookie"):
self._new_cookie = Cookie.SimpleCookie()
if name in self._new_cookie:
del self._new_cookie[name]
self._new_cookie[name] = value
morsel = self._new_cookie[name]
if domain:
morsel["domain"] = domain
if expires_days is not None and not expires:
expires = datetime.datetime.utcnow() + datetime.timedelta(
days=expires_days)
if expires:
morsel["expires"] = httputil.format_timestamp(expires)
if path:
morsel["path"] = path
for k, v in kwargs.items():
if k == 'max_age':
k = 'max-age'
if k in ['httponly', 'secure'] and not v:
continue
morsel[k] = v
def clear_cookie(self, name, path="/", domain=None):
expires = datetime.datetime.utcnow() - datetime.timedelta(days=365)
self.set_cookie(name, value="", path=path, expires=expires,
domain=domain)
def clear_all_cookies(self, path="/", domain=None):
for name in self.request.cookies:
self.clear_cookie(name, path=path, domain=domain)
def set_secure_cookie(self, name, value, expires_days=30, version=None,
**kwargs):
self.set_cookie(name, self.create_signed_value(name, value,
version=version),
expires_days=expires_days, **kwargs)
def create_signed_value(self, name, value, version=None):
self.require_setting("cookie_secret", "secure cookies")
secret = self.application.settings["cookie_secret"]
key_version = None
if isinstance(secret, dict):
if self.application.settings.get("key_version") is None:
raise Exception("key_version setting must be used for secret_key dicts")
key_version = self.application.settings["key_version"]
return create_signed_value(secret, name, value, version=version,
key_version=key_version)
def get_secure_cookie(self, name, value=None, max_age_days=31,
min_version=None):
self.require_setting("cookie_secret", "secure cookies")
if value is None:
value = self.get_cookie(name)
return decode_signed_value(self.application.settings["cookie_secret"],
name, value, max_age_days=max_age_days,
min_version=min_version)
def get_secure_cookie_key_version(self, name, value=None):
self.require_setting("cookie_secret", "secure cookies")
if value is None:
value = self.get_cookie(name)
return get_signature_key_version(value)
def redirect(self, url, permanent=False, status=None):
if self._headers_written:
raise Exception("Cannot redirect after headers have been written")
if status is None:
status = 301 if permanent else 302
else:
assert isinstance(status, int) and 300 <= status <= 399
self.set_status(status)
self.set_header("Location", utf8(url))
self.finish()
def write(self, chunk):
if self._finished:
raise RuntimeError("Cannot write() after finish()")
if not isinstance(chunk, (bytes, unicode_type, dict)):
message = "write() only accepts bytes, unicode, and dict objects"
if isinstance(chunk, list):
message += ". Lists not accepted for security reasons; see http://www.tornadoweb.org/en/stable/web.html#tornado.web.RequestHandler.write"
raise TypeError(message)
if isinstance(chunk, dict):
chunk = escape.json_encode(chunk)
self.set_header("Content-Type", "application/json; charset=UTF-8")
chunk = utf8(chunk)
self._write_buffer.append(chunk)
def render(self, template_name, **kwargs):
if self._finished:
raise RuntimeError("Cannot render() after finish()")
html = self.render_string(template_name, **kwargs)
js_embed = []
js_files = []
css_embed = []
css_files = []
html_heads = []
html_bodies = []
for module in getattr(self, "_active_modules", {}).values():
embed_part = module.embedded_javascript()
if embed_part:
js_embed.append(utf8(embed_part))
file_part = module.javascript_files()
if file_part:
if isinstance(file_part, (unicode_type, bytes)):
js_files.append(file_part)
else:
js_files.extend(file_part)
embed_part = module.embedded_css()
if embed_part:
css_embed.append(utf8(embed_part))
file_part = module.css_files()
if file_part:
if isinstance(file_part, (unicode_type, bytes)):
css_files.append(file_part)
else:
css_files.extend(file_part)
head_part = module.html_head()
if head_part:
html_heads.append(utf8(head_part))
body_part = module.html_body()
if body_part:
html_bodies.append(utf8(body_part))
if js_files:
js = self.render_linked_js(js_files)
sloc = html.rindex(b'</body>')
html = html[:sloc] + utf8(js) + b'\n' + html[sloc:]
if js_embed:
js = self.render_embed_js(js_embed)
sloc = html.rindex(b'</body>')
html = html[:sloc] + js + b'\n' + html[sloc:]
if css_files:
css = self.render_linked_css(css_files)
hloc = html.index(b'</head>')
html = html[:hloc] + utf8(css) + b'\n' + html[hloc:]
if css_embed:
css = self.render_embed_css(css_embed)
hloc = html.index(b'</head>')
html = html[:hloc] + css + b'\n' + html[hloc:]
if html_heads:
hloc = html.index(b'</head>')
html = html[:hloc] + b''.join(html_heads) + b'\n' + html[hloc:]
if html_bodies:
hloc = html.index(b'</body>')
html = html[:hloc] + b''.join(html_bodies) + b'\n' + html[hloc:]
self.finish(html)
def render_linked_js(self, js_files):
paths = []
unique_paths = set()
for path in js_files:
if not is_absolute(path):
path = self.static_url(path)
if path not in unique_paths:
paths.append(path)
unique_paths.add(path)
return ''.join('<script src="' + escape.xhtml_escape(p) +
'" type="text/javascript"></script>'
for p in paths)
def render_embed_js(self, js_embed):
return b'<script type="text/javascript">\n//<![CDATA[\n' + \
b'\n'.join(js_embed) + b'\n//]]>\n</script>'
def render_linked_css(self, css_files):
paths = []
unique_paths = set()
for path in css_files:
if not is_absolute(path):
path = self.static_url(path)
if path not in unique_paths:
paths.append(path)
unique_paths.add(path)
return ''.join('<link href="' + escape.xhtml_escape(p) + '" '
'type="text/css" rel="stylesheet"/>'
for p in paths)
def render_embed_css(self, css_embed):
return b'<style type="text/css">\n' + b'\n'.join(css_embed) + \
b'\n</style>'
    def render_string(self, template_name, **kwargs):
        """Generate the named template with the given arguments; return bytes.

        Templates are loaded from ``get_template_path()``; if no template
        path is configured, the directory of the calling file (found by
        walking the stack out of this module) is used instead.  Loaders are
        cached per path and shared between handlers.
        """
        template_path = self.get_template_path()
        if not template_path:
            # No template_path setting: walk up the stack past frames from
            # this file to find the caller's module directory.
            frame = sys._getframe(0)
            web_file = frame.f_code.co_filename
            while frame.f_code.co_filename == web_file:
                frame = frame.f_back
            template_path = os.path.dirname(frame.f_code.co_filename)
        with RequestHandler._template_loader_lock:
            if template_path not in RequestHandler._template_loaders:
                loader = self.create_template_loader(template_path)
                RequestHandler._template_loaders[template_path] = loader
            else:
                loader = RequestHandler._template_loaders[template_path]
        t = loader.load(template_name)
        namespace = self.get_template_namespace()
        namespace.update(kwargs)
        return t.generate(**namespace)
def get_template_namespace(self):
namespace = dict(
handler=self,
request=self.request,
current_user=self.current_user,
locale=self.locale,
_=self.locale.translate,
pgettext=self.locale.pgettext,
static_url=self.static_url,
xsrf_form_html=self.xsrf_form_html,
reverse_url=self.reverse_url
)
namespace.update(self.ui)
return namespace
def create_template_loader(self, template_path):
settings = self.application.settings
if "template_loader" in settings:
return settings["template_loader"]
kwargs = {}
if "autoescape" in settings:
kwargs["autoescape"] = settings["autoescape"]
if "template_whitespace" in settings:
kwargs["whitespace"] = settings["template_whitespace"]
return template.Loader(template_path, **kwargs)
    def flush(self, include_footers=False, callback=None):
        """Flush the buffered output to the network.

        On the first flush, runs the output transforms over status/headers
        and the first chunk, emits any pending Set-Cookie headers, and
        writes the response start line + headers.  Returns a Future (or
        invokes ``callback``) when the write completes.
        """
        chunk = b"".join(self._write_buffer)
        self._write_buffer = []
        if not self._headers_written:
            self._headers_written = True
            # Transforms (e.g. gzip) may rewrite the status, headers, and
            # first body chunk.
            for transform in self._transforms:
                self._status_code, self._headers, chunk = \
                    transform.transform_first_chunk(
                        self._status_code, self._headers,
                        chunk, include_footers)
            # Ignore the chunk and only write the headers for HEAD requests
            if self.request.method == "HEAD":
                chunk = None
            # Finalize the cookie headers (buffered in _new_cookie until now).
            if hasattr(self, "_new_cookie"):
                for cookie in self._new_cookie.values():
                    self.add_header("Set-Cookie", cookie.OutputString(None))
            start_line = httputil.ResponseStartLine('',
                                                    self._status_code,
                                                    self._reason)
            return self.request.connection.write_headers(
                start_line, self._headers, chunk, callback=callback)
        else:
            for transform in self._transforms:
                chunk = transform.transform_chunk(chunk, include_footers)
            # Ignore the chunk and only write the headers for HEAD requests
            if self.request.method != "HEAD":
                return self.request.connection.write(chunk, callback=callback)
            else:
                # Nothing to write for HEAD; hand back a resolved Future.
                future = Future()
                future.set_result(None)
                return future
    def finish(self, chunk=None):
        """Finish this response, ending the HTTP request.

        Optionally writes ``chunk`` first.  Before the final flush it sets
        the Etag (and may convert the response to a 304) and Content-Length
        when headers have not been sent yet, then logs and runs
        ``on_finish()``.  Raises RuntimeError when called twice.
        """
        if self._finished:
            raise RuntimeError("finish() called twice")
        if chunk is not None:
            self.write(chunk)
        # Automatically support ETags and set the Content-Length header if
        # we have not yet flushed any content.
        if not self._headers_written:
            if (self._status_code == 200 and
                    self.request.method in ("GET", "HEAD") and
                    "Etag" not in self._headers):
                self.set_etag_header()
                if self.check_etag_header():
                    self._write_buffer = []
                    self.set_status(304)
            # 204/304 and 1xx responses must not carry a body.
            if (self._status_code in (204, 304) or
                (self._status_code >= 100 and self._status_code < 200)):
                assert not self._write_buffer, "Cannot send body with %s" % self._status_code
                self._clear_headers_for_304()
            elif "Content-Length" not in self._headers:
                content_length = sum(len(part) for part in self._write_buffer)
                self.set_header("Content-Length", content_length)
        if hasattr(self.request, "connection"):
            # Now that the request is finished, clear the callback we
            # set on the HTTPConnection (which would otherwise prevent the
            # garbage collection of the RequestHandler when there
            # are keepalive connections)
            self.request.connection.set_close_callback(None)
        self.flush(include_footers=True)
        self.request.finish()
        self._log()
        self._finished = True
        self.on_finish()
        self._break_cycles()
    def _break_cycles(self):
        """Drop the reference cycle through ``self.ui`` (its lambdas close
        over ``self``) so the handler can be garbage collected promptly."""
        self.ui = None
    def send_error(self, status_code=500, **kwargs):
        """Send the given HTTP error code to the browser.

        Clears any pending output and delegates the body to
        ``write_error``.  If headers were already flushed, the response
        cannot be rewritten, so the request is simply closed.
        """
        if self._headers_written:
            gen_log.error("Cannot send error response after headers written")
            if not self._finished:
                # If we get an error between writing headers and finishing,
                # we are unlikely to be able to finish due to a
                # Content-Length mismatch. Try anyway to release the
                # connection.
                try:
                    self.finish()
                except Exception:
                    gen_log.error("Failed to flush partial response",
                                  exc_info=True)
            return
        self.clear()
        reason = kwargs.get('reason')
        if 'exc_info' in kwargs:
            exception = kwargs['exc_info'][1]
            # Prefer the reason carried by an HTTPError, if any.
            if isinstance(exception, HTTPError) and exception.reason:
                reason = exception.reason
        self.set_status(status_code, reason=reason)
        try:
            self.write_error(status_code, **kwargs)
        except Exception:
            app_log.error("Uncaught exception in write_error", exc_info=True)
        if not self._finished:
            self.finish()
    def write_error(self, status_code, **kwargs):
        """Produce the body of an error response.

        With the ``serve_traceback`` setting on (and exception info
        available), sends the full traceback as plain text; otherwise a
        minimal HTML page with the status code and reason.  Override for
        custom error pages.
        """
        if self.settings.get("serve_traceback") and "exc_info" in kwargs:
            # In debug mode, try to send a traceback
            self.set_header('Content-Type', 'text/plain')
            for line in traceback.format_exception(*kwargs["exc_info"]):
                self.write(line)
            self.finish()
        else:
            self.finish("<html><title>%(code)d: %(message)s</title>"
                        "<body>%(code)d: %(message)s</body></html>" % {
                            "code": status_code,
                            "message": self._reason,
                        })
    @property
    def locale(self):
        """The locale for the current request.

        Computed once per request from ``get_user_locale()``, falling back
        to ``get_browser_locale()``, then cached on the handler.
        """
        if not hasattr(self, "_locale"):
            self._locale = self.get_user_locale()
            if not self._locale:
                self._locale = self.get_browser_locale()
            assert self._locale
        return self._locale
    @locale.setter
    def locale(self, value):
        # Allow tests/applications to override the computed locale.
        self._locale = value
    def get_user_locale(self):
        """Override to determine the locale from the authenticated user.

        Returning None (the default) falls back to ``get_browser_locale()``.
        """
        return None
    def get_browser_locale(self, default="en_US"):
        """Determine the user's locale from the ``Accept-Language`` header.

        Parses the header's quality values and tries the candidate codes in
        descending q-order; falls back to ``default`` when the header is
        missing or yields no candidates.
        """
        if "Accept-Language" in self.request.headers:
            languages = self.request.headers["Accept-Language"].split(",")
            locales = []
            for language in languages:
                parts = language.strip().split(";")
                # An entry looks like "en-US" or "en-US;q=0.8".
                if len(parts) > 1 and parts[1].startswith("q="):
                    try:
                        score = float(parts[1][2:])
                    except (ValueError, TypeError):
                        score = 0.0
                else:
                    score = 1.0
                locales.append((parts[0], score))
            if locales:
                locales.sort(key=lambda pair: pair[1], reverse=True)
                codes = [l[0] for l in locales]
                return locale.get(*codes)
        return locale.get(default)
    @property
    def current_user(self):
        """The authenticated user for this request, from ``get_current_user``.

        Computed once per request and cached; may be None.
        """
        if not hasattr(self, "_current_user"):
            self._current_user = self.get_current_user()
        return self._current_user
    @current_user.setter
    def current_user(self, value):
        # Allow the user to be set directly (e.g. by authentication hooks).
        self._current_user = value
    def get_current_user(self):
        """Override to determine the current user, e.g. from a cookie.

        The default returns None (no authenticated user).
        """
        return None
    def get_login_url(self):
        """Return the login URL (the required ``login_url`` app setting)."""
        self.require_setting("login_url", "@tornado.web.authenticated")
        return self.application.settings["login_url"]
    def get_template_path(self):
        """Return the template search path (the ``template_path`` setting,
        or None when unset)."""
        return self.application.settings.get("template_path")
@property
def xsrf_token(self):
if not hasattr(self, "_xsrf_token"):
version, token, timestamp = self._get_raw_xsrf_token()
output_version = self.settings.get("xsrf_cookie_version", 2)
cookie_kwargs = self.settings.get("xsrf_cookie_kwargs", {})
if output_version == 1:
self._xsrf_token = binascii.b2a_hex(token)
elif output_version == 2:
mask = os.urandom(4)
self._xsrf_token = b"|".join([
b"2",
binascii.b2a_hex(mask),
binascii.b2a_hex(_websocket_mask(mask, token)),
utf8(str(int(timestamp)))])
else:
raise ValueError("unknown xsrf cookie version %d",
output_version)
if version is None:
expires_days = 30 if self.current_user else None
self.set_cookie("_xsrf", self._xsrf_token,
expires_days=expires_days,
**cookie_kwargs)
return self._xsrf_token
    def _get_raw_xsrf_token(self):
        """Return the (version, token, timestamp) tuple for this request.

        Decoded from the ``_xsrf`` cookie when present and valid; otherwise
        a fresh 16-byte random token with version None (meaning a new
        cookie must be set).  Cached on the handler.
        """
        if not hasattr(self, '_raw_xsrf_token'):
            cookie = self.get_cookie("_xsrf")
            if cookie:
                version, token, timestamp = self._decode_xsrf_token(cookie)
            else:
                version, token, timestamp = None, None, None
            if token is None:
                # Missing or undecodable cookie: mint a new token.
                version = None
                token = os.urandom(16)
                timestamp = time.time()
            self._raw_xsrf_token = (version, token, timestamp)
        return self._raw_xsrf_token
    def _decode_xsrf_token(self, cookie):
        """Decode an ``_xsrf`` cookie (or submitted token) into
        (version, token, timestamp); returns (None, None, None) on any error.

        Version-2 values look like ``2|<mask-hex>|<masked-token-hex>|<ts>``;
        anything without a version prefix is treated as version 1 (hex, or
        the raw value when not valid hex).
        """
        try:
            m = _signed_value_version_re.match(utf8(cookie))
            if m:
                version = int(m.group(1))
                if version == 2:
                    _, mask, masked_token, timestamp = cookie.split("|")
                    mask = binascii.a2b_hex(utf8(mask))
                    # Unmask with the websocket XOR mask to recover the token.
                    token = _websocket_mask(
                        mask, binascii.a2b_hex(utf8(masked_token)))
                    timestamp = int(timestamp)
                    return version, token, timestamp
                else:
                    # Invalid version: treat as unreadable (caught below).
                    raise Exception("Unknown xsrf cookie version")
            else:
                version = 1
                try:
                    token = binascii.a2b_hex(utf8(cookie))
                except (binascii.Error, TypeError):
                    token = utf8(cookie)
                # Version-1 cookies carry no timestamp; pretend it is now.
                timestamp = int(time.time())
                return (version, token, timestamp)
        except Exception:
            # Catch exceptions and return nothing instead of failing.
            gen_log.debug("Uncaught exception in _decode_xsrf_token",
                          exc_info=True)
            return None, None, None
    def check_xsrf_cookie(self):
        """Verify that the submitted XSRF token matches the cookie's token.

        The token may arrive via the ``_xsrf`` form argument or the
        ``X-Xsrftoken``/``X-Csrftoken`` headers.  Raises HTTPError(403) on
        a missing, malformed, or mismatched token.
        """
        token = (self.get_argument("_xsrf", None) or
                 self.request.headers.get("X-Xsrftoken") or
                 self.request.headers.get("X-Csrftoken"))
        if not token:
            raise HTTPError(403, "'_xsrf' argument missing from POST")
        _, token, _ = self._decode_xsrf_token(token)
        _, expected_token, _ = self._get_raw_xsrf_token()
        if not token:
            raise HTTPError(403, "'_xsrf' argument has invalid format")
        # Constant-time comparison to avoid timing side channels.
        if not _time_independent_equals(utf8(token), utf8(expected_token)):
            raise HTTPError(403, "XSRF cookie does not match POST argument")
    def xsrf_form_html(self):
        """Return a hidden ``_xsrf`` input field carrying the XSRF token,
        for inclusion in HTML forms."""
        return '<input type="hidden" name="_xsrf" value="' + \
            escape.xhtml_escape(self.xsrf_token) + '"/>'
    def static_url(self, path, include_host=None, **kwargs):
        """Return a (possibly versioned) URL for the given static file path.

        Delegates to the configured static handler class's
        ``make_static_url``; when ``include_host`` is true (or the handler
        sets an ``include_host`` attribute) the scheme and host are
        prepended.
        """
        self.require_setting("static_path", "static_url")
        get_url = self.settings.get("static_handler_class",
                                    StaticFileHandler).make_static_url
        if include_host is None:
            include_host = getattr(self, "include_host", False)
        if include_host:
            base = self.request.protocol + "://" + self.request.host
        else:
            base = ""
        return base + get_url(self.settings, path, **kwargs)
def require_setting(self, name, feature="this feature"):
if not self.application.settings.get(name):
raise Exception("You must define the '%s' setting in your "
"application to use %s" % (name, feature))
    def reverse_url(self, name, *args):
        """Return the URL for the named handler (``Application.reverse_url``)."""
        return self.application.reverse_url(name, *args)
def compute_etag(self):
hasher = hashlib.sha1()
for part in self._write_buffer:
hasher.update(part)
return '"%s"' % hasher.hexdigest()
    def set_etag_header(self):
        """Set the ``Etag`` header from ``compute_etag()`` (skipped when
        compute_etag returns None)."""
        etag = self.compute_etag()
        if etag is not None:
            self.set_header("Etag", etag)
    def check_etag_header(self):
        """Return True if the request's ``If-None-Match`` matches our Etag.

        Supports ``*`` and multiple (possibly weak, ``W/``-prefixed) etags
        per RFC 7232; weak comparison is used.
        """
        computed_etag = utf8(self._headers.get("Etag", ""))
        # Find all weak and strong etag values from If-None-Match header
        # because RFC 7232 allows multiple etag values in a single header.
        etags = re.findall(
            br'\*|(?:W/)?"[^"]*"',
            utf8(self.request.headers.get("If-None-Match", ""))
        )
        if not computed_etag or not etags:
            return False
        match = False
        if etags[0] == b'*':
            match = True
        else:
            # Use a weak comparison when comparing entity-tags.
            def val(x):
                return x[2:] if x.startswith(b'W/') else x
            for etag in etags:
                if val(etag) == val(computed_etag):
                    match = True
                    break
        return match
    def _stack_context_handle_exception(self, type, value, traceback):
        """stack_context exception handler: route the exception into
        ``_handle_request_exception`` and report it as handled."""
        try:
            # For historical reasons _handle_request_exception only takes
            # the exception value instead of the full triple,
            # so re-raise the exception to ensure that it's in
            # sys.exc_info()
            raise_exc_info((type, value, traceback))
        except Exception:
            self._handle_request_exception(value)
        return True
    @gen.coroutine
    def _execute(self, transforms, *args, **kwargs):
        """Execute this request with the given output transforms.

        Decodes path args, enforces the XSRF check for mutating methods,
        runs ``prepare()`` (awaiting it if it returns a Future), optionally
        waits for a streamed body, dispatches to the method handler, and
        auto-finishes unless disabled.  All exceptions are funneled through
        ``_handle_request_exception``.
        """
        self._transforms = transforms
        try:
            if self.request.method not in self.SUPPORTED_METHODS:
                raise HTTPError(405)
            self.path_args = [self.decode_argument(arg) for arg in args]
            self.path_kwargs = dict((k, self.decode_argument(v, name=k))
                                    for (k, v) in kwargs.items())
            # If XSRF cookies are turned on, reject form submissions without
            # the proper cookie
            if self.request.method not in ("GET", "HEAD", "OPTIONS") and \
                    self.application.settings.get("xsrf_cookies"):
                self.check_xsrf_cookie()
            result = self.prepare()
            if result is not None:
                result = yield result
            if self._prepared_future is not None:
                # Tell the Application we've finished with prepare()
                # and are ready for the body to arrive.
                self._prepared_future.set_result(None)
            if self._finished:
                return
            if _has_stream_request_body(self.__class__):
                # In streaming mode request.body is a Future that signals
                # the body has been completely received. The Future has no
                # result; the data has been passed to self.data_received
                # instead.
                try:
                    yield self.request.body
                except iostream.StreamClosedError:
                    return
            method = getattr(self, self.request.method.lower())
            result = method(*self.path_args, **self.path_kwargs)
            if result is not None:
                result = yield result
            if self._auto_finish and not self._finished:
                self.finish()
        except Exception as e:
            try:
                self._handle_request_exception(e)
            except Exception:
                app_log.error("Exception in exception handler", exc_info=True)
            if (self._prepared_future is not None and
                    not self._prepared_future.done()):
                # In case we failed before setting _prepared_future, do it
                # now (to unblock the HTTP server).  Note that this is not
                # in a finally block to avoid GC issues prior to Python 3.4.
                self._prepared_future.set_result(None)
    def data_received(self, chunk):
        """Receive a chunk of the streamed request body.

        Must be implemented by handlers decorated with
        ``@stream_request_body``; the default raises NotImplementedError.
        """
        raise NotImplementedError()
    def _log(self):
        """Log the completed request via ``Application.log_request``."""
        self.application.log_request(self)
def _request_summary(self):
return "%s %s (%s)" % (self.request.method, self.request.uri,
self.request.remote_ip)
    def _handle_request_exception(self, e):
        """Central exception handler for request processing.

        ``Finish`` ends the request cleanly without logging; ``HTTPError``
        maps to its status code; anything else becomes a logged 500.
        """
        if isinstance(e, Finish):
            # Not an error; just finish the request without logging.
            if not self._finished:
                self.finish(*e.args)
            return
        try:
            self.log_exception(*sys.exc_info())
        except Exception:
            # An error here should still get a best-effort send_error()
            # to avoid leaking the connection.
            app_log.error("Error in exception logger", exc_info=True)
        if self._finished:
            # Extra errors after the request has been finished should
            # be logged, but there is no reason to continue to try and
            # send a response.
            return
        if isinstance(e, HTTPError):
            if e.status_code not in httputil.responses and not e.reason:
                # Unknown status without a reason phrase can't be sent as-is.
                gen_log.error("Bad HTTP status code: %d", e.status_code)
                self.send_error(500, exc_info=sys.exc_info())
            else:
                self.send_error(e.status_code, exc_info=sys.exc_info())
        else:
            self.send_error(500, exc_info=sys.exc_info())
    def log_exception(self, typ, value, tb):
        """Log an uncaught exception: HTTPErrors with a log_message go to
        the general log as warnings; everything else is an app_log error
        with the full traceback."""
        if isinstance(value, HTTPError):
            if value.log_message:
                format = "%d %s: " + value.log_message
                args = ([value.status_code, self._request_summary()] +
                        list(value.args))
                gen_log.warning(format, *args)
        else:
            app_log.error("Uncaught exception %s\n%r", self._request_summary(),
                          self.request, exc_info=(typ, value, tb))
    def _ui_module(self, name, module):
        """Return a render callable for the named UI module class,
        instantiating it lazily and tracking it in ``_active_modules`` so
        render() can collect its embedded assets."""
        def render(*args, **kwargs):
            if not hasattr(self, "_active_modules"):
                self._active_modules = {}
            if name not in self._active_modules:
                self._active_modules[name] = module(self)
            rendered = self._active_modules[name].render(*args, **kwargs)
            return rendered
        return render
def _ui_method(self, method):
return lambda *args, **kwargs: method(self, *args, **kwargs)
def _clear_headers_for_304(self):
# 304 responses should not contain entity headers (defined in
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec7.html#sec7.1)
# not explicitly allowed by
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.5
headers = ["Allow", "Content-Encoding", "Content-Language",
"Content-Length", "Content-MD5", "Content-Range",
"Content-Type", "Last-Modified"]
for h in headers:
self.clear_header(h)
def asynchronous(method):
    """Wrap request handler methods with this if they are asynchronous.

    Disables the automatic ``finish()`` when the method returns; the
    handler is responsible for finishing the request.  If the wrapped
    method returns a yieldable (e.g. from ``@gen.coroutine``), the request
    is finished automatically when it resolves, and its exceptions are
    re-raised into the stack context.
    """
    # Delay the IOLoop import because it's not available on app engine.
    from salt.ext.tornado.ioloop import IOLoop
    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        self._auto_finish = False
        with stack_context.ExceptionStackContext(
                self._stack_context_handle_exception):
            result = method(self, *args, **kwargs)
            if result is not None:
                result = gen.convert_yielded(result)
                # If @asynchronous is used with @gen.coroutine, (but
                # not @gen.engine), we can automatically finish the
                # request when the future resolves.  Additionally,
                # the Future will swallow any exceptions so we need
                # to throw them back out to the stack context to finish
                # the request.
                def future_complete(f):
                    f.result()
                    if not self._finished:
                        self.finish()
                IOLoop.current().add_future(result, future_complete)
                # Once we have done this, hide the Future from our
                # caller (i.e. RequestHandler._when_complete), which
                # would otherwise set up its own callback and
                # exception handler (resulting in exceptions being
                # logged twice).
                return None
            return result
    return wrapper
def stream_request_body(cls):
    """Class decorator: deliver the request body incrementally through
    ``data_received`` instead of buffering it before the handler runs.

    Raises TypeError when applied to something that is not a
    RequestHandler subclass.
    """
    if not issubclass(cls, RequestHandler):
        # Bug fix: TypeError does not %-format its arguments; build the
        # message explicitly instead of passing (format, value) separately.
        raise TypeError("expected subclass of RequestHandler, got %r" % cls)
    cls._stream_request_body = True
    return cls
def _has_stream_request_body(cls):
    """Return True if ``cls`` was decorated with ``@stream_request_body``.

    Raises TypeError for non-RequestHandler classes.
    """
    if not issubclass(cls, RequestHandler):
        # Bug fix: format the message explicitly; TypeError("...%r", cls)
        # would carry an unformatted tuple instead of a readable message.
        raise TypeError("expected subclass of RequestHandler, got %r" % cls)
    return getattr(cls, '_stream_request_body', False)
def removeslash(method):
    """Handler-method decorator that removes trailing slashes from the path.

    GET/HEAD requests to ``/foo/`` are permanently redirected to ``/foo``
    (preserving the query string); other methods get a 404.
    """
    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        if self.request.path.endswith("/"):
            if self.request.method in ("GET", "HEAD"):
                uri = self.request.path.rstrip("/")
                if uri:  # don't try to redirect '/' to ''
                    if self.request.query:
                        uri += "?" + self.request.query
                    self.redirect(uri, permanent=True)
                    return
            else:
                raise HTTPError(404)
        return method(self, *args, **kwargs)
    return wrapper
def addslash(method):
    """Handler-method decorator that adds a missing trailing slash.

    GET/HEAD requests to ``/foo`` are permanently redirected to ``/foo/``
    (preserving the query string); other methods get a 404.
    """
    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        if not self.request.path.endswith("/"):
            if self.request.method in ("GET", "HEAD"):
                uri = self.request.path + "/"
                if self.request.query:
                    uri += "?" + self.request.query
                self.redirect(uri, permanent=True)
                return
            raise HTTPError(404)
        return method(self, *args, **kwargs)
    return wrapper
class _ApplicationRouter(ReversibleRuleRouter):
    """Routing implementation used internally by `Application`.

    Extends the reversible rule router so that nested list/tuple rule
    targets become nested `_ApplicationRouter`s, and rule targets that are
    `RequestHandler` subclasses are dispatched through the application's
    handler delegate.
    """
    def __init__(self, application, rules=None):
        assert isinstance(application, Application)
        self.application = application
        super(_ApplicationRouter, self).__init__(rules)
    def process_rule(self, rule):
        # Wrap nested rule lists in their own router so matching recurses.
        rule = super(_ApplicationRouter, self).process_rule(rule)
        if isinstance(rule.target, (list, tuple)):
            rule.target = _ApplicationRouter(self.application, rule.target)
        return rule
    def get_target_delegate(self, target, request, **target_params):
        # RequestHandler targets run through the application machinery.
        if isclass(target) and issubclass(target, RequestHandler):
            return self.application.get_handler_delegate(request, target, **target_params)
        return super(_ApplicationRouter, self).get_target_delegate(target, request, **target_params)
class Application(ReversibleRouter):
    """A collection of request handlers that make up a web application.

    Routes requests via the ``handlers`` rules, wires up output transforms,
    UI modules/methods, optional static file serving, per-host handler
    sets, and debug-mode conveniences.
    """
    def __init__(self, handlers=None, default_host=None, transforms=None,
                 **settings):
        if transforms is None:
            self.transforms = []
            # "compress_response" (and its older alias "gzip") turns on
            # the gzip output transform.
            if settings.get("compress_response") or settings.get("gzip"):
                self.transforms.append(GZipContentEncoding)
        else:
            self.transforms = transforms
        self.default_host = default_host
        self.settings = settings
        # UI modules available to every template by default.
        self.ui_modules = {'linkify': _linkify,
                           'xsrf_form_html': _xsrf_form_html,
                           'Template': TemplateModule,
                           }
        self.ui_methods = {}
        self._load_ui_modules(settings.get("ui_modules", {}))
        self._load_ui_methods(settings.get("ui_methods", {}))
        if self.settings.get("static_path"):
            # Prepend routes for static files (and favicon/robots) so they
            # take precedence over the user-supplied handlers.
            path = self.settings["static_path"]
            handlers = list(handlers or [])
            static_url_prefix = settings.get("static_url_prefix",
                                             "/static/")
            static_handler_class = settings.get("static_handler_class",
                                                StaticFileHandler)
            static_handler_args = settings.get("static_handler_args", {})
            static_handler_args['path'] = path
            for pattern in [re.escape(static_url_prefix) + r"(.*)",
                            r"/(favicon\.ico)", r"/(robots\.txt)"]:
                handlers.insert(0, (pattern, static_handler_class,
                                    static_handler_args))
        if self.settings.get('debug'):
            # debug=True implies these development-friendly defaults.
            self.settings.setdefault('autoreload', True)
            self.settings.setdefault('compiled_template_cache', False)
            self.settings.setdefault('static_hash_cache', False)
            self.settings.setdefault('serve_traceback', True)
        self.wildcard_router = _ApplicationRouter(self, handlers)
        self.default_router = _ApplicationRouter(self, [
            Rule(AnyMatches(), self.wildcard_router)
        ])
        # Automatically reload modified modules
        if self.settings.get('autoreload'):
            from salt.ext.tornado import autoreload
            autoreload.start()
    def listen(self, port, address="", **kwargs):
        """Start an HTTP server for this application on the given port.

        Returns the `HTTPServer` object.
        """
        # import is here rather than top level because HTTPServer
        # is not importable on appengine
        from salt.ext.tornado.httpserver import HTTPServer
        server = HTTPServer(self, **kwargs)
        server.listen(port, address)
        return server
    def add_handlers(self, host_pattern, host_handlers):
        """Append handlers that apply only to hosts matching ``host_pattern``.

        Host rules are inserted just before the wildcard catch-all rule so
        they are tried first.
        """
        host_matcher = HostMatches(host_pattern)
        rule = Rule(host_matcher, _ApplicationRouter(self, host_handlers))
        self.default_router.rules.insert(-1, rule)
        if self.default_host is not None:
            self.wildcard_router.add_rules([(
                DefaultHostMatches(self, host_matcher.host_pattern),
                host_handlers
            )])
    def add_transform(self, transform_class):
        """Register an additional output transform class."""
        self.transforms.append(transform_class)
    def _load_ui_methods(self, methods):
        """Register UI methods from a module, list, or name->callable dict."""
        if isinstance(methods, types.ModuleType):
            self._load_ui_methods(dict((n, getattr(methods, n))
                                       for n in dir(methods)))
        elif isinstance(methods, list):
            for m in methods:
                self._load_ui_methods(m)
        else:
            # Only public, lowercase-initial callables are registered.
            for name, fn in methods.items():
                if not name.startswith("_") and hasattr(fn, "__call__") \
                        and name[0].lower() == name[0]:
                    self.ui_methods[name] = fn
    def _load_ui_modules(self, modules):
        """Register UIModule subclasses from a module, list, or dict."""
        if isinstance(modules, types.ModuleType):
            self._load_ui_modules(dict((n, getattr(modules, n))
                                       for n in dir(modules)))
        elif isinstance(modules, list):
            for m in modules:
                self._load_ui_modules(m)
        else:
            assert isinstance(modules, dict)
            for name, cls in modules.items():
                try:
                    if issubclass(cls, UIModule):
                        self.ui_modules[name] = cls
                except TypeError:
                    # issubclass raises for non-class values; skip them.
                    pass
    def __call__(self, request):
        # Legacy HTTPServer interface
        dispatcher = self.find_handler(request)
        return dispatcher.execute()
    def find_handler(self, request, **kwargs):
        """Return the message delegate that will handle ``request``.

        Falls back to ``default_handler_class`` when no rule matches, and
        finally to a 404 ErrorHandler.
        """
        route = self.default_router.find_handler(request)
        if route is not None:
            return route
        if self.settings.get('default_handler_class'):
            return self.get_handler_delegate(
                request,
                self.settings['default_handler_class'],
                self.settings.get('default_handler_args', {}))
        return self.get_handler_delegate(
            request, ErrorHandler, {'status_code': 404})
    def get_handler_delegate(self, request, target_class, target_kwargs=None,
                             path_args=None, path_kwargs=None):
        """Create an `_HandlerDelegate` that will run ``target_class`` for
        ``request`` with the given constructor and path arguments."""
        return _HandlerDelegate(
            self, request, target_class, target_kwargs, path_args, path_kwargs)
    def reverse_url(self, name, *args):
        """Return the URL path for the handler named ``name``; raises
        KeyError when no such named rule exists."""
        reversed_url = self.default_router.reverse_url(name, *args)
        if reversed_url is not None:
            return reversed_url
        raise KeyError("%s not found in named urls" % name)
    def log_request(self, handler):
        """Log a completed request.

        Uses info/warning/error depending on the status class; a
        ``log_function`` setting overrides this behavior entirely.
        """
        if "log_function" in self.settings:
            self.settings["log_function"](handler)
            return
        if handler.get_status() < 400:
            log_method = access_log.info
        elif handler.get_status() < 500:
            log_method = access_log.warning
        else:
            log_method = access_log.error
        request_time = 1000.0 * handler.request.request_time()
        log_method("%d %s %.2fms", handler.get_status(),
                   handler._request_summary(), request_time)
class _HandlerDelegate(httputil.HTTPMessageDelegate):
    """Adapts a RequestHandler class to the HTTPMessageDelegate interface.

    Buffers the request body (or streams it, for handlers decorated with
    ``@stream_request_body``) and runs the handler once the message is
    complete.
    """
    def __init__(self, application, request, handler_class, handler_kwargs,
                 path_args, path_kwargs):
        self.application = application
        self.connection = request.connection
        self.request = request
        self.handler_class = handler_class
        self.handler_kwargs = handler_kwargs or {}
        self.path_args = path_args or []
        self.path_kwargs = path_kwargs or {}
        # Accumulates body chunks when not in streaming mode.
        self.chunks = []
        self.stream_request_body = _has_stream_request_body(self.handler_class)
    def headers_received(self, start_line, headers):
        if self.stream_request_body:
            # In streaming mode, request.body is a Future resolved when the
            # whole body has been delivered; start the handler immediately.
            self.request.body = Future()
            return self.execute()
    def data_received(self, data):
        if self.stream_request_body:
            return self.handler.data_received(data)
        else:
            self.chunks.append(data)
    def finish(self):
        if self.stream_request_body:
            self.request.body.set_result(None)
        else:
            # Non-streaming: assemble the body, parse it, then execute.
            self.request.body = b''.join(self.chunks)
            self.request._parse_body()
            self.execute()
    def on_connection_close(self):
        if self.stream_request_body:
            self.handler.on_connection_close()
        else:
            self.chunks = None
    def execute(self):
        # If template cache is disabled (usually in the debug mode),
        # re-compile templates and reload static files on every
        # request so you don't need to restart to see changes
        if not self.application.settings.get("compiled_template_cache", True):
            with RequestHandler._template_loader_lock:
                for loader in RequestHandler._template_loaders.values():
                    loader.reset()
        if not self.application.settings.get('static_hash_cache', True):
            StaticFileHandler.reset()
        self.handler = self.handler_class(self.application, self.request,
                                          **self.handler_kwargs)
        transforms = [t(self.request) for t in self.application.transforms]
        if self.stream_request_body:
            self.handler._prepared_future = Future()
        # Note that if an exception escapes handler._execute it will be
        # trapped in the Future it returns (which we are ignoring here,
        # leaving it to be logged when the Future is GC'd).
        # However, that shouldn't happen because _execute has a blanket
        # except handler, and we cannot easily access the IOLoop here to
        # call add_future (because of the requirement to remain compatible
        # with WSGI)
        self.handler._execute(transforms, *self.path_args,
                              **self.path_kwargs)
        # If we are streaming the request body, then execute() is finished
        # when the handler has prepared to receive the body. If not,
        # it doesn't matter when execute() finishes (so we return None)
        return self.handler._prepared_future
class HTTPError(Exception):
    """An exception that will be turned into an HTTP error response.

    ``log_message`` (with optional %-``args``) is written to the server
    log; ``reason`` (keyword-only) overrides the standard reason phrase
    sent to the client.
    """
    def __init__(self, status_code=500, log_message=None, *args, **kwargs):
        self.status_code = status_code
        self.log_message = log_message
        self.args = args
        self.reason = kwargs.get('reason', None)
        # With no args, escape literal % so later %-formatting stays safe.
        if log_message and not args:
            self.log_message = log_message.replace('%', '%%')
    def __str__(self):
        base = "HTTP %d: %s" % (
            self.status_code,
            self.reason or httputil.responses.get(self.status_code, 'Unknown'))
        if not self.log_message:
            return base
        return base + " (" + (self.log_message % self.args) + ")"
class Finish(Exception):
    """Raise inside a handler to end the request cleanly.

    Handled by ``_handle_request_exception``: the request is finished
    (with any exception args as the final chunk) and nothing is logged.
    """
    pass
class MissingArgumentError(HTTPError):
    """HTTP 400 raised when a required request argument is missing.

    The missing argument's name is kept in ``arg_name``.
    """
    def __init__(self, arg_name):
        super(MissingArgumentError, self).__init__(
            400, 'Missing argument %s' % arg_name)
        self.arg_name = arg_name
class ErrorHandler(RequestHandler):
    """Generates an error response with ``status_code`` for all requests."""
    def initialize(self, status_code):
        self.set_status(status_code)
    def prepare(self):
        # Raising in prepare() short-circuits every HTTP method.
        raise HTTPError(self._status_code)
    def check_xsrf_cookie(self):
        # POSTs to an ErrorHandler don't actually have side effects,
        # so we don't need to check the xsrf token.  This allows POSTs
        # to the wrong url to return a 404 instead of 403.
        pass
class RedirectHandler(RequestHandler):
    """Redirects all GET requests to the configured URL.

    ``url`` may contain ``{0}``-style placeholders filled from the matched
    route's capture groups; ``permanent`` selects 301 vs 302.
    """
    def initialize(self, url, permanent=True):
        self._url = url
        self._permanent = permanent
    def get(self, *args):
        self.redirect(self._url.format(*args), permanent=self._permanent)
class StaticFileHandler(RequestHandler):
CACHE_MAX_AGE = 86400 * 365 * 10 # 10 years
_static_hashes = {} # type: typing.Dict
_lock = threading.Lock() # protects _static_hashes
    def initialize(self, path, default_filename=None):
        """Configure the filesystem root and the filename served for
        directory requests (e.g. ``index.html``)."""
        self.root = path
        self.default_filename = default_filename
    @classmethod
    def reset(cls):
        """Clear the class-wide cache of static file content hashes."""
        with cls._lock:
            cls._static_hashes = {}
    def head(self, path):
        """HEAD is GET without the body."""
        return self.get(path, include_body=False)
    @gen.coroutine
    def get(self, path, include_body=True):
        """Serve the requested static file.

        Validates the path, handles conditional requests (304) and HTTP
        Range requests (206/416 with Content-Range), and streams the file
        content in chunks when ``include_body`` is true.
        """
        # Set up our path instance variables.
        self.path = self.parse_url_path(path)
        del path  # make sure we don't refer to path instead of self.path again
        absolute_path = self.get_absolute_path(self.root, self.path)
        self.absolute_path = self.validate_absolute_path(
            self.root, absolute_path)
        if self.absolute_path is None:
            # validate_absolute_path already issued a redirect.
            return
        self.modified = self.get_modified_time()
        self.set_headers()
        if self.should_return_304():
            self.set_status(304)
            return
        request_range = None
        range_header = self.request.headers.get("Range")
        if range_header:
            # As per RFC 2616 14.16, if an invalid Range header is specified,
            # the request will be treated as if the header didn't exist.
            request_range = httputil._parse_request_range(range_header)
        size = self.get_content_size()
        if request_range:
            start, end = request_range
            if (start is not None and start >= size) or end == 0:
                # As per RFC 2616 14.35.1, a range is not satisfiable only: if
                # the first requested byte is equal to or greater than the
                # content, or when a suffix with length 0 is specified
                self.set_status(416)  # Range Not Satisfiable
                self.set_header("Content-Type", "text/plain")
                self.set_header("Content-Range", "bytes */%s" % (size, ))
                return
            if start is not None and start < 0:
                # Negative start means a suffix range ("last N bytes").
                start += size
            if end is not None and end > size:
                # Clients sometimes blindly use a large range to limit their
                # download size; cap the endpoint at the actual file size.
                end = size
            # Note: only return HTTP 206 if less than the entire range has been
            # requested. Not only is this semantically correct, but Chrome
            # refuses to play audio if it gets an HTTP 206 in response to
            # ``Range: bytes=0-``.
            if size != (end or size) - (start or 0):
                self.set_status(206)  # Partial Content
                self.set_header("Content-Range",
                                httputil._get_content_range(start, end, size))
        else:
            start = end = None
        if start is not None and end is not None:
            content_length = end - start
        elif end is not None:
            content_length = end
        elif start is not None:
            content_length = size - start
        else:
            content_length = size
        self.set_header("Content-Length", content_length)
        if include_body:
            content = self.get_content(self.absolute_path, start, end)
            if isinstance(content, bytes):
                content = [content]
            for chunk in content:
                try:
                    self.write(chunk)
                    yield self.flush()
                except iostream.StreamClosedError:
                    # Client went away mid-transfer; nothing more to do.
                    return
        else:
            assert self.request.method == "HEAD"
    def compute_etag(self):
        """Return the quoted cached content hash for the file, or None to
        skip the Etag header when no hash is cached yet."""
        version_hash = self._get_cached_version(self.absolute_path)
        if not version_hash:
            return None
        return '"%s"' % (version_hash, )
    def set_headers(self):
        """Set the response headers for the file being served.

        Sets Accept-Ranges, Etag, Last-Modified, Content-Type and (when
        ``get_cache_time`` is positive) Expires/Cache-Control, then gives
        subclasses a hook via ``set_extra_headers``.
        """
        self.set_header("Accept-Ranges", "bytes")
        self.set_etag_header()
        if self.modified is not None:
            self.set_header("Last-Modified", self.modified)
        content_type = self.get_content_type()
        if content_type:
            self.set_header("Content-Type", content_type)
        cache_time = self.get_cache_time(self.path, self.modified,
                                         content_type)
        if cache_time > 0:
            self.set_header("Expires", datetime.datetime.utcnow() +
                            datetime.timedelta(seconds=cache_time))
            self.set_header("Cache-Control", "max-age=" + str(cache_time))
        self.set_extra_headers(self.path)
    def should_return_304(self):
        """Return True if a 304 Not Modified should be sent, based on the
        Etag (If-None-Match) or the If-Modified-Since header."""
        if self.check_etag_header():
            return True
        # Check the If-Modified-Since, and don't send the result if the
        # content has not been modified
        ims_value = self.request.headers.get("If-Modified-Since")
        if ims_value is not None:
            date_tuple = email.utils.parsedate(ims_value)
            if date_tuple is not None:
                if_since = datetime.datetime(*date_tuple[:6])
                if if_since >= self.modified:
                    return True
        return False
@classmethod
def get_absolute_path(cls, root, path):
abspath = os.path.abspath(os.path.join(root, path))
return abspath
    def validate_absolute_path(self, root, absolute_path):
        """Validate and return the absolute path to serve.

        Rejects paths escaping ``root`` (403), redirects directory requests
        missing a trailing slash (returning None), substitutes the default
        filename for directories, and raises 404/403 for missing or
        non-file paths.
        """
        # os.path.abspath strips a trailing /.
        # We must add it back to `root` so that we only match files
        # in a directory named `root` instead of files starting with
        # that prefix.
        root = os.path.abspath(root)
        if not root.endswith(os.path.sep):
            # abspath always removes a trailing slash, except when
            # root is '/'. This is an unusual case, but several projects
            # have independently discovered this technique to disable
            # Tornado's path validation and (hopefully) do their own,
            # so we need to support it.
            root += os.path.sep
        # The trailing slash also needs to be temporarily added back
        # the requested path so a request to root/ will match.
        if not (absolute_path + os.path.sep).startswith(root):
            raise HTTPError(403, "%s is not in root static directory",
                            self.path)
        if (os.path.isdir(absolute_path) and
                self.default_filename is not None):
            # need to look at the request.path here for when path is empty
            # but there is some prefix to the path that was already
            # trimmed by the routing
            if not self.request.path.endswith("/"):
                self.redirect(self.request.path + "/", permanent=True)
                return
            absolute_path = os.path.join(absolute_path, self.default_filename)
        if not os.path.exists(absolute_path):
            raise HTTPError(404)
        if not os.path.isfile(absolute_path):
            raise HTTPError(403, "%s is not a file", self.path)
        return absolute_path
@classmethod
def get_content(cls, abspath, start=None, end=None):
with open(abspath, "rb") as file:
if start is not None:
file.seek(start)
if end is not None:
remaining = end - (start or 0)
else:
remaining = None
while True:
chunk_size = 64 * 1024
if remaining is not None and remaining < chunk_size:
chunk_size = remaining
chunk = file.read(chunk_size)
if chunk:
if remaining is not None:
remaining -= len(chunk)
yield chunk
else:
if remaining is not None:
assert remaining == 0
return
    @classmethod
    def get_content_version(cls, abspath):
        """Return a version string (MD5 hex digest) for the file at ``abspath``."""
        data = cls.get_content(abspath)
        hasher = hashlib.md5()
        # Handle both a direct bytes return and an iterator of chunks.
        if isinstance(data, bytes):
            hasher.update(data)
        else:
            for chunk in data:
                hasher.update(chunk)
        return hasher.hexdigest()
    def _stat(self):
        # Cache the os.stat result for self.absolute_path on first use.
        if not hasattr(self, '_stat_result'):
            self._stat_result = os.stat(self.absolute_path)
        return self._stat_result
    def get_content_size(self):
        """Return the size in bytes of the file at ``self.absolute_path``."""
        stat_result = self._stat()
        return stat_result[stat.ST_SIZE]
    def get_modified_time(self):
        """Return the file's modification time as a naive UTC ``datetime``."""
        stat_result = self._stat()
        modified = datetime.datetime.utcfromtimestamp(
            stat_result[stat.ST_MTIME])
        return modified
    def get_content_type(self):
        """Return the ``Content-Type`` header value, guessed from the filename."""
        mime_type, encoding = mimetypes.guess_type(self.absolute_path)
        # per RFC 6713, use the appropriate type for a gzip compressed file
        if encoding == "gzip":
            return "application/gzip"
        # As of 2015-07-21 there is no bzip2 encoding defined at
        # http://www.iana.org/assignments/media-types/media-types.xhtml
        # So for that (and any other encoding), use octet-stream.
        elif encoding is not None:
            return "application/octet-stream"
        elif mime_type is not None:
            return mime_type
        # if mime_type not detected, use application/octet-stream
        else:
            return "application/octet-stream"
    def set_extra_headers(self, path):
        """Hook for subclasses to add extra response headers; no-op by default."""
        pass
    def get_cache_time(self, path, modified, mime_type):
        """Return the cache duration in seconds for this request.

        Requests carrying a ``v`` query argument (content-hashed urls)
        are cached for ``CACHE_MAX_AGE``; all others are not cached.
        """
        return self.CACHE_MAX_AGE if "v" in self.request.arguments else 0
    @classmethod
    def make_static_url(cls, settings, path, include_version=True):
        """Construct a url for the given static file path.

        Appends ``?v=<hash>`` (a content-derived version) unless
        ``include_version`` is false or no hash could be computed.
        """
        url = settings.get('static_url_prefix', '/static/') + path
        if not include_version:
            return url
        version_hash = cls.get_version(settings, path)
        if not version_hash:
            return url
        return '%s?v=%s' % (url, version_hash)
    def parse_url_path(self, url_path):
        """Convert a url path (always ``/``-separated) to a filesystem path."""
        if os.path.sep != "/":
            url_path = url_path.replace("/", os.path.sep)
        return url_path
    @classmethod
    def get_version(cls, settings, path):
        """Return the cached version hash for ``path`` under ``settings['static_path']``."""
        abs_path = cls.get_absolute_path(settings['static_path'], path)
        return cls._get_cached_version(abs_path)
    @classmethod
    def _get_cached_version(cls, abs_path):
        """Return the memoized content hash for ``abs_path``, or ``None``.

        Hashes are computed at most once per process and stored in
        ``cls._static_hashes`` under ``cls._lock``; failures are cached
        as ``None`` so unreadable files are not re-hashed on every call.
        """
        with cls._lock:
            hashes = cls._static_hashes
            if abs_path not in hashes:
                try:
                    hashes[abs_path] = cls.get_content_version(abs_path)
                except Exception:
                    gen_log.error("Could not open static file %r", abs_path)
                    hashes[abs_path] = None
            hsh = hashes.get(abs_path)
            if hsh:
                return hsh
        return None
class FallbackHandler(RequestHandler):
    """A ``RequestHandler`` that delegates to another HTTP server callback.

    The wrapped ``fallback`` callable receives the raw request during
    ``prepare`` and is responsible for producing the response; this
    handler marks itself finished so no further processing happens here.
    """
    def initialize(self, fallback):
        self.fallback = fallback
    def prepare(self):
        self.fallback(self.request)
        self._finished = True
class OutputTransform(object):
    """Base class for transforms applied to an outgoing HTTP response.

    This base implementation passes the status code, headers, and body
    chunks through unchanged; subclasses override the hooks to rewrite
    them.
    """
    def __init__(self, request):
        pass
    def transform_first_chunk(self, status_code, headers, chunk, finishing):
        # type: (int, httputil.HTTPHeaders, bytes, bool) -> typing.Tuple[int, httputil.HTTPHeaders, bytes]
        return status_code, headers, chunk
    def transform_chunk(self, chunk, finishing):
        return chunk
class GZipContentEncoding(OutputTransform):
    """Applies gzip content encoding to responses that benefit from it.

    Compression is enabled only when the client sent
    ``Accept-Encoding: gzip``, the content type is compressible, no
    ``Content-Encoding`` is already set, and a single-chunk body is not
    trivially small.
    """
    # Whitelist of compressible mime types (in addition to any types
    # beginning with "text/").
    CONTENT_TYPES = set(["application/javascript", "application/x-javascript",
                         "application/xml", "application/atom+xml",
                         "application/json", "application/xhtml+xml",
                         "image/svg+xml"])
    # Python's GzipFile defaults to level 9, while most other gzip
    # tools (including gzip itself) default to 6, which is probably a
    # better CPU/size tradeoff.
    GZIP_LEVEL = 6
    # Responses that are too short are unlikely to benefit from gzipping
    # after considering the "Content-Encoding: gzip" header and the header
    # inside the gzip encoding.
    # Note that responses written in multiple chunks will be compressed
    # regardless of size.
    MIN_LENGTH = 1024
    def __init__(self, request):
        self._gzipping = "gzip" in request.headers.get("Accept-Encoding", "")
    def _compressible_type(self, ctype):
        return ctype.startswith('text/') or ctype in self.CONTENT_TYPES
    def transform_first_chunk(self, status_code, headers, chunk, finishing):
        # type: (int, httputil.HTTPHeaders, bytes, bool) -> typing.Tuple[int, httputil.HTTPHeaders, bytes]
        # TODO: can/should this type be inherited from the superclass?
        # Advertise to caches that the response depends on Accept-Encoding.
        if 'Vary' in headers:
            headers['Vary'] += ', Accept-Encoding'
        else:
            headers['Vary'] = 'Accept-Encoding'
        if self._gzipping:
            ctype = _unicode(headers.get("Content-Type", "")).split(";")[0]
            self._gzipping = self._compressible_type(ctype) and \
                (not finishing or len(chunk) >= self.MIN_LENGTH) and \
                ("Content-Encoding" not in headers)
        if self._gzipping:
            headers["Content-Encoding"] = "gzip"
            self._gzip_value = BytesIO()
            self._gzip_file = gzip.GzipFile(mode="w", fileobj=self._gzip_value,
                                            compresslevel=self.GZIP_LEVEL)
            chunk = self.transform_chunk(chunk, finishing)
            if "Content-Length" in headers:
                # The original content length is no longer correct.
                # If this is the last (and only) chunk, we can set the new
                # content-length; otherwise we remove it and fall back to
                # chunked encoding.
                if finishing:
                    headers["Content-Length"] = str(len(chunk))
                else:
                    del headers["Content-Length"]
        return status_code, headers, chunk
    def transform_chunk(self, chunk, finishing):
        if self._gzipping:
            self._gzip_file.write(chunk)
            if finishing:
                self._gzip_file.close()
            else:
                self._gzip_file.flush()
            # Emit whatever the compressor has produced so far and reset
            # the in-memory buffer for the next chunk.
            chunk = self._gzip_value.getvalue()
            self._gzip_value.truncate(0)
            self._gzip_value.seek(0)
        return chunk
def authenticated(method):
    """Decorator requiring that ``self.current_user`` be set.

    Unauthenticated GET/HEAD requests are redirected to the login url
    (adding a ``next`` parameter pointing back to the current page when
    the url has no query string); other methods raise ``HTTPError(403)``.
    """
    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        if not self.current_user:
            if self.request.method in ("GET", "HEAD"):
                url = self.get_login_url()
                if "?" not in url:
                    if urlparse.urlsplit(url).scheme:
                        # if login url is absolute, make next absolute too
                        next_url = self.request.full_url()
                    else:
                        next_url = self.request.uri
                    url += "?" + urlencode(dict(next=next_url))
                self.redirect(url)
                return
            raise HTTPError(403)
        return method(self, *args, **kwargs)
    return wrapper
class UIModule(object):
    """A re-usable UI unit rendered by a handler.

    Subclasses must override ``render``; the remaining hooks let a
    module contribute inline JavaScript/CSS, external asset files, and
    extra head/body HTML — each defaults to ``None`` (nothing).
    """
    def __init__(self, handler):
        self.handler = handler
        self.request = handler.request
        self.ui = handler.ui
        self.locale = handler.locale
    @property
    def current_user(self):
        # Convenience passthrough to the owning handler's user.
        return self.handler.current_user
    def render(self, *args, **kwargs):
        """Override in subclasses to return this module's output."""
        raise NotImplementedError()
    def embedded_javascript(self):
        """Return a JavaScript string to embed in the page, or ``None``."""
        return None
    def javascript_files(self):
        """Return JavaScript file urls to include, or ``None``."""
        return None
    def embedded_css(self):
        """Return a CSS string to embed in the page, or ``None``."""
        return None
    def css_files(self):
        """Return CSS file urls to include, or ``None``."""
        return None
    def html_head(self):
        """Return extra HTML for the page head, or ``None``."""
        return None
    def html_body(self):
        """Return extra HTML for the end of the page body, or ``None``."""
        return None
    def render_string(self, path, **kwargs):
        """Render the template at ``path`` via the owning handler."""
        return self.handler.render_string(path, **kwargs)
class _linkify(UIModule):
    """UI module rendering plain text with urls converted to HTML links."""
    def render(self, text, **kwargs):
        return escape.linkify(text, **kwargs)
class _xsrf_form_html(UIModule):
    """UI module rendering the handler's hidden XSRF token form field."""
    def render(self):
        return self.handler.xsrf_form_html()
class TemplateModule(UIModule):
    """UIModule that renders a template.

    The rendered template may call ``set_resources(...)`` to register
    asset keyword arguments.  Registrations are deduplicated per
    template path (re-registering with different values raises
    ``ValueError``) and later aggregated by the ``embedded_*`` /
    ``*_files`` / ``html_*`` hooks.
    """
    def __init__(self, handler):
        super(TemplateModule, self).__init__(handler)
        # keep resources in both a list and a dict to preserve order
        self._resource_list = []
        self._resource_dict = {}
    def render(self, path, **kwargs):
        def set_resources(**kwargs):
            if path not in self._resource_dict:
                self._resource_list.append(kwargs)
                self._resource_dict[path] = kwargs
            else:
                # A template must register identical resources on every
                # render; differing values indicate a programming error.
                if self._resource_dict[path] != kwargs:
                    raise ValueError("set_resources called with different "
                                     "resources for the same template")
            return ""
        return self.render_string(path, set_resources=set_resources,
                                  **kwargs)
    def _get_resources(self, key):
        # Yield values registered under ``key``, in registration order.
        return (r[key] for r in self._resource_list if key in r)
    def embedded_javascript(self):
        return "\n".join(self._get_resources("embedded_javascript"))
    def javascript_files(self):
        result = []
        for f in self._get_resources("javascript_files"):
            # A registration may provide either one file or a list.
            if isinstance(f, (unicode_type, bytes)):
                result.append(f)
            else:
                result.extend(f)
        return result
    def embedded_css(self):
        return "\n".join(self._get_resources("embedded_css"))
    def css_files(self):
        result = []
        for f in self._get_resources("css_files"):
            # A registration may provide either one file or a list.
            if isinstance(f, (unicode_type, bytes)):
                result.append(f)
            else:
                result.extend(f)
        return result
    def html_head(self):
        return "".join(self._get_resources("html_head"))
    def html_body(self):
        return "".join(self._get_resources("html_body"))
class _UIModuleNamespace(object):
    """Lazy namespace exposing UI modules by name.

    Both item and attribute access delegate to
    ``handler._ui_module(name, cls)``.
    """
    def __init__(self, handler, ui_modules):
        self.handler = handler
        self.ui_modules = ui_modules
    def __getitem__(self, key):
        return self.handler._ui_module(key, self.ui_modules[key])
    def __getattr__(self, key):
        try:
            return self[key]
        except KeyError as e:
            # The attribute protocol expects AttributeError, not KeyError.
            raise AttributeError(str(e))
if hasattr(hmac, 'compare_digest'): # python 3.3
_time_independent_equals = hmac.compare_digest
else:
def _time_independent_equals(a, b):
if len(a) != len(b):
return False
result = 0
if isinstance(a[0], int): # python3 byte strings
for x, y in zip(a, b):
result |= x ^ y
else: # python2
for x, y in zip(a, b):
result |= ord(x) ^ ord(y)
return result == 0
def create_signed_value(secret, name, value, version=None, clock=None,
                        key_version=None):
    """Sign and timestamp ``value`` so it cannot be forged.

    Returns the signed payload as bytes.  ``version`` selects the v1 or
    v2 wire format (default ``DEFAULT_SIGNED_VALUE_VERSION``); ``clock``
    may replace ``time.time`` for testing.  When ``secret`` is a dict of
    signing keys, ``key_version`` selects which key to use (v2 only).
    """
    if version is None:
        version = DEFAULT_SIGNED_VALUE_VERSION
    if clock is None:
        clock = time.time
    timestamp = utf8(str(int(clock())))
    value = base64.b64encode(utf8(value))
    if version == 1:
        signature = _create_signature_v1(secret, name, value, timestamp)
        value = b"|".join([value, timestamp, signature])
        return value
    elif version == 2:
        # The v2 format consists of a version number and a series of
        # length-prefixed fields "%d:%s", the last of which is a
        # signature, all separated by pipes. All numbers are in
        # decimal format with no leading zeros. The signature is an
        # HMAC-SHA256 of the whole string up to that point, including
        # the final pipe.
        #
        # The fields are:
        # - format version (i.e. 2; no length prefix)
        # - key version (integer, default is 0)
        # - timestamp (integer seconds since epoch)
        # - name (not encoded; assumed to be ~alphanumeric)
        # - value (base64-encoded)
        # - signature (hex-encoded; no length prefix)
        def format_field(s):
            return utf8("%d:" % len(s)) + utf8(s)
        to_sign = b"|".join([
            b"2",
            format_field(str(key_version or 0)),
            format_field(timestamp),
            format_field(name),
            format_field(value),
            b''])
        if isinstance(secret, dict):
            assert key_version is not None, 'Key version must be set when sign key dict is used'
            assert version >= 2, 'Version must be at least 2 for key version support'
            secret = secret[key_version]
        signature = _create_signature_v2(secret, to_sign)
        return to_sign + signature
    else:
        raise ValueError("Unsupported version %d" % version)
# A leading version number in decimal
# with no leading zeros, followed by a pipe.
_signed_value_version_re = re.compile(br"^([1-9][0-9]*)\|(.*)$")
def _get_version(value):
# Figures out what version value is. Version 1 did not include an
# explicit version field and started with arbitrary base64 data,
# which makes this tricky.
m = _signed_value_version_re.match(value)
if m is None:
version = 1
else:
try:
version = int(m.group(1))
if version > 999:
# Certain payloads from the version-less v1 format may
# be parsed as valid integers. Due to base64 padding
# restrictions, this can only happen for numbers whose
# length is a multiple of 4, so we can treat all
# numbers up to 999 as versions, and for the rest we
# fall back to v1 format.
version = 1
except ValueError:
version = 1
return version
def decode_signed_value(secret, name, value, max_age_days=31,
                        clock=None, min_version=None):
    """Inverse of ``create_signed_value``: verify and decode ``value``.

    Returns the original payload as bytes, or ``None`` when the value
    is empty, malformed, expired, uses a format version below
    ``min_version``, or fails signature verification.
    """
    if clock is None:
        clock = time.time
    if min_version is None:
        min_version = DEFAULT_SIGNED_VALUE_MIN_VERSION
    if min_version > 2:
        raise ValueError("Unsupported min_version %d" % min_version)
    if not value:
        return None
    value = utf8(value)
    version = _get_version(value)
    if version < min_version:
        return None
    if version == 1:
        return _decode_signed_value_v1(secret, name, value,
                                       max_age_days, clock)
    elif version == 2:
        return _decode_signed_value_v2(secret, name, value,
                                       max_age_days, clock)
    else:
        return None
def _decode_signed_value_v1(secret, name, value, max_age_days, clock):
    """Verify and decode a v1 signed value; return bytes or ``None``."""
    parts = utf8(value).split(b"|")
    if len(parts) != 3:
        return None
    signature = _create_signature_v1(secret, name, parts[0], parts[1])
    if not _time_independent_equals(parts[2], signature):
        gen_log.warning("Invalid cookie signature %r", value)
        return None
    timestamp = int(parts[1])
    if timestamp < clock() - max_age_days * 86400:
        gen_log.warning("Expired cookie %r", value)
        return None
    if timestamp > clock() + 31 * 86400:
        # _cookie_signature does not hash a delimiter between the
        # parts of the cookie, so an attacker could transfer trailing
        # digits from the payload to the timestamp without altering the
        # signature. For backwards compatibility, sanity-check timestamp
        # here instead of modifying _cookie_signature.
        gen_log.warning("Cookie timestamp in future; possible tampering %r",
                        value)
        return None
    if parts[1].startswith(b"0"):
        # Leading zeros on the timestamp could likewise pad a tampered
        # value, so reject them outright.
        gen_log.warning("Tampered cookie %r", value)
        return None
    try:
        return base64.b64decode(parts[0])
    except Exception:
        return None
def _decode_fields_v2(value):
def _consume_field(s):
length, _, rest = s.partition(b':')
n = int(length)
field_value = rest[:n]
# In python 3, indexing bytes returns small integers; we must
# use a slice to get a byte string as in python 2.
if rest[n:n + 1] != b'|':
raise ValueError("malformed v2 signed value field")
rest = rest[n + 1:]
return field_value, rest
rest = value[2:] # remove version number
key_version, rest = _consume_field(rest)
timestamp, rest = _consume_field(rest)
name_field, rest = _consume_field(rest)
value_field, passed_sig = _consume_field(rest)
return int(key_version), timestamp, name_field, value_field, passed_sig
def _decode_signed_value_v2(secret, name, value, max_age_days, clock):
    """Verify and decode a v2 signed value; return bytes or ``None``."""
    try:
        key_version, timestamp, name_field, value_field, passed_sig = _decode_fields_v2(value)
    except ValueError:
        return None
    # The signature covers everything before itself.
    signed_string = value[:-len(passed_sig)]
    if isinstance(secret, dict):
        try:
            secret = secret[key_version]
        except KeyError:
            return None
    expected_sig = _create_signature_v2(secret, signed_string)
    if not _time_independent_equals(passed_sig, expected_sig):
        return None
    if name_field != utf8(name):
        return None
    timestamp = int(timestamp)
    if timestamp < clock() - max_age_days * 86400:
        # The signature has expired.
        return None
    try:
        return base64.b64decode(value_field)
    except Exception:
        return None
def get_signature_key_version(value):
    """Return the signing key version of a v2 signed value, or ``None``.

    v1 values carry no key version; malformed values also return ``None``.
    """
    value = utf8(value)
    version = _get_version(value)
    if version < 2:
        return None
    try:
        key_version, _, _, _, _ = _decode_fields_v2(value)
    except ValueError:
        return None
    return key_version
def _create_signature_v1(secret, *parts):
    """Return the hex HMAC-SHA1 of the concatenated ``parts`` (legacy v1)."""
    hash = hmac.new(utf8(secret), digestmod=hashlib.sha1)
    for part in parts:
        hash.update(utf8(part))
    return utf8(hash.hexdigest())
def _create_signature_v2(secret, s):
    """Return the hex HMAC-SHA256 of ``s`` (v2 signature)."""
    hash = hmac.new(utf8(secret), digestmod=hashlib.sha256)
    hash.update(utf8(s))
    return utf8(hash.hexdigest())
def is_absolute(path):
    """Return True if ``path`` is an absolute url or filesystem path."""
    # str.startswith accepts a tuple of candidate prefixes.
    return path.startswith(("/", "http:", "https:"))
| true
| true
|
f717643a746a127d6805e12ee1e2871ece59b391
| 231
|
py
|
Python
|
replacefs/colors.py
|
yoarch/replace
|
5255810c019141f7de03b96c26a9b732d2218597
|
[
"MIT"
] | null | null | null |
replacefs/colors.py
|
yoarch/replace
|
5255810c019141f7de03b96c26a9b732d2218597
|
[
"MIT"
] | null | null | null |
replacefs/colors.py
|
yoarch/replace
|
5255810c019141f7de03b96c26a9b732d2218597
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# ANSI escape sequences used to colorize terminal output.
RED = '\033[38;5;196;1m'  # bold, 256-color palette entry 196
ORANGE = '\033[38;5;202;1m'  # bold, 256-color palette entry 202
WHITE = '\033[1;37m'  # bold white
BLUE = '\033[1;34m'  # bold blue
BASE_C = '\033[0m'  # reset all attributes to the terminal default
GREEN = '\033[38;5;40;1m'  # bold, 256-color palette entry 40
PURPLE = '\033[38;5;135;1m'  # bold, 256-color palette entry 135
GREY = '\033[1;30m'  # bold black (renders as grey)
YELLOW = '\033[1;33m'  # bold yellow
| 21
| 27
| 0.588745
|
RED = '\033[38;5;196;1m'
ORANGE = '\033[38;5;202;1m'
WHITE = '\033[1;37m'
BLUE = '\033[1;34m'
BASE_C = '\033[0m'
GREEN = '\033[38;5;40;1m'
PURPLE = '\033[38;5;135;1m'
GREY = '\033[1;30m'
YELLOW = '\033[1;33m'
| true
| true
|
f7176531240df0f77d476afe1bbca902bbc7bc3d
| 2,864
|
py
|
Python
|
src/assets/sd_vaccine_plots/main.py
|
drvinceknight/amwoss
|
8b0bf80f0a06dc5cf9bfeef4b9f9e174ccadf06d
|
[
"MIT"
] | 1
|
2022-03-21T21:35:44.000Z
|
2022-03-21T21:35:44.000Z
|
src/assets/sd_vaccine_plots/main.py
|
drvinceknight/amwoss
|
8b0bf80f0a06dc5cf9bfeef4b9f9e174ccadf06d
|
[
"MIT"
] | 71
|
2019-11-18T11:00:25.000Z
|
2021-10-21T22:49:40.000Z
|
src/assets/sd_vaccine_plots/main.py
|
drvinceknight/amwoss
|
8b0bf80f0a06dc5cf9bfeef4b9f9e174ccadf06d
|
[
"MIT"
] | 1
|
2020-01-15T12:00:49.000Z
|
2020-01-15T12:00:49.000Z
|
from scipy.integrate import solve_ivp
import matplotlib.pyplot as plt
plt.style.use('seaborn-whitegrid')
def derivatives(t, y, vaccine_rate, birth_rate=0.01):
    """Right-hand side of the SIR-with-vaccination epidemiology model.

    Args:
        t: a positive float (unused; required by the ODE solver API)
        y: a tuple (S, I, R) of population counts
        vaccine_rate: a positive float <= 1
        birth_rate: a positive float <= 1

    Returns:
        The tuple (dS/dt, dI/dt, dR/dt).
    """
    infection_rate = 0.3
    recovery_rate = 0.02
    death_rate = 0.01

    susceptible, infected, recovered = y
    total = susceptible + infected + recovered

    # Shared flow terms between the compartments.
    new_infections = (infection_rate * susceptible * infected) / total
    new_recoveries = recovery_rate * infected

    dSdt = (
        -new_infections
        + ((1 - vaccine_rate) * birth_rate * total)
        - (death_rate * susceptible)
    )
    dIdt = (
        new_infections
        - new_recoveries
        - (death_rate * infected)
    )
    dRdt = (
        new_recoveries
        - (death_rate * recovered)
        + (vaccine_rate * birth_rate * total)
    )
    return dSdt, dIdt, dRdt
def integrate_ode(
    derivative_function,
    t_span,
    y0=(2999, 1, 0),
    vaccine_rate=0.85,
    birth_rate=0.01,
):
    """Numerically integrate the epidemiology ODE system.

    Args:
        derivative_function: callable ``f(t, y, vaccine_rate, birth_rate)``
            returning a tuple of three floats
        t_span: endpoints of the time range to integrate over
        y0: initial (S, I, R) populations (default: (2999, 1, 0))
        vaccine_rate: a positive float <= 1 (default: 0.85)
        birth_rate: a positive float <= 1 (default: 0.01)

    Returns:
        A tuple ``(t, S, I, R)`` of arrays sampled along the solution.
    """
    solution = solve_ivp(
        derivative_function,
        t_span,
        y0,
        args=(vaccine_rate, birth_rate),
    )
    # solution.y has shape (3, n): one row per compartment.
    susceptible, infected, recovered = solution.y
    return solution.t, susceptible, infected, recovered
# Scenario 1: no vaccination, integrated over a 730-day horizon.
t_span = [0, 730]
t, S, I, R = integrate_ode(derivatives, t_span, vaccine_rate=0.0)
fig, ax = plt.subplots(1, figsize=(10, 5))
ax.plot(t, S, label='Susceptible', c='black', linestyle='solid', linewidth=1.75)
ax.plot(t, I, label='Infected', c='black', linestyle='dotted', linewidth=1.75)
ax.plot(t, R, label='Recovered', c='black', linestyle='dashed', linewidth=1.75)
# Legend is anchored above the axes so it does not cover the curves.
ax.legend(fontsize=14, frameon=True, ncol=3, bbox_to_anchor=(0.85, 1.13))
ax.set_xlabel('Time', fontsize=14)
ax.set_ylabel('People', fontsize=14)
fig.savefig("plot_no_vaccine.pdf")
# Scenario 2: 85% of births vaccinated, same horizon and styling.
t, S, I, R = integrate_ode(derivatives, t_span, vaccine_rate=0.85)
fig, ax = plt.subplots(1, figsize=(10, 5))
ax.plot(t, S, label='Susceptible', c='black', linestyle='solid', linewidth=1.75)
ax.plot(t, I, label='Infected', c='black', linestyle='dotted', linewidth=1.75)
ax.plot(t, R, label='Recovered', c='black', linestyle='dashed', linewidth=1.75)
ax.legend(fontsize=14, frameon=True, ncol=3, bbox_to_anchor=(0.85, 1.13))
ax.set_xlabel('Time', fontsize=14)
ax.set_ylabel('People', fontsize=14)
fig.savefig("plot_with_vaccine.pdf")
| 31.822222
| 80
| 0.62081
|
from scipy.integrate import solve_ivp
import matplotlib.pyplot as plt
plt.style.use('seaborn-whitegrid')
def derivatives(t, y, vaccine_rate, birth_rate=0.01):
infection_rate = 0.3
recovery_rate = 0.02
death_rate = 0.01
S, I, R = y
N = S + I + R
dSdt = (
-((infection_rate * S * I) / N)
+ ((1 - vaccine_rate) * birth_rate * N)
- (death_rate * S)
)
dIdt = (
((infection_rate * S * I) / N)
- (recovery_rate * I)
- (death_rate * I)
)
dRdt = (
(recovery_rate * I)
- (death_rate * R)
+ (vaccine_rate * birth_rate * N)
)
return dSdt, dIdt, dRdt
def integrate_ode(
derivative_function,
t_span,
y0=(2999, 1, 0),
vaccine_rate=0.85,
birth_rate=0.01,
):
sol = solve_ivp(
derivative_function,
t_span,
y0,
args=(vaccine_rate, birth_rate),
)
ts, S, I, R = sol.t, sol.y[0], sol.y[1], sol.y[2]
return ts, S, I, R
t_span = [0, 730]
t, S, I, R = integrate_ode(derivatives, t_span, vaccine_rate=0.0)
fig, ax = plt.subplots(1, figsize=(10, 5))
ax.plot(t, S, label='Susceptible', c='black', linestyle='solid', linewidth=1.75)
ax.plot(t, I, label='Infected', c='black', linestyle='dotted', linewidth=1.75)
ax.plot(t, R, label='Recovered', c='black', linestyle='dashed', linewidth=1.75)
ax.legend(fontsize=14, frameon=True, ncol=3, bbox_to_anchor=(0.85, 1.13))
ax.set_xlabel('Time', fontsize=14)
ax.set_ylabel('People', fontsize=14)
fig.savefig("plot_no_vaccine.pdf")
t, S, I, R = integrate_ode(derivatives, t_span, vaccine_rate=0.85)
fig, ax = plt.subplots(1, figsize=(10, 5))
ax.plot(t, S, label='Susceptible', c='black', linestyle='solid', linewidth=1.75)
ax.plot(t, I, label='Infected', c='black', linestyle='dotted', linewidth=1.75)
ax.plot(t, R, label='Recovered', c='black', linestyle='dashed', linewidth=1.75)
ax.legend(fontsize=14, frameon=True, ncol=3, bbox_to_anchor=(0.85, 1.13))
ax.set_xlabel('Time', fontsize=14)
ax.set_ylabel('People', fontsize=14)
fig.savefig("plot_with_vaccine.pdf")
| true
| true
|
f717657e62b89fc8eff399b8be1e18a4646e310b
| 8,981
|
py
|
Python
|
alipay/aop/api/request/AlipayOpenServicemarketOrderCreateRequest.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/request/AlipayOpenServicemarketOrderCreateRequest.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/request/AlipayOpenServicemarketOrderCreateRequest.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
class AlipayOpenServicemarketOrderCreateRequest(object):
    """Request builder for the ``alipay.open.servicemarket.order.create`` API.

    Business fields are exposed as plain properties; ``get_params``
    flattens them into the gateway's wire-format parameter dict, and
    ``get_multipart_params`` collects the file upload (the app logo).

    The per-field serialization logic that was repeated for every
    business field in ``get_params`` is factored into ``_serialize``;
    the produced parameter dict (keys, values, and insertion order) is
    unchanged.
    """

    # Business fields serialized with ``_serialize`` (JSON-encoded when
    # the value exposes ``to_alipay_dict``), in wire order.
    _BIZ_FIELDS = ('app_category_ids', 'app_desc', 'app_english_name',
                   'app_name', 'app_slogan', 'merchandise_id',
                   'merchant_pid', 'out_biz_no', 'service_email',
                   'service_phone')
    # Gateway/system fields copied verbatim when set, in wire order.
    _PLAIN_FIELDS = ('terminal_type', 'terminal_info', 'prod_code',
                     'notify_url', 'return_url')

    def __init__(self, biz_model=None):
        self._biz_model = biz_model
        self._app_category_ids = None
        self._app_desc = None
        self._app_english_name = None
        self._app_name = None
        self._app_slogan = None
        self._merchandise_id = None
        self._merchant_pid = None
        self._out_biz_no = None
        self._service_email = None
        self._service_phone = None
        self._app_logo = None
        self._version = "1.0"
        self._terminal_type = None
        self._terminal_info = None
        self._prod_code = None
        self._notify_url = None
        self._return_url = None
        self._udf_params = None
        self._need_encrypt = False

    @property
    def biz_model(self):
        return self._biz_model

    @biz_model.setter
    def biz_model(self, value):
        self._biz_model = value

    @property
    def app_category_ids(self):
        return self._app_category_ids

    @app_category_ids.setter
    def app_category_ids(self, value):
        self._app_category_ids = value

    @property
    def app_desc(self):
        return self._app_desc

    @app_desc.setter
    def app_desc(self, value):
        self._app_desc = value

    @property
    def app_english_name(self):
        return self._app_english_name

    @app_english_name.setter
    def app_english_name(self, value):
        self._app_english_name = value

    @property
    def app_name(self):
        return self._app_name

    @app_name.setter
    def app_name(self, value):
        self._app_name = value

    @property
    def app_slogan(self):
        return self._app_slogan

    @app_slogan.setter
    def app_slogan(self, value):
        self._app_slogan = value

    @property
    def merchandise_id(self):
        return self._merchandise_id

    @merchandise_id.setter
    def merchandise_id(self, value):
        self._merchandise_id = value

    @property
    def merchant_pid(self):
        return self._merchant_pid

    @merchant_pid.setter
    def merchant_pid(self, value):
        self._merchant_pid = value

    @property
    def out_biz_no(self):
        return self._out_biz_no

    @out_biz_no.setter
    def out_biz_no(self, value):
        self._out_biz_no = value

    @property
    def service_email(self):
        return self._service_email

    @service_email.setter
    def service_email(self, value):
        self._service_email = value

    @property
    def service_phone(self):
        return self._service_phone

    @service_phone.setter
    def service_phone(self, value):
        self._service_phone = value

    @property
    def app_logo(self):
        return self._app_logo

    @app_logo.setter
    def app_logo(self, value):
        # Only FileItem uploads are accepted; other values are
        # silently ignored (preserving the original behavior).
        if not isinstance(value, FileItem):
            return
        self._app_logo = value

    @property
    def version(self):
        return self._version

    @version.setter
    def version(self, value):
        self._version = value

    @property
    def terminal_type(self):
        return self._terminal_type

    @terminal_type.setter
    def terminal_type(self, value):
        self._terminal_type = value

    @property
    def terminal_info(self):
        return self._terminal_info

    @terminal_info.setter
    def terminal_info(self, value):
        self._terminal_info = value

    @property
    def prod_code(self):
        return self._prod_code

    @prod_code.setter
    def prod_code(self, value):
        self._prod_code = value

    @property
    def notify_url(self):
        return self._notify_url

    @notify_url.setter
    def notify_url(self, value):
        self._notify_url = value

    @property
    def return_url(self):
        return self._return_url

    @return_url.setter
    def return_url(self, value):
        self._return_url = value

    @property
    def udf_params(self):
        return self._udf_params

    @udf_params.setter
    def udf_params(self, value):
        # Only dicts are accepted; other values are silently ignored
        # (preserving the original behavior).
        if not isinstance(value, dict):
            return
        self._udf_params = value

    @property
    def need_encrypt(self):
        return self._need_encrypt

    @need_encrypt.setter
    def need_encrypt(self, value):
        self._need_encrypt = value

    def add_other_text_param(self, key, value):
        """Attach an arbitrary extra text parameter to the request."""
        if not self.udf_params:
            self.udf_params = dict()
        self.udf_params[key] = value

    @staticmethod
    def _serialize(value):
        """Serialize one business field value for the wire.

        Objects exposing ``to_alipay_dict`` are JSON-encoded with the
        gateway's canonical settings; plain values pass through as-is.
        """
        if hasattr(value, 'to_alipay_dict'):
            return json.dumps(obj=value.to_alipay_dict(), use_decimal=True,
                              ensure_ascii=False, sort_keys=True,
                              separators=(',', ':'))
        return value

    def get_params(self):
        """Assemble the flat text-parameter dict sent to the gateway."""
        params = dict()
        params[P_METHOD] = 'alipay.open.servicemarket.order.create'
        params[P_VERSION] = self.version
        if self.biz_model:
            params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), use_decimal=True, ensure_ascii=False, sort_keys=True, separators=(',', ':'))
        # Business fields all share the same serialization rule.
        for name in self._BIZ_FIELDS:
            value = getattr(self, name)
            if value:
                params[name] = self._serialize(value)
        # Gateway/system fields are copied verbatim.
        for name in self._PLAIN_FIELDS:
            value = getattr(self, name)
            if value:
                params[name] = value
        if self.udf_params:
            params.update(self.udf_params)
        return params

    def get_multipart_params(self):
        """Return the dict of file-upload parameters (the app logo, if set)."""
        multipart_params = dict()
        if self.app_logo:
            multipart_params['app_logo'] = self.app_logo
        return multipart_params
| 33.262963
| 176
| 0.635119
|
import simplejson as json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
class AlipayOpenServicemarketOrderCreateRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._app_category_ids = None
self._app_desc = None
self._app_english_name = None
self._app_name = None
self._app_slogan = None
self._merchandise_id = None
self._merchant_pid = None
self._out_biz_no = None
self._service_email = None
self._service_phone = None
self._app_logo = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def app_category_ids(self):
return self._app_category_ids
@app_category_ids.setter
def app_category_ids(self, value):
self._app_category_ids = value
@property
def app_desc(self):
return self._app_desc
@app_desc.setter
def app_desc(self, value):
self._app_desc = value
@property
def app_english_name(self):
return self._app_english_name
@app_english_name.setter
def app_english_name(self, value):
self._app_english_name = value
@property
def app_name(self):
return self._app_name
@app_name.setter
def app_name(self, value):
self._app_name = value
@property
def app_slogan(self):
return self._app_slogan
@app_slogan.setter
def app_slogan(self, value):
self._app_slogan = value
@property
def merchandise_id(self):
return self._merchandise_id
@merchandise_id.setter
def merchandise_id(self, value):
self._merchandise_id = value
@property
def merchant_pid(self):
return self._merchant_pid
@merchant_pid.setter
def merchant_pid(self, value):
self._merchant_pid = value
@property
def out_biz_no(self):
return self._out_biz_no
@out_biz_no.setter
def out_biz_no(self, value):
self._out_biz_no = value
@property
def service_email(self):
return self._service_email
@service_email.setter
def service_email(self, value):
self._service_email = value
@property
def service_phone(self):
return self._service_phone
@service_phone.setter
def service_phone(self, value):
self._service_phone = value
@property
def app_logo(self):
return self._app_logo
@app_logo.setter
def app_logo(self, value):
if not isinstance(value, FileItem):
return
self._app_logo = value
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'alipay.open.servicemarket.order.create'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), use_decimal=True, ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.app_category_ids:
if hasattr(self.app_category_ids, 'to_alipay_dict'):
params['app_category_ids'] = json.dumps(obj=self.app_category_ids.to_alipay_dict(), use_decimal=True, ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['app_category_ids'] = self.app_category_ids
if self.app_desc:
if hasattr(self.app_desc, 'to_alipay_dict'):
params['app_desc'] = json.dumps(obj=self.app_desc.to_alipay_dict(), use_decimal=True, ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['app_desc'] = self.app_desc
if self.app_english_name:
if hasattr(self.app_english_name, 'to_alipay_dict'):
params['app_english_name'] = json.dumps(obj=self.app_english_name.to_alipay_dict(), use_decimal=True, ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['app_english_name'] = self.app_english_name
if self.app_name:
if hasattr(self.app_name, 'to_alipay_dict'):
params['app_name'] = json.dumps(obj=self.app_name.to_alipay_dict(), use_decimal=True, ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['app_name'] = self.app_name
if self.app_slogan:
if hasattr(self.app_slogan, 'to_alipay_dict'):
params['app_slogan'] = json.dumps(obj=self.app_slogan.to_alipay_dict(), use_decimal=True, ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['app_slogan'] = self.app_slogan
if self.merchandise_id:
if hasattr(self.merchandise_id, 'to_alipay_dict'):
params['merchandise_id'] = json.dumps(obj=self.merchandise_id.to_alipay_dict(), use_decimal=True, ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['merchandise_id'] = self.merchandise_id
if self.merchant_pid:
if hasattr(self.merchant_pid, 'to_alipay_dict'):
params['merchant_pid'] = json.dumps(obj=self.merchant_pid.to_alipay_dict(), use_decimal=True, ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['merchant_pid'] = self.merchant_pid
if self.out_biz_no:
if hasattr(self.out_biz_no, 'to_alipay_dict'):
params['out_biz_no'] = json.dumps(obj=self.out_biz_no.to_alipay_dict(), use_decimal=True, ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['out_biz_no'] = self.out_biz_no
if self.service_email:
if hasattr(self.service_email, 'to_alipay_dict'):
params['service_email'] = json.dumps(obj=self.service_email.to_alipay_dict(), use_decimal=True, ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['service_email'] = self.service_email
if self.service_phone:
if hasattr(self.service_phone, 'to_alipay_dict'):
params['service_phone'] = json.dumps(obj=self.service_phone.to_alipay_dict(), use_decimal=True, ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['service_phone'] = self.service_phone
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
if self.app_logo:
multipart_params['app_logo'] = self.app_logo
return multipart_params
| true
| true
|
f71767ca2c739f1474e2622061bcec8545048d82
| 18,990
|
py
|
Python
|
plugins/modules/oci_opsi_resource_forecast_trend_facts.py
|
sagar2938/oci-ansible-collection
|
5b8ce583a0d5d0aabf14494d61aea4649e18d1e6
|
[
"Apache-2.0"
] | null | null | null |
plugins/modules/oci_opsi_resource_forecast_trend_facts.py
|
sagar2938/oci-ansible-collection
|
5b8ce583a0d5d0aabf14494d61aea4649e18d1e6
|
[
"Apache-2.0"
] | null | null | null |
plugins/modules/oci_opsi_resource_forecast_trend_facts.py
|
sagar2938/oci-ansible-collection
|
5b8ce583a0d5d0aabf14494d61aea4649e18d1e6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Ansible module metadata: community-supported module still in preview status.
ANSIBLE_METADATA = {
    "metadata_version": "1.1",
    "status": ["preview"],
    "supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_opsi_resource_forecast_trend_facts
short_description: Fetches details about a ResourceForecastTrend resource in Oracle Cloud Infrastructure
description:
- Fetches details about a ResourceForecastTrend resource in Oracle Cloud Infrastructure
- Get Forecast predictions for CPU and Storage resources since a time in the past.
If compartmentIdInSubtree is specified, aggregates resources in a compartment and in all sub-compartments.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment.
type: str
required: true
resource_metric:
description:
- Filter by resource metric.
Supported values are CPU , STORAGE, MEMORY and IO.
type: str
required: true
analysis_time_interval:
description:
- Specify time period in ISO 8601 format with respect to current time.
Default is last 30 days represented by P30D.
If timeInterval is specified, then timeIntervalStart and timeIntervalEnd will be ignored.
Examples P90D (last 90 days), P4W (last 4 weeks), P2M (last 2 months), P1Y (last 12 months), . Maximum value allowed is 25 months prior to
current time (P25M).
type: str
time_interval_start:
description:
- Analysis start time in UTC in ISO 8601 format(inclusive).
Example 2019-10-30T00:00:00Z (yyyy-MM-ddThh:mm:ssZ).
The minimum allowed value is 2 years prior to the current day.
timeIntervalStart and timeIntervalEnd parameters are used together.
If analysisTimeInterval is specified, this parameter is ignored.
type: str
time_interval_end:
description:
- Analysis end time in UTC in ISO 8601 format(exclusive).
Example 2019-10-30T00:00:00Z (yyyy-MM-ddThh:mm:ssZ).
timeIntervalStart and timeIntervalEnd are used together.
If timeIntervalEnd is not specified, current time is used as timeIntervalEnd.
type: str
database_type:
description:
- Filter by one or more database type.
Possible values are ADW-S, ATP-S, ADW-D, ATP-D, EXTERNAL-PDB, EXTERNAL-NONCDB.
type: list
elements: str
choices:
- "ADW-S"
- "ATP-S"
- "ADW-D"
- "ATP-D"
- "EXTERNAL-PDB"
- "EXTERNAL-NONCDB"
database_id:
description:
- Optional list of database L(OCIDs,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the associated DBaaS entity.
type: list
elements: str
id:
description:
- Optional list of database insight resource L(OCIDs,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm).
type: list
elements: str
exadata_insight_id:
description:
- Optional list of exadata insight resource L(OCIDs,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm).
type: list
elements: str
cdb_name:
description:
- Filter by one or more cdb name.
type: list
elements: str
statistic:
description:
- Choose the type of statistic metric data to be used for forecasting.
type: str
choices:
- "AVG"
- "MAX"
forecast_days:
description:
- Number of days used for utilization forecast analysis.
type: int
forecast_model:
description:
- "Choose algorithm model for the forecasting.
Possible values:
- LINEAR: Uses linear regression algorithm for forecasting.
- ML_AUTO: Automatically detects best algorithm to use for forecasting.
- ML_NO_AUTO: Automatically detects seasonality of the data for forecasting using linear or seasonal algorithm."
type: str
choices:
- "LINEAR"
- "ML_AUTO"
- "ML_NO_AUTO"
utilization_level:
description:
- "Filter by utilization level by the following buckets:
- HIGH_UTILIZATION: DBs with utilization greater or equal than 75.
- LOW_UTILIZATION: DBs with utilization lower than 25.
- MEDIUM_HIGH_UTILIZATION: DBs with utilization greater or equal than 50 but lower than 75.
- MEDIUM_LOW_UTILIZATION: DBs with utilization greater or equal than 25 but lower than 50."
type: str
choices:
- "HIGH_UTILIZATION"
- "LOW_UTILIZATION"
- "MEDIUM_HIGH_UTILIZATION"
- "MEDIUM_LOW_UTILIZATION"
confidence:
description:
- This parameter is used to change data's confidence level, this data is ingested by the
forecast algorithm.
Confidence is the probability of an interval to contain the expected population parameter.
Manipulation of this value will lead to different results.
If not set, default confidence value is 95%.
type: int
host_name:
description:
- Filter by one or more hostname.
type: list
elements: str
tablespace_name:
description:
- Tablespace name for a database
type: str
is_database_instance_level_metrics:
description:
- Flag to indicate if database instance level metrics should be returned. The flag is ignored when a host name filter is not applied.
When a hostname filter is applied this flag will determine whether to return metrics for the instances located on the specified host or for the
whole database which contains an instance on this host.
type: bool
defined_tag_equals:
description:
- "A list of tag filters to apply. Only resources with a defined tag matching the value will be returned.
Each item in the list has the format \\"{namespace}.{tagName}.{value}\\". All inputs are case-insensitive.
Multiple values for the same key (i.e. same namespace and tag name) are interpreted as \\"OR\\".
Values for different keys (i.e. different namespaces, different tag names, or both) are interpreted as \\"AND\\"."
type: list
elements: str
freeform_tag_equals:
description:
- "A list of tag filters to apply. Only resources with a freeform tag matching the value will be returned.
The key for each tag is \\"{tagName}.{value}\\". All inputs are case-insensitive.
Multiple values for the same tag name are interpreted as \\"OR\\". Values for different tag names are interpreted as \\"AND\\"."
type: list
elements: str
defined_tag_exists:
description:
- "A list of tag existence filters to apply. Only resources for which the specified defined tags exist will be returned.
Each item in the list has the format \\"{namespace}.{tagName}.true\\" (for checking existence of a defined tag)
or \\"{namespace}.true\\". All inputs are case-insensitive.
Currently, only existence (\\"true\\" at the end) is supported. Absence (\\"false\\" at the end) is not supported.
Multiple values for the same key (i.e. same namespace and tag name) are interpreted as \\"OR\\".
Values for different keys (i.e. different namespaces, different tag names, or both) are interpreted as \\"AND\\"."
type: list
elements: str
freeform_tag_exists:
description:
- "A list of tag existence filters to apply. Only resources for which the specified freeform tags exist the value will be returned.
The key for each tag is \\"{tagName}.true\\". All inputs are case-insensitive.
Currently, only existence (\\"true\\" at the end) is supported. Absence (\\"false\\" at the end) is not supported.
Multiple values for different tag names are interpreted as \\"AND\\"."
type: list
elements: str
compartment_id_in_subtree:
description:
- A flag to search all resources within a given compartment and all sub-compartments.
type: bool
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: Get a specific resource_forecast_trend
oci_opsi_resource_forecast_trend_facts:
# required
compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
resource_metric: resource_metric_example
# optional
analysis_time_interval: analysis_time_interval_example
time_interval_start: 2013-10-20T19:20:30+01:00
time_interval_end: 2013-10-20T19:20:30+01:00
database_type: [ "$p.getValue()" ]
database_id: [ "$p.getValue()" ]
id: [ "$p.getValue()" ]
exadata_insight_id: [ "$p.getValue()" ]
cdb_name: [ "$p.getValue()" ]
statistic: AVG
forecast_days: 56
forecast_model: LINEAR
utilization_level: HIGH_UTILIZATION
confidence: 56
host_name: [ "$p.getValue()" ]
tablespace_name: tablespace_name_example
is_database_instance_level_metrics: true
defined_tag_equals: [ "$p.getValue()" ]
freeform_tag_equals: [ "$p.getValue()" ]
defined_tag_exists: [ "$p.getValue()" ]
freeform_tag_exists: [ "$p.getValue()" ]
compartment_id_in_subtree: true
"""
RETURN = """
resource_forecast_trend:
description:
- ResourceForecastTrend resource
returned: on success
type: complex
contains:
time_interval_start:
description:
- The start timestamp that was passed into the request.
returned: on success
type: str
sample: "2020-12-06T00:00:00.000Z"
time_interval_end:
description:
- The end timestamp that was passed into the request.
returned: on success
type: str
sample: "2020-12-06T00:00:00.000Z"
resource_metric:
description:
- "Defines the type of resource metric (example: CPU, STORAGE)"
returned: on success
type: str
sample: STORAGE
usage_unit:
description:
- Displays usage unit ( CORES, GB)
returned: on success
type: str
sample: CORES
pattern:
description:
- Time series patterns used in the forecasting.
returned: on success
type: str
sample: LINEAR
tablespace_name:
description:
- The name of tablespace.
returned: on success
type: str
sample: tablespace_name_example
historical_data:
description:
- Time series data used for the forecast analysis.
returned: on success
type: complex
contains:
end_timestamp:
description:
- The timestamp in which the current sampling period ends in RFC 3339 format.
returned: on success
type: str
sample: "2020-05-01T00:00:00.000Z"
usage:
description:
- Total amount used of the resource metric type (CPU, STORAGE).
returned: on success
type: float
sample: 34.5
projected_data:
description:
- Time series data result of the forecasting analysis.
returned: on success
type: complex
contains:
end_timestamp:
description:
- The timestamp in which the current sampling period ends in RFC 3339 format.
returned: on success
type: str
sample: "2020-05-01T00:00:00.000Z"
usage:
description:
- Total amount used of the resource metric type (CPU, STORAGE).
returned: on success
type: float
sample: 34.5
high_value:
description:
- Upper uncertainty bound of the current usage value.
returned: on success
type: float
sample: 1.2
low_value:
description:
- Lower uncertainty bound of the current usage value.
returned: on success
type: float
sample: 1.2
sample: {
"time_interval_start": "2020-12-06T00:00:00.000Z",
"time_interval_end": "2020-12-06T00:00:00.000Z",
"resource_metric": "STORAGE",
"usage_unit": "CORES",
"pattern": "LINEAR",
"tablespace_name": "tablespace_name_example",
"historical_data": [{
"end_timestamp": "2020-05-01T00:00:00.000Z",
"usage": 34.5
}],
"projected_data": [{
"end_timestamp": "2020-05-01T00:00:00.000Z",
"usage": 34.5,
"high_value": 1.2,
"low_value": 1.2
}]
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
# The OCI Python SDK is an optional runtime dependency; record availability so
# main() can fail with a clear message instead of an ImportError traceback.
try:
    from oci.opsi import OperationsInsightsClient

    HAS_OCI_PY_SDK = True
except ImportError:
    HAS_OCI_PY_SDK = False
class ResourceForecastTrendFactsHelperGen(OCIResourceFactsHelperBase):
    """Supported operations: get"""

    def get_required_params_for_get(self):
        # Module parameters that must be present before a "get" is attempted.
        return [
            "compartment_id",
            "resource_metric",
        ]

    def get_resource(self):
        # Every optional module parameter accepted by the underlying
        # summarize_database_insight_resource_forecast_trend API operation.
        optional_get_method_params = [
            "analysis_time_interval",
            "time_interval_start",
            "time_interval_end",
            "database_type",
            "database_id",
            "id",
            "exadata_insight_id",
            "cdb_name",
            "statistic",
            "forecast_days",
            "forecast_model",
            "utilization_level",
            "confidence",
            "host_name",
            "tablespace_name",
            "is_database_instance_level_metrics",
            "defined_tag_equals",
            "freeform_tag_equals",
            "defined_tag_exists",
            "freeform_tag_exists",
            "compartment_id_in_subtree",
        ]
        # Forward only the options the user actually set (drop None values).
        optional_kwargs = dict(
            (param, self.module.params[param])
            for param in optional_get_method_params
            if self.module.params.get(param) is not None
        )
        # call_with_backoff retries the OCI call on transient/throttling errors.
        return oci_common_utils.call_with_backoff(
            self.client.summarize_database_insight_resource_forecast_trend,
            compartment_id=self.module.params.get("compartment_id"),
            resource_metric=self.module.params.get("resource_metric"),
            **optional_kwargs
        )
# Pick up user-supplied customizations when a matching custom class exists.
ResourceForecastTrendFactsHelperCustom = get_custom_class(
    "ResourceForecastTrendFactsHelperCustom"
)


class ResourceFactsHelper(
    ResourceForecastTrendFactsHelperCustom, ResourceForecastTrendFactsHelperGen
):
    # Custom class listed first so its overrides win over the generated code.
    pass
def main():
    """Module entry point: build the argument spec, validate, fetch the facts."""
    module_args = oci_common_utils.get_common_arg_spec()
    # Module-specific parameters; must mirror the DOCUMENTATION block above.
    module_args.update(
        dict(
            compartment_id=dict(type="str", required=True),
            resource_metric=dict(type="str", required=True),
            analysis_time_interval=dict(type="str"),
            time_interval_start=dict(type="str"),
            time_interval_end=dict(type="str"),
            database_type=dict(
                type="list",
                elements="str",
                choices=[
                    "ADW-S",
                    "ATP-S",
                    "ADW-D",
                    "ATP-D",
                    "EXTERNAL-PDB",
                    "EXTERNAL-NONCDB",
                ],
            ),
            database_id=dict(type="list", elements="str"),
            id=dict(type="list", elements="str"),
            exadata_insight_id=dict(type="list", elements="str"),
            cdb_name=dict(type="list", elements="str"),
            statistic=dict(type="str", choices=["AVG", "MAX"]),
            forecast_days=dict(type="int"),
            forecast_model=dict(
                type="str", choices=["LINEAR", "ML_AUTO", "ML_NO_AUTO"]
            ),
            utilization_level=dict(
                type="str",
                choices=[
                    "HIGH_UTILIZATION",
                    "LOW_UTILIZATION",
                    "MEDIUM_HIGH_UTILIZATION",
                    "MEDIUM_LOW_UTILIZATION",
                ],
            ),
            confidence=dict(type="int"),
            host_name=dict(type="list", elements="str"),
            tablespace_name=dict(type="str"),
            is_database_instance_level_metrics=dict(type="bool"),
            defined_tag_equals=dict(type="list", elements="str"),
            freeform_tag_equals=dict(type="list", elements="str"),
            defined_tag_exists=dict(type="list", elements="str"),
            freeform_tag_exists=dict(type="list", elements="str"),
            compartment_id_in_subtree=dict(type="bool"),
        )
    )

    module = AnsibleModule(argument_spec=module_args)

    # Fail fast with a readable message when the OCI SDK is not installed.
    if not HAS_OCI_PY_SDK:
        module.fail_json(msg="oci python sdk required for this module.")

    resource_facts_helper = ResourceFactsHelper(
        module=module,
        resource_type="resource_forecast_trend",
        service_client_class=OperationsInsightsClient,
        namespace="opsi",
    )

    result = []

    if resource_facts_helper.is_get():
        result = resource_facts_helper.get()
    else:
        # This facts module supports only the "get" operation.
        resource_facts_helper.fail()

    module.exit_json(resource_forecast_trend=result)


if __name__ == "__main__":
    main()
| 38.99384
| 157
| 0.597999
|
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Ansible module metadata: community-supported module still in preview status.
ANSIBLE_METADATA = {
    "metadata_version": "1.1",
    "status": ["preview"],
    "supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_opsi_resource_forecast_trend_facts
short_description: Fetches details about a ResourceForecastTrend resource in Oracle Cloud Infrastructure
description:
- Fetches details about a ResourceForecastTrend resource in Oracle Cloud Infrastructure
- Get Forecast predictions for CPU and Storage resources since a time in the past.
If compartmentIdInSubtree is specified, aggregates resources in a compartment and in all sub-compartments.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment.
type: str
required: true
resource_metric:
description:
- Filter by resource metric.
Supported values are CPU , STORAGE, MEMORY and IO.
type: str
required: true
analysis_time_interval:
description:
- Specify time period in ISO 8601 format with respect to current time.
Default is last 30 days represented by P30D.
If timeInterval is specified, then timeIntervalStart and timeIntervalEnd will be ignored.
Examples P90D (last 90 days), P4W (last 4 weeks), P2M (last 2 months), P1Y (last 12 months), . Maximum value allowed is 25 months prior to
current time (P25M).
type: str
time_interval_start:
description:
- Analysis start time in UTC in ISO 8601 format(inclusive).
Example 2019-10-30T00:00:00Z (yyyy-MM-ddThh:mm:ssZ).
The minimum allowed value is 2 years prior to the current day.
timeIntervalStart and timeIntervalEnd parameters are used together.
If analysisTimeInterval is specified, this parameter is ignored.
type: str
time_interval_end:
description:
- Analysis end time in UTC in ISO 8601 format(exclusive).
Example 2019-10-30T00:00:00Z (yyyy-MM-ddThh:mm:ssZ).
timeIntervalStart and timeIntervalEnd are used together.
If timeIntervalEnd is not specified, current time is used as timeIntervalEnd.
type: str
database_type:
description:
- Filter by one or more database type.
Possible values are ADW-S, ATP-S, ADW-D, ATP-D, EXTERNAL-PDB, EXTERNAL-NONCDB.
type: list
elements: str
choices:
- "ADW-S"
- "ATP-S"
- "ADW-D"
- "ATP-D"
- "EXTERNAL-PDB"
- "EXTERNAL-NONCDB"
database_id:
description:
- Optional list of database L(OCIDs,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the associated DBaaS entity.
type: list
elements: str
id:
description:
- Optional list of database insight resource L(OCIDs,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm).
type: list
elements: str
exadata_insight_id:
description:
- Optional list of exadata insight resource L(OCIDs,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm).
type: list
elements: str
cdb_name:
description:
- Filter by one or more cdb name.
type: list
elements: str
statistic:
description:
- Choose the type of statistic metric data to be used for forecasting.
type: str
choices:
- "AVG"
- "MAX"
forecast_days:
description:
- Number of days used for utilization forecast analysis.
type: int
forecast_model:
description:
- "Choose algorithm model for the forecasting.
Possible values:
- LINEAR: Uses linear regression algorithm for forecasting.
- ML_AUTO: Automatically detects best algorithm to use for forecasting.
- ML_NO_AUTO: Automatically detects seasonality of the data for forecasting using linear or seasonal algorithm."
type: str
choices:
- "LINEAR"
- "ML_AUTO"
- "ML_NO_AUTO"
utilization_level:
description:
- "Filter by utilization level by the following buckets:
- HIGH_UTILIZATION: DBs with utilization greater or equal than 75.
- LOW_UTILIZATION: DBs with utilization lower than 25.
- MEDIUM_HIGH_UTILIZATION: DBs with utilization greater or equal than 50 but lower than 75.
- MEDIUM_LOW_UTILIZATION: DBs with utilization greater or equal than 25 but lower than 50."
type: str
choices:
- "HIGH_UTILIZATION"
- "LOW_UTILIZATION"
- "MEDIUM_HIGH_UTILIZATION"
- "MEDIUM_LOW_UTILIZATION"
confidence:
description:
- This parameter is used to change data's confidence level, this data is ingested by the
forecast algorithm.
Confidence is the probability of an interval to contain the expected population parameter.
Manipulation of this value will lead to different results.
If not set, default confidence value is 95%.
type: int
host_name:
description:
- Filter by one or more hostname.
type: list
elements: str
tablespace_name:
description:
- Tablespace name for a database
type: str
is_database_instance_level_metrics:
description:
- Flag to indicate if database instance level metrics should be returned. The flag is ignored when a host name filter is not applied.
When a hostname filter is applied this flag will determine whether to return metrics for the instances located on the specified host or for the
whole database which contains an instance on this host.
type: bool
defined_tag_equals:
description:
- "A list of tag filters to apply. Only resources with a defined tag matching the value will be returned.
Each item in the list has the format \\"{namespace}.{tagName}.{value}\\". All inputs are case-insensitive.
Multiple values for the same key (i.e. same namespace and tag name) are interpreted as \\"OR\\".
Values for different keys (i.e. different namespaces, different tag names, or both) are interpreted as \\"AND\\"."
type: list
elements: str
freeform_tag_equals:
description:
- "A list of tag filters to apply. Only resources with a freeform tag matching the value will be returned.
The key for each tag is \\"{tagName}.{value}\\". All inputs are case-insensitive.
Multiple values for the same tag name are interpreted as \\"OR\\". Values for different tag names are interpreted as \\"AND\\"."
type: list
elements: str
defined_tag_exists:
description:
- "A list of tag existence filters to apply. Only resources for which the specified defined tags exist will be returned.
Each item in the list has the format \\"{namespace}.{tagName}.true\\" (for checking existence of a defined tag)
or \\"{namespace}.true\\". All inputs are case-insensitive.
Currently, only existence (\\"true\\" at the end) is supported. Absence (\\"false\\" at the end) is not supported.
Multiple values for the same key (i.e. same namespace and tag name) are interpreted as \\"OR\\".
Values for different keys (i.e. different namespaces, different tag names, or both) are interpreted as \\"AND\\"."
type: list
elements: str
freeform_tag_exists:
description:
- "A list of tag existence filters to apply. Only resources for which the specified freeform tags exist the value will be returned.
The key for each tag is \\"{tagName}.true\\". All inputs are case-insensitive.
Currently, only existence (\\"true\\" at the end) is supported. Absence (\\"false\\" at the end) is not supported.
Multiple values for different tag names are interpreted as \\"AND\\"."
type: list
elements: str
compartment_id_in_subtree:
description:
- A flag to search all resources within a given compartment and all sub-compartments.
type: bool
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: Get a specific resource_forecast_trend
oci_opsi_resource_forecast_trend_facts:
# required
compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
resource_metric: resource_metric_example
# optional
analysis_time_interval: analysis_time_interval_example
time_interval_start: 2013-10-20T19:20:30+01:00
time_interval_end: 2013-10-20T19:20:30+01:00
database_type: [ "$p.getValue()" ]
database_id: [ "$p.getValue()" ]
id: [ "$p.getValue()" ]
exadata_insight_id: [ "$p.getValue()" ]
cdb_name: [ "$p.getValue()" ]
statistic: AVG
forecast_days: 56
forecast_model: LINEAR
utilization_level: HIGH_UTILIZATION
confidence: 56
host_name: [ "$p.getValue()" ]
tablespace_name: tablespace_name_example
is_database_instance_level_metrics: true
defined_tag_equals: [ "$p.getValue()" ]
freeform_tag_equals: [ "$p.getValue()" ]
defined_tag_exists: [ "$p.getValue()" ]
freeform_tag_exists: [ "$p.getValue()" ]
compartment_id_in_subtree: true
"""
RETURN = """
resource_forecast_trend:
description:
- ResourceForecastTrend resource
returned: on success
type: complex
contains:
time_interval_start:
description:
- The start timestamp that was passed into the request.
returned: on success
type: str
sample: "2020-12-06T00:00:00.000Z"
time_interval_end:
description:
- The end timestamp that was passed into the request.
returned: on success
type: str
sample: "2020-12-06T00:00:00.000Z"
resource_metric:
description:
- "Defines the type of resource metric (example: CPU, STORAGE)"
returned: on success
type: str
sample: STORAGE
usage_unit:
description:
- Displays usage unit ( CORES, GB)
returned: on success
type: str
sample: CORES
pattern:
description:
- Time series patterns used in the forecasting.
returned: on success
type: str
sample: LINEAR
tablespace_name:
description:
- The name of tablespace.
returned: on success
type: str
sample: tablespace_name_example
historical_data:
description:
- Time series data used for the forecast analysis.
returned: on success
type: complex
contains:
end_timestamp:
description:
- The timestamp in which the current sampling period ends in RFC 3339 format.
returned: on success
type: str
sample: "2020-05-01T00:00:00.000Z"
usage:
description:
- Total amount used of the resource metric type (CPU, STORAGE).
returned: on success
type: float
sample: 34.5
projected_data:
description:
- Time series data result of the forecasting analysis.
returned: on success
type: complex
contains:
end_timestamp:
description:
- The timestamp in which the current sampling period ends in RFC 3339 format.
returned: on success
type: str
sample: "2020-05-01T00:00:00.000Z"
usage:
description:
- Total amount used of the resource metric type (CPU, STORAGE).
returned: on success
type: float
sample: 34.5
high_value:
description:
- Upper uncertainty bound of the current usage value.
returned: on success
type: float
sample: 1.2
low_value:
description:
- Lower uncertainty bound of the current usage value.
returned: on success
type: float
sample: 1.2
sample: {
"time_interval_start": "2020-12-06T00:00:00.000Z",
"time_interval_end": "2020-12-06T00:00:00.000Z",
"resource_metric": "STORAGE",
"usage_unit": "CORES",
"pattern": "LINEAR",
"tablespace_name": "tablespace_name_example",
"historical_data": [{
"end_timestamp": "2020-05-01T00:00:00.000Z",
"usage": 34.5
}],
"projected_data": [{
"end_timestamp": "2020-05-01T00:00:00.000Z",
"usage": 34.5,
"high_value": 1.2,
"low_value": 1.2
}]
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
# The OCI Python SDK is an optional runtime dependency; record availability so
# main() can fail with a clear message instead of an ImportError traceback.
try:
    from oci.opsi import OperationsInsightsClient

    HAS_OCI_PY_SDK = True
except ImportError:
    HAS_OCI_PY_SDK = False
class ResourceForecastTrendFactsHelperGen(OCIResourceFactsHelperBase):
    """Facts helper for ResourceForecastTrend; supports the "get" operation."""

    def get_required_params_for_get(self):
        # Module parameters that must be present before a "get" is attempted.
        return [
            "compartment_id",
            "resource_metric",
        ]

    def get_resource(self):
        # Every optional module parameter accepted by the underlying
        # summarize_database_insight_resource_forecast_trend API operation.
        optional_get_method_params = [
            "analysis_time_interval",
            "time_interval_start",
            "time_interval_end",
            "database_type",
            "database_id",
            "id",
            "exadata_insight_id",
            "cdb_name",
            "statistic",
            "forecast_days",
            "forecast_model",
            "utilization_level",
            "confidence",
            "host_name",
            "tablespace_name",
            "is_database_instance_level_metrics",
            "defined_tag_equals",
            "freeform_tag_equals",
            "defined_tag_exists",
            "freeform_tag_exists",
            "compartment_id_in_subtree",
        ]
        # Forward only the options the user actually set (drop None values).
        optional_kwargs = dict(
            (param, self.module.params[param])
            for param in optional_get_method_params
            if self.module.params.get(param) is not None
        )
        # call_with_backoff retries the OCI call on transient/throttling errors.
        return oci_common_utils.call_with_backoff(
            self.client.summarize_database_insight_resource_forecast_trend,
            compartment_id=self.module.params.get("compartment_id"),
            resource_metric=self.module.params.get("resource_metric"),
            **optional_kwargs
        )
# Pick up user-supplied customizations when a matching custom class exists.
ResourceForecastTrendFactsHelperCustom = get_custom_class(
    "ResourceForecastTrendFactsHelperCustom"
)


class ResourceFactsHelper(
    ResourceForecastTrendFactsHelperCustom, ResourceForecastTrendFactsHelperGen
):
    # Custom class listed first so its overrides win over the generated code.
    pass
def main():
    """Module entry point: build the argument spec, validate, fetch the facts."""
    module_args = oci_common_utils.get_common_arg_spec()
    # Module-specific parameters; must mirror the module's documented options.
    module_args.update(
        dict(
            compartment_id=dict(type="str", required=True),
            resource_metric=dict(type="str", required=True),
            analysis_time_interval=dict(type="str"),
            time_interval_start=dict(type="str"),
            time_interval_end=dict(type="str"),
            database_type=dict(
                type="list",
                elements="str",
                choices=[
                    "ADW-S",
                    "ATP-S",
                    "ADW-D",
                    "ATP-D",
                    "EXTERNAL-PDB",
                    "EXTERNAL-NONCDB",
                ],
            ),
            database_id=dict(type="list", elements="str"),
            id=dict(type="list", elements="str"),
            exadata_insight_id=dict(type="list", elements="str"),
            cdb_name=dict(type="list", elements="str"),
            statistic=dict(type="str", choices=["AVG", "MAX"]),
            forecast_days=dict(type="int"),
            forecast_model=dict(
                type="str", choices=["LINEAR", "ML_AUTO", "ML_NO_AUTO"]
            ),
            utilization_level=dict(
                type="str",
                choices=[
                    "HIGH_UTILIZATION",
                    "LOW_UTILIZATION",
                    "MEDIUM_HIGH_UTILIZATION",
                    "MEDIUM_LOW_UTILIZATION",
                ],
            ),
            confidence=dict(type="int"),
            host_name=dict(type="list", elements="str"),
            tablespace_name=dict(type="str"),
            is_database_instance_level_metrics=dict(type="bool"),
            defined_tag_equals=dict(type="list", elements="str"),
            freeform_tag_equals=dict(type="list", elements="str"),
            defined_tag_exists=dict(type="list", elements="str"),
            freeform_tag_exists=dict(type="list", elements="str"),
            compartment_id_in_subtree=dict(type="bool"),
        )
    )

    module = AnsibleModule(argument_spec=module_args)

    # Fail fast with a readable message when the OCI SDK is not installed.
    if not HAS_OCI_PY_SDK:
        module.fail_json(msg="oci python sdk required for this module.")

    resource_facts_helper = ResourceFactsHelper(
        module=module,
        resource_type="resource_forecast_trend",
        service_client_class=OperationsInsightsClient,
        namespace="opsi",
    )

    result = []

    if resource_facts_helper.is_get():
        result = resource_facts_helper.get()
    else:
        # This facts module supports only the "get" operation.
        resource_facts_helper.fail()

    module.exit_json(resource_forecast_trend=result)


if __name__ == "__main__":
    main()
| true
| true
|
f71767d27ceb9c0a11fa4ade0519de1ef382cf0f
| 1,385
|
py
|
Python
|
2020/src/day6.py
|
vionion/advent-of-code-2019
|
c3389694a42e9b1d978f82c6fb42925465799734
|
[
"MIT"
] | null | null | null |
2020/src/day6.py
|
vionion/advent-of-code-2019
|
c3389694a42e9b1d978f82c6fb42925465799734
|
[
"MIT"
] | null | null | null |
2020/src/day6.py
|
vionion/advent-of-code-2019
|
c3389694a42e9b1d978f82c6fb42925465799734
|
[
"MIT"
] | null | null | null |
from typing import List, Set
from io_utils import read_input_file
def day6_1():
    """Advent of Code day 6, part 1.

    Return the total, over all passenger groups, of distinct questions that
    *anyone* in the group answered "yes" to.
    """
    raw_lines = read_input_file("day6.txt", input_type=str)
    yes_sets = get_all_yes_answers_per_group(raw_lines)
    return sum(len(yes_set) for yes_set in yes_sets)
def get_all_yes_answers_per_group(input_list):
    """Collect, per group, the set of questions *anyone* answered "yes" to.

    Groups are separated by blank lines; every other line lists the question
    letters one person answered "yes" to (trailing whitespace is stripped).

    :param input_list: iterable of raw text lines, e.g. from ``readlines()``
    :return: list with one ``set`` of question letters per group
    """
    answers: List[Set[str]] = []
    i = 0
    for line in input_list:
        if line == "\n":
            # Blank separator: subsequent answer lines belong to a new group.
            i += 1
            continue
        # Pad with empty groups so repeated blank separators are tolerated
        # (the original indexed past the end and raised IndexError).
        while len(answers) <= i:
            answers.append(set())
        # set.update iterates the string character by character, replacing
        # the original explicit per-character loop.
        answers[i].update(line.strip())
    return answers
def get_common_yes_answers_per_group(input_list):
    """Collect, per group, the set of questions *everyone* answered "yes" to.

    Groups are separated by blank lines; the first answer line of a group
    seeds its set and every later line intersects it down.

    :param input_list: iterable of raw text lines, e.g. from ``readlines()``
    :return: list with one ``set`` of question letters per group
    """
    groups: List[Set[str]] = []
    group_index = 0
    for raw_line in input_list:
        if raw_line == "\n":
            group_index += 1
            continue
        letters = set(raw_line.strip())
        if group_index == len(groups):
            groups.append(letters)
        else:
            groups[group_index] &= letters
    return groups
def day6_2():
    """Advent of Code day 6, part 2.

    Return the total, over all passenger groups, of questions that *everyone*
    in the group answered "yes" to.
    """
    raw_lines = read_input_file("day6.txt", input_type=str)
    common_sets = get_common_yes_answers_per_group(raw_lines)
    return sum(len(common_set) for common_set in common_sets)
| 26.634615
| 60
| 0.61083
|
from typing import List, Set
from io_utils import read_input_file
def day6_1():
input_list = read_input_file("day6.txt", input_type=str)
answers = get_all_yes_answers_per_group(input_list)
amount_yes_answers = 0
for answers_per_group in answers:
amount_yes_answers += len(answers_per_group)
return amount_yes_answers
def get_all_yes_answers_per_group(input_list):
answers: List[Set[str]] = []
i = 0
for line in input_list:
if line == "\n":
i += 1
else:
if i == len(answers):
answers.append(set())
line = line.strip()
for char in line:
answers[i].add(char)
return answers
def get_common_yes_answers_per_group(input_list):
answers: List[Set[str]] = []
i = 0
for line in input_list:
if line == "\n":
i += 1
else:
line = line.strip()
if i == len(answers):
answers.append(set(line))
else:
answers[i] = answers[i].intersection(line)
return answers
def day6_2():
input_list = read_input_file("day6.txt", input_type=str)
answers = get_common_yes_answers_per_group(input_list)
amount_yes_answers = 0
for answers_per_group in answers:
amount_yes_answers += len(answers_per_group)
return amount_yes_answers
| true
| true
|
f71768dfa296b62f40248813c15aa926044590df
| 360
|
py
|
Python
|
votes/migrations/0002_auto_20190529_0721.py
|
isidaruk/eurovision_project
|
976743e66a2fed17c0513f17a9a7d35850e9cde5
|
[
"MIT"
] | null | null | null |
votes/migrations/0002_auto_20190529_0721.py
|
isidaruk/eurovision_project
|
976743e66a2fed17c0513f17a9a7d35850e9cde5
|
[
"MIT"
] | 8
|
2020-02-12T00:23:27.000Z
|
2022-03-08T21:10:13.000Z
|
votes/migrations/0002_auto_20190529_0721.py
|
isidaruk/eurovision_project
|
976743e66a2fed17c0513f17a9a7d35850e9cde5
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.1 on 2019-05-29 07:21
from django.db import migrations
class Migration(migrations.Migration):
    """Rename ``Vote.to_country`` to ``Vote.to_participant``.

    RenameField keeps the existing column data; only the field (and the
    backing column) name changes.
    """

    dependencies = [
        ('votes', '0001_initial'),
    ]
    operations = [
        migrations.RenameField(
            model_name='vote',
            old_name='to_country',
            new_name='to_participant',
        ),
    ]
| 18.947368
| 47
| 0.580556
|
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('votes', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='vote',
old_name='to_country',
new_name='to_participant',
),
]
| true
| true
|
f717690a322e1a696f5c7c83ea215426620aa34e
| 673
|
py
|
Python
|
src/data/456.py
|
NULLCT/LOMC
|
79a16474a8f21310e0fb47e536d527dd5dc6d655
|
[
"MIT"
] | null | null | null |
src/data/456.py
|
NULLCT/LOMC
|
79a16474a8f21310e0fb47e536d527dd5dc6d655
|
[
"MIT"
] | null | null | null |
src/data/456.py
|
NULLCT/LOMC
|
79a16474a8f21310e0fb47e536d527dd5dc6d655
|
[
"MIT"
] | null | null | null |
from collections import deque
# Tree with n vertices (n-1 edges) and m queries.  BFS 2-colors the tree by
# depth parity; a query pair with different colors is at odd distance.
n, m = map(int, input().split())
graph = [list() for _ in range(n)]
for _ in range(n - 1):
    u, k = [int(x) for x in input().split()]  # one tree edge (endpoints u, k)
    u, k = u - 1, k - 1  # convert to 0-based vertex ids
    graph[u].append(k)
    graph[k].append(u)  # undirected graph: store the edge in both lists
dist = [-1] * n  # depth parity (0/1); -1 means not yet visited
dist[0] = 0  # BFS starts at vertex 0
q = deque()
q.append(0)  # start vertex is 0
while q:  # until the queue is empty
    v = q.popleft()
    for x in graph[v]:
        if dist[x] != -1:  # already colored, skip
            continue
        dist[x] = 1 - dist[v]  # a child gets the opposite parity
        q.append(x)
for i in range(m):
    c, d = map(int, input().split())
    c, d = c - 1, d - 1
    if dist[c] != dist[d]:  # different parity -> odd path length between them
        print("Road")
    else:
        print("Town")
| 21.03125
| 64
| 0.499257
|
from collections import deque
n, m = map(int, input().split())
graph = [list() for _ in range(n)]
for _ in range(n - 1):
u, k = [int(x) for x in input().split()]
u, k = u - 1, k - 1
graph[u].append(k)
graph[k].append(u)
dist = [-1] * n
dist[0] = 0
q = deque()
q.append(0)
while q:
v = q.popleft()
for x in graph[v]:
if dist[x] != -1:
continue
dist[x] = 1 - dist[v]
q.append(x)
for i in range(m):
c, d = map(int, input().split())
c, d = c - 1, d - 1
if dist[c] != dist[d]:
print("Road")
else:
print("Town")
| true
| true
|
f71769abced2b28f75a37afaba30e87febaaf7f8
| 2,143
|
py
|
Python
|
Tracker/update.py
|
nordwind80/BT-Tracker
|
558c15b399871c1ca11d0c4ae1eb598e3060931e
|
[
"MIT"
] | 1
|
2019-05-05T06:46:27.000Z
|
2019-05-05T06:46:27.000Z
|
Tracker/update.py
|
nordwind80/BT-Tracker
|
558c15b399871c1ca11d0c4ae1eb598e3060931e
|
[
"MIT"
] | null | null | null |
Tracker/update.py
|
nordwind80/BT-Tracker
|
558c15b399871c1ca11d0c4ae1eb598e3060931e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
#
# Author: eaglewings
# E-Mail: ZWFnbGV3aW5ncy55aUBnbWFpbC5jb20=
# Created Time: 2019-04-17 15:17
# Last Modified:
# Description:
# - Project: BT Trackers Updater
# - File Name: update.py
# - Trackers Updater
import os
import re
from typing import NoReturn
class Filer(object):
    """Locate (and create, if needed) the user's aria2 config directory/file.

    The config file is always ``$HOME/.aria2/aria2.conf``.
    """

    def __init__(self):
        self._aria2_path = "/.aria2/"
        self._file_name = "aria2.conf"
        self._file_path = f"{self._get_home}{self._aria2_path}"

    @property
    def _get_home(self) -> str:
        """Return the user's $HOME path."""
        return os.path.expanduser("~")

    @property
    def get_path(self) -> str:
        """Return the full path of the aria2 config file."""
        return f"{self._file_path}{self._file_name}"

    def _create_dir(self) -> None:
        # exist_ok closes the race between the caller's existence check and
        # the mkdir (the old code caught FileExistsError and merely printed,
        # contradicting its caller's documentation).  Other OSErrors (e.g.
        # permission denied) propagate, as before.
        os.makedirs(self._file_path, exist_ok=True)

    def check_dirctory(self) -> bool:
        """Ensure the aria2 directory exists.

        Returns True if it already existed, otherwise creates it and returns
        False.  (Method name kept as-is -- historical misspelling of
        "directory" -- for backward compatibility with existing callers.)
        """
        if os.path.exists(f"{self._file_path}"):
            return True
        else:
            self._create_dir()
            return False
class Updater(object):
    """Rewrites the ``bt-tracker=`` entry of an aria2 config file in place."""

    def __init__(self, path: str, trackers: str):
        self._path = path
        self._trackers = trackers

    def start(self) -> NoReturn:
        """Replace every ``bt-tracker=`` line with the new tracker list.

        If the file contains no such line, one is appended at the end.
        """
        replacement = f"bt-tracker={self._trackers}\n"
        replaced = False
        with open(self._path, "r+") as config:
            original_lines = config.readlines()
            # Rewind and truncate, then write the file back line by line.
            config.seek(0)
            config.truncate()
            for original in original_lines:
                if re.search(r"bt-tracker=.*", original):
                    config.write(replacement)
                    replaced = True
                else:
                    config.write(original)
            if not replaced:
                config.write(replacement)
| 26.45679
| 95
| 0.527765
|
import os
import re
from typing import NoReturn
class Filer(object):
def __init__(self):
self._aria2_path = "/.aria2/"
self._file_name = "aria2.conf"
self._file_path = f"{self._get_home}{self._aria2_path}"
@property
def _get_home(self) -> str:
return os.path.expanduser("~")
@property
def get_path(self) -> str:
return f"{self._file_path}{self._file_name}"
def _create_dir(self) -> NoReturn:
try:
os.mkdir(f"{self._file_path}")
except FileExistsError as why:
print(f"Create directory failed. {why}")
def check_dirctory(self) -> bool:
if os.path.exists(f"{self._file_path}"):
return True
else:
self._create_dir()
return False
class Updater(object):
def __init__(self, path: str, trackers: str):
self._path = path
self._trackers = trackers
def start(self) -> NoReturn:
check = False
with open(self._path, "r+") as file:
lines = file.readlines()
file.seek(0)
file.truncate()
for line in lines:
if re.search(r"bt-tracker=.*", line):
line = line.replace(line, f"bt-tracker={self._trackers}\n")
file.write(line)
check = True
else:
file.write(line)
else:
if check:
return
else:
file.write(f"bt-tracker={self._trackers}\n")
| true
| true
|
f7176bc8b9827c81ae4f1e4df6897e9563ad218f
| 2,628
|
py
|
Python
|
source/boundaryconds.py
|
agstub/sglake-detectability
|
5556250a59d7f500bcee86899dd9a497a368faca
|
[
"MIT"
] | 1
|
2021-05-27T12:24:35.000Z
|
2021-05-27T12:24:35.000Z
|
source/boundaryconds.py
|
ldeo-glaciology/sglake-detectability
|
5556250a59d7f500bcee86899dd9a497a368faca
|
[
"MIT"
] | null | null | null |
source/boundaryconds.py
|
ldeo-glaciology/sglake-detectability
|
5556250a59d7f500bcee86899dd9a497a368faca
|
[
"MIT"
] | null | null | null |
#-------------------------------------------------------------------------------
# This file contains functions that:
# (1) define the boundaries (ice-air,ice-water,ice-bed) of the mesh, AND...
# (2) mark the boundaries of the mesh
#-------------------------------------------------------------------------------
from params import tol,Lngth,Hght
from geometry import bed
import numpy as np
from dolfin import *
#-------------------------------------------------------------------------------
# Define SubDomains for ice-water boundary, ice-bed boundary, inflow (x=0) and
# outflow (x=Length of domain). The parameter 'tol' is a minimal water depth
# used to distinguish the ice-water and ice-bed surfaces.
class WaterBoundary(SubDomain):
    # Ice-water boundary: any boundary point in the lower half of the domain
    # (x[1] < Hght/2).  This boundary is marked first (see mark_boundary) and
    # all of the irrelevant portions are overwritten by the other markers.
    def inside(self, x, on_boundary):
        return (on_boundary and (x[1]<0.5*Hght))
class BedBoundary(SubDomain):
    # Ice-bed boundary: boundary points within `tol` of the bed elevation
    # bed(x[0]).  In mark_boundary this is marked after WaterBoundary, so it
    # overwrites the water mark where the two overlap.  Lifting of ice from
    # the bed *is not* allowed on this boundary.
    # NOTE(review): the original comment mentioned a `BasinBoundary` class
    # that does not exist in this file -- presumably removed upstream.
    def inside(self, x, on_boundary):
        return (on_boundary and ((x[1]-bed(x[0]))<=tol))
class LeftBoundary(SubDomain):
    # Left (inflow) boundary: x = 0 to within `tol`.
    def inside(self, x, on_boundary):
        return (on_boundary and np.abs(x[0])<tol)
class RightBoundary(SubDomain):
    # Right (outflow) boundary: x = Lngth to within `tol`.
    def inside(self, x, on_boundary):
        return (on_boundary and np.abs(x[0]-Lngth)<tol)
#-------------------------------------------------------------------------------
def mark_boundary(mesh):
    """Assign integer markers to each boundary segment (except the upper surface).

    Called at each time step to refresh the markers.  Numbering convention:
      1 - Left boundary (inflow, x = 0)
      2 - Right boundary (outflow, x = Lngth)
      3 - Ice-bed boundary
      4 - Ice-water boundary
    The returned MeshFunction is used to define the boundary integrals and
    Dirichlet conditions.  Marking order matters: each later mark overwrites
    earlier marks on overlapping facets.
    """
    boundary_markers = MeshFunction('size_t', mesh,dim=1)
    boundary_markers.set_all(0)
    # Mark ice-water boundary first; the remaining marks overwrite the
    # portions of it that belong to other segments.
    bdryWater = WaterBoundary()
    bdryWater.mark(boundary_markers, 4)
    # Mark ice-bed boundary away from lake
    bdryBed = BedBoundary()
    bdryBed.mark(boundary_markers, 3)
    # Mark inflow boundary
    bdryLeft = LeftBoundary()
    bdryLeft.mark(boundary_markers, 1)
    # Mark outflow boundary
    bdryRight = RightBoundary()
    bdryRight.mark(boundary_markers, 2)
    return boundary_markers
| 35.04
| 85
| 0.614916
|
from params import tol,Lngth,Hght
from geometry import bed
import numpy as np
from dolfin import *
class WaterBoundary(SubDomain):
def inside(self, x, on_boundary):
return (on_boundary and (x[1]<0.5*Hght))
class BedBoundary(SubDomain):
def inside(self, x, on_boundary):
return (on_boundary and ((x[1]-bed(x[0]))<=tol))
class LeftBoundary(SubDomain):
def inside(self, x, on_boundary):
return (on_boundary and np.abs(x[0])<tol)
class RightBoundary(SubDomain):
def inside(self, x, on_boundary):
return (on_boundary and np.abs(x[0]-Lngth)<tol)
def mark_boundary(mesh):
boundary_markers = MeshFunction('size_t', mesh,dim=1)
boundary_markers.set_all(0)
bdryWater = WaterBoundary()
bdryWater.mark(boundary_markers, 4)
bdryBed = BedBoundary()
bdryBed.mark(boundary_markers, 3)
bdryLeft = LeftBoundary()
bdryLeft.mark(boundary_markers, 1)
bdryRight = RightBoundary()
bdryRight.mark(boundary_markers, 2)
return boundary_markers
| true
| true
|
f7176c29d3c8975aef635b4e3270b662412a46af
| 1,825
|
py
|
Python
|
data_mining/dataset/main.py
|
basantbhandari/LaptopPricePrediction
|
086cfaf99b7c625345d5d383ba7f7e2109821c43
|
[
"MIT"
] | null | null | null |
data_mining/dataset/main.py
|
basantbhandari/LaptopPricePrediction
|
086cfaf99b7c625345d5d383ba7f7e2109821c43
|
[
"MIT"
] | null | null | null |
data_mining/dataset/main.py
|
basantbhandari/LaptopPricePrediction
|
086cfaf99b7c625345d5d383ba7f7e2109821c43
|
[
"MIT"
] | null | null | null |
print("Scrape the dataset from...")
# import the necessary libraries (pandas is kept even though unused here,
# since other parts of the project may rely on this module importing it)
from bs4 import BeautifulSoup
import requests
import pandas as pd

# Decorative console separator (was repeated inline ten times).
SEPARATOR = "############################################################"

# Source page: each laptop series has an <h2> heading followed by a
# spec/price table rendered as a 'su-table su-table-alternate' div.
url = 'https://www.gadgetbytenepal.com/category/laptop-price-in-nepal/'

# Request the page and parse the HTML contents.
req = requests.get(url)
req.raise_for_status()  # fail fast on HTTP errors instead of parsing an error page
soup = BeautifulSoup(req.content, 'html.parser')

desired_content = soup.find(class_='td-category-description')
print(SEPARATOR)
data_header = desired_content.find_all('h2')  # series headings (not written to disk)
data_items = desired_content.find_all('div', class_='su-table su-table-alternate')
print(SEPARATOR)

# Write every table row's text to alldata.txt.  The context manager
# guarantees the file is closed even if parsing fails part-way (the
# original opened the file at the top and leaked it on any exception).
row_count = 0
with open("alldata.txt", "w") as outfile:
    for item in data_items:
        print(SEPARATOR)
        for tabledata in item.find_all('tr'):
            row_text = tabledata.get_text()
            print(row_text)
            outfile.write(row_text)
            row_count += 1
            print("\n", row_count)
        print(SEPARATOR)
| 19.623656
| 84
| 0.447671
|
print("Scrape the dataset from...")
from bs4 import BeautifulSoup
import requests
import pandas as pd
url='https://www.gadgetbytenepal.com/category/laptop-price-in-nepal/'
file1 = open("alldata.txt","w")
req=requests.get(url)
htmlcontent=req.content
soup=BeautifulSoup(htmlcontent, 'html.parser')
desired_content = soup.find(class_='td-category-description')
print("############################################################")
print("############################################################")
data_header = desired_content.find_all('h2')
print("############################################################")
print("############################################################")
print("############################################################")
print("############################################################")
data_items = desired_content.find_all('div', class_ = 'su-table su-table-alternate')
print("############################################################")
print("############################################################")
print("############################################################")
i=0
for item in data_items:
print("############################################################")
eachrow = item.find_all('tr')
for tabledata in eachrow:
print(tabledata.get_text())
file1.writelines(tabledata.get_text())
i=i+1
print("\n",i)
print("############################################################")
file1.close()
| true
| true
|
f7176c890702cb23e4dd9472f56d2732e8d22b76
| 7,927
|
py
|
Python
|
tests_python/tests_008/test_basic.py
|
arvidj/tezos
|
9d9e75425ebd603e9e6b9158d573424cd74e9a30
|
[
"MIT"
] | null | null | null |
tests_python/tests_008/test_basic.py
|
arvidj/tezos
|
9d9e75425ebd603e9e6b9158d573424cd74e9a30
|
[
"MIT"
] | null | null | null |
tests_python/tests_008/test_basic.py
|
arvidj/tezos
|
9d9e75425ebd603e9e6b9158d573424cd74e9a30
|
[
"MIT"
] | null | null | null |
from os import path
import pytest
from client.client import Client
from tools import utils
from tools.paths import ACCOUNT_PATH
from tools.utils import assert_run_failure
from .contract_paths import CONTRACT_PATH
TRANSFER_ARGS = ['--burn-cap', '0.257']
@pytest.mark.incremental
class TestRawContext:
    """Basic client/node integration tests.

    Marked ``incremental``: the tests share one sandboxed node/client and
    build on each other (keys generated in ``test_gen_keys`` are funded and
    spent by the later transfer tests), so an early failure skips the rest.
    """

    def test_delegates(self, client: Client):
        # Raw-context RPC with a depth limit returns a nested dict of
        # key-hash prefixes with None leaves at the cutoff depth.
        path = '/chains/main/blocks/head/context/raw/bytes/delegates/?depth=3'
        res = client.rpc('get', path)
        expected = {
            "ed25519": {
                "02": {"29": None},
                "a9": {"ce": None},
                "c5": {"5c": None},
                "da": {"c9": None},
                "e7": {"67": None},
            }
        }
        assert res == expected
    def test_no_service_1(self, client: Client):
        # Unknown raw-context path must be rejected.
        path = '/chains/main/blocks/head/context/raw/bytes/non-existent'
        with assert_run_failure('No service found at this URL'):
            client.rpc('get', path)
    def test_no_service_2(self, client: Client):
        # A negative depth is rejected before path resolution.
        path = (
            '/chains/main/blocks/head/context/raw/bytes/'
            'non-existent?depth=-1'
        )
        expected = 'Command failed: Extraction depth -1 is invalid'
        with assert_run_failure(expected):
            client.rpc('get', path)
    def test_no_service_3(self, client: Client):
        path = '/chains/main/blocks/head/context/raw/bytes/non-existent?depth=0'
        with assert_run_failure('No service found at this URL'):
            client.rpc('get', path)
    def test_bake(self, client: Client):
        utils.bake(client, 'bootstrap4')
    def test_gen_keys(self, client: Client, session):
        # Generate three keys; None means the client's default signature
        # scheme, the others are requested explicitly.
        session['keys'] = ['foo', 'bar', 'boo']
        sigs = [None, 'secp256k1', 'ed25519']
        for key, sig in zip(session['keys'], sigs):
            args = [] if sig is None else ['--sig', sig]
            client.gen_key(key, args)
    def test_transfers(self, client: Client, session):
        # Fund the generated keys from bootstrap1, baking after each
        # transfer so the next operation sees the updated balances.
        client.transfer(1000, 'bootstrap1', session['keys'][0], TRANSFER_ARGS)
        utils.bake(client)
        client.transfer(2000, 'bootstrap1', session['keys'][1], TRANSFER_ARGS)
        utils.bake(client)
        client.transfer(3000, 'bootstrap1', session['keys'][2], TRANSFER_ARGS)
        utils.bake(client)
    def test_balances(self, client: Client, session):
        assert client.get_balance(session['keys'][0]) == 1000
        assert client.get_balance(session['keys'][1]) == 2000
        assert client.get_balance(session['keys'][2]) == 3000
    def test_transfer_bar_foo(self, client: Client, session):
        # Zero-fee transfer needs --force-low-fee to bypass the fee check.
        client.transfer(
            1000,
            session['keys'][1],
            session['keys'][0],
            ['--fee', '0', '--force-low-fee'],
        )
        utils.bake(client)
    def test_balances_bar_foo(self, client: Client, session):
        assert client.get_balance(session['keys'][0]) == 2000
        assert client.get_balance(session['keys'][1]) == 1000
    def test_transfer_foo_bar(self, client: Client, session):
        client.transfer(
            1000, session['keys'][0], session['keys'][1], ['--fee', '0.05']
        )
        utils.bake(client)
    def test_balances_foo_bar(self, client: Client, session):
        # foo paid the 0.05 fee on top of the 1000 transfer.
        assert client.get_balance(session['keys'][0]) == 999.95
        assert client.get_balance(session['keys'][1]) == 2000
    def test_transfer_failure(self, client: Client, session):
        # Spending the full balance must fail (no funds left for the fee).
        with pytest.raises(Exception):
            client.transfer(999.95, session['keys'][0], session['keys'][1])
    def test_originate_contract_noop(self, client: Client):
        contract = path.join(CONTRACT_PATH, 'opcodes', 'noop.tz')
        client.remember('noop', contract)
        client.typecheck(contract)
        client.originate(
            'noop', 1000, 'bootstrap1', contract, ['--burn-cap', '0.295']
        )
        utils.bake(client)
    def test_transfer_to_noop(self, client: Client):
        client.transfer(10, 'bootstrap1', 'noop', ['--arg', 'Unit'])
        utils.bake(client)
    def test_contract_hardlimit(self, client: Client):
        # hardlimit.tz accepts a bounded number of calls (initialized to 3).
        contract = path.join(CONTRACT_PATH, 'mini_scenarios', 'hardlimit.tz')
        client.originate(
            'hardlimit',
            1000,
            'bootstrap1',
            contract,
            ['--init', '3', '--burn-cap', '0.341'],
        )
        utils.bake(client)
        client.transfer(10, 'bootstrap1', 'hardlimit', ['--arg', 'Unit'])
        utils.bake(client)
        client.transfer(10, 'bootstrap1', 'hardlimit', ['--arg', 'Unit'])
        utils.bake(client)
    def test_transfers_bootstraps5_bootstrap1(self, client: Client):
        # Round-trip transfer with zero fees leaves the balance unchanged.
        assert client.get_balance('bootstrap5') == 4000000
        client.transfer(
            400000,
            'bootstrap5',
            'bootstrap1',
            ['--fee', '0', '--force-low-fee'],
        )
        utils.bake(client)
        client.transfer(
            400000,
            'bootstrap1',
            'bootstrap5',
            ['--fee', '0', '--force-low-fee'],
        )
        utils.bake(client)
        assert client.get_balance('bootstrap5') == 4000000
    def test_activate_accounts(self, client: Client, session):
        # Activate two commitment (fundraiser) accounts from JSON fixtures
        # and check the activated balances match the commitments.
        account = f"{ACCOUNT_PATH}/king_commitment.json"
        session['keys'] += ['king', 'queen']
        client.activate_account(session['keys'][3], account)
        utils.bake(client)
        account = f"{ACCOUNT_PATH}/queen_commitment.json"
        client.activate_account(session['keys'][4], account)
        utils.bake(client)
        assert client.get_balance(session['keys'][3]) == 23932454.669343
        assert client.get_balance(session['keys'][4]) == 72954577.464032
    def test_transfer_king_queen(self, client: Client, session):
        keys = session['keys']
        client.transfer(10, keys[3], keys[4], TRANSFER_ARGS)
        utils.bake(client)
    def test_duplicate_alias(self, client: Client):
        # Re-aliasing an existing address with force=True must keep the
        # secret key retrievable under the original alias.
        client.add_address("baz", "foo", force=True)
        show_foo = client.show_address("foo", show_secret=True)
        assert show_foo.secret_key is not None
class TestRememberContract:
    """Tests for `remember contract`: aliasing non-originated KT1 addresses.

    The three tests run against the same client, so the aliases registered
    by the first test persist into the later ones.
    """

    @pytest.mark.parametrize(
        "contract_name,non_originated_contract_address",
        [
            ("test", "KT1BuEZtb68c1Q4yjtckcNjGELqWt56Xyesc"),
            ("test-2", "KT1TZCh8fmUbuDqFxetPWC2fsQanAHzLx4W9"),
        ],
    )
    def test_non_originated_contract_no_forcing_not_saved_before(
        self,
        client,
        contract_name,
        non_originated_contract_address,
    ):
        # Fresh alias: registering without force succeeds.
        client.remember_contract(contract_name, non_originated_contract_address)
    # As it is always the same client, the contracts have been saved
    # before
    @pytest.mark.parametrize(
        "contract_name,non_originated_contract_address",
        [
            ("test", "KT1BuEZtb68c1Q4yjtckcNjGELqWt56Xyesc"),
            ("test-2", "KT1TZCh8fmUbuDqFxetPWC2fsQanAHzLx4W9"),
        ],
    )
    def test_non_originated_contract_with_forcing_and_saved_before(
        self,
        client,
        contract_name,
        non_originated_contract_address,
    ):
        # Existing alias: force=True overwrites without error.
        client.remember_contract(
            contract_name, non_originated_contract_address, force=True
        )
    # As it is always the same client, the contracts have been saved
    # before
    @pytest.mark.parametrize(
        "contract_name,non_originated_contract_address",
        [
            ("test", "KT1BuEZtb68c1Q4yjtckcNjGELqWt56Xyesc"),
            ("test-2", "KT1TZCh8fmUbuDqFxetPWC2fsQanAHzLx4W9"),
        ],
    )
    def test_non_originated_contract_no_forcing_and_saved_before(
        self,
        client,
        contract_name,
        non_originated_contract_address,
    ):
        # Existing alias without force must be rejected with a clear error.
        expected_error = f"The contract alias {contract_name} already exists"
        with assert_run_failure(expected_error):
            client.remember_contract(
                contract_name, non_originated_contract_address, force=False
            )
| 35.707207
| 80
| 0.608931
|
from os import path
import pytest
from client.client import Client
from tools import utils
from tools.paths import ACCOUNT_PATH
from tools.utils import assert_run_failure
from .contract_paths import CONTRACT_PATH
TRANSFER_ARGS = ['--burn-cap', '0.257']
@pytest.mark.incremental
class TestRawContext:
def test_delegates(self, client: Client):
path = '/chains/main/blocks/head/context/raw/bytes/delegates/?depth=3'
res = client.rpc('get', path)
expected = {
"ed25519": {
"02": {"29": None},
"a9": {"ce": None},
"c5": {"5c": None},
"da": {"c9": None},
"e7": {"67": None},
}
}
assert res == expected
def test_no_service_1(self, client: Client):
path = '/chains/main/blocks/head/context/raw/bytes/non-existent'
with assert_run_failure('No service found at this URL'):
client.rpc('get', path)
def test_no_service_2(self, client: Client):
path = (
'/chains/main/blocks/head/context/raw/bytes/'
'non-existent?depth=-1'
)
expected = 'Command failed: Extraction depth -1 is invalid'
with assert_run_failure(expected):
client.rpc('get', path)
def test_no_service_3(self, client: Client):
path = '/chains/main/blocks/head/context/raw/bytes/non-existent?depth=0'
with assert_run_failure('No service found at this URL'):
client.rpc('get', path)
def test_bake(self, client: Client):
utils.bake(client, 'bootstrap4')
def test_gen_keys(self, client: Client, session):
session['keys'] = ['foo', 'bar', 'boo']
sigs = [None, 'secp256k1', 'ed25519']
for key, sig in zip(session['keys'], sigs):
args = [] if sig is None else ['--sig', sig]
client.gen_key(key, args)
def test_transfers(self, client: Client, session):
client.transfer(1000, 'bootstrap1', session['keys'][0], TRANSFER_ARGS)
utils.bake(client)
client.transfer(2000, 'bootstrap1', session['keys'][1], TRANSFER_ARGS)
utils.bake(client)
client.transfer(3000, 'bootstrap1', session['keys'][2], TRANSFER_ARGS)
utils.bake(client)
def test_balances(self, client: Client, session):
assert client.get_balance(session['keys'][0]) == 1000
assert client.get_balance(session['keys'][1]) == 2000
assert client.get_balance(session['keys'][2]) == 3000
def test_transfer_bar_foo(self, client: Client, session):
client.transfer(
1000,
session['keys'][1],
session['keys'][0],
['--fee', '0', '--force-low-fee'],
)
utils.bake(client)
def test_balances_bar_foo(self, client: Client, session):
assert client.get_balance(session['keys'][0]) == 2000
assert client.get_balance(session['keys'][1]) == 1000
def test_transfer_foo_bar(self, client: Client, session):
client.transfer(
1000, session['keys'][0], session['keys'][1], ['--fee', '0.05']
)
utils.bake(client)
def test_balances_foo_bar(self, client: Client, session):
assert client.get_balance(session['keys'][0]) == 999.95
assert client.get_balance(session['keys'][1]) == 2000
def test_transfer_failure(self, client: Client, session):
with pytest.raises(Exception):
client.transfer(999.95, session['keys'][0], session['keys'][1])
def test_originate_contract_noop(self, client: Client):
contract = path.join(CONTRACT_PATH, 'opcodes', 'noop.tz')
client.remember('noop', contract)
client.typecheck(contract)
client.originate(
'noop', 1000, 'bootstrap1', contract, ['--burn-cap', '0.295']
)
utils.bake(client)
def test_transfer_to_noop(self, client: Client):
client.transfer(10, 'bootstrap1', 'noop', ['--arg', 'Unit'])
utils.bake(client)
def test_contract_hardlimit(self, client: Client):
contract = path.join(CONTRACT_PATH, 'mini_scenarios', 'hardlimit.tz')
client.originate(
'hardlimit',
1000,
'bootstrap1',
contract,
['--init', '3', '--burn-cap', '0.341'],
)
utils.bake(client)
client.transfer(10, 'bootstrap1', 'hardlimit', ['--arg', 'Unit'])
utils.bake(client)
client.transfer(10, 'bootstrap1', 'hardlimit', ['--arg', 'Unit'])
utils.bake(client)
def test_transfers_bootstraps5_bootstrap1(self, client: Client):
assert client.get_balance('bootstrap5') == 4000000
client.transfer(
400000,
'bootstrap5',
'bootstrap1',
['--fee', '0', '--force-low-fee'],
)
utils.bake(client)
client.transfer(
400000,
'bootstrap1',
'bootstrap5',
['--fee', '0', '--force-low-fee'],
)
utils.bake(client)
assert client.get_balance('bootstrap5') == 4000000
def test_activate_accounts(self, client: Client, session):
account = f"{ACCOUNT_PATH}/king_commitment.json"
session['keys'] += ['king', 'queen']
client.activate_account(session['keys'][3], account)
utils.bake(client)
account = f"{ACCOUNT_PATH}/queen_commitment.json"
client.activate_account(session['keys'][4], account)
utils.bake(client)
assert client.get_balance(session['keys'][3]) == 23932454.669343
assert client.get_balance(session['keys'][4]) == 72954577.464032
def test_transfer_king_queen(self, client: Client, session):
keys = session['keys']
client.transfer(10, keys[3], keys[4], TRANSFER_ARGS)
utils.bake(client)
def test_duplicate_alias(self, client: Client):
client.add_address("baz", "foo", force=True)
show_foo = client.show_address("foo", show_secret=True)
assert show_foo.secret_key is not None
class TestRememberContract:
@pytest.mark.parametrize(
"contract_name,non_originated_contract_address",
[
("test", "KT1BuEZtb68c1Q4yjtckcNjGELqWt56Xyesc"),
("test-2", "KT1TZCh8fmUbuDqFxetPWC2fsQanAHzLx4W9"),
],
)
def test_non_originated_contract_no_forcing_not_saved_before(
self,
client,
contract_name,
non_originated_contract_address,
):
client.remember_contract(contract_name, non_originated_contract_address)
@pytest.mark.parametrize(
"contract_name,non_originated_contract_address",
[
("test", "KT1BuEZtb68c1Q4yjtckcNjGELqWt56Xyesc"),
("test-2", "KT1TZCh8fmUbuDqFxetPWC2fsQanAHzLx4W9"),
],
)
def test_non_originated_contract_with_forcing_and_saved_before(
self,
client,
contract_name,
non_originated_contract_address,
):
client.remember_contract(
contract_name, non_originated_contract_address, force=True
)
@pytest.mark.parametrize(
"contract_name,non_originated_contract_address",
[
("test", "KT1BuEZtb68c1Q4yjtckcNjGELqWt56Xyesc"),
("test-2", "KT1TZCh8fmUbuDqFxetPWC2fsQanAHzLx4W9"),
],
)
def test_non_originated_contract_no_forcing_and_saved_before(
self,
client,
contract_name,
non_originated_contract_address,
):
expected_error = f"The contract alias {contract_name} already exists"
with assert_run_failure(expected_error):
client.remember_contract(
contract_name, non_originated_contract_address, force=False
)
| true
| true
|
f7176e98b7a02c6157bf7280dc45d7ede12e9f2b
| 2,915
|
py
|
Python
|
Bio/Geo/Record.py
|
bneron/biopython
|
2c52e57661c8f6cdf4a191850b2f6871f8582af7
|
[
"PostgreSQL"
] | 1
|
2019-07-29T02:53:51.000Z
|
2019-07-29T02:53:51.000Z
|
Bio/Geo/Record.py
|
bneron/biopython
|
2c52e57661c8f6cdf4a191850b2f6871f8582af7
|
[
"PostgreSQL"
] | 1
|
2021-09-11T14:30:32.000Z
|
2021-09-11T14:30:32.000Z
|
Bio/Geo/Record.py
|
bneron/biopython
|
2c52e57661c8f6cdf4a191850b2f6871f8582af7
|
[
"PostgreSQL"
] | 2
|
2016-12-19T02:27:46.000Z
|
2019-07-29T02:53:54.000Z
|
# Copyright 2001 by Katharine Lindner. All rights reserved.
# Copyright 2006 by PeterC. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Hold GEO data in a straightforward format.
classes:
o Record - All of the information in an GEO record.
See http://www.ncbi.nlm.nih.gov/geo/
"""
from __future__ import print_function
class Record(object):
    """Hold GEO information in a format similar to the original record.

    The Record class is meant to make data easy to get to when you are
    just interested in looking at GEO data.

    Attributes:
     - entity_type -- GEO entity type string (e.g. SAMPLE)
     - entity_id -- GEO accession for the entity
     - entity_attributes -- dict of attribute name -> string or list of strings
     - col_defs -- dict mapping a column header to its definition text
     - table_rows -- list of rows, each a list of column values
    """
    def __init__(self):
        self.entity_type = ''
        self.entity_id = ''
        self.entity_attributes = {}
        self.col_defs = {}
        self.table_rows = []

    def __str__(self):
        """Render the record as human-readable text.

        Long values are wrapped at 40/80 columns via out_block and tables
        are truncated to the first 20 data rows (plus the final row).
        """
        output = ''
        output += 'GEO Type: %s\n' % self.entity_type
        output += 'GEO Id: %s\n' % self.entity_id
        att_keys = sorted(self.entity_attributes)
        for key in att_keys:
            contents = self.entity_attributes[key]
            if isinstance(contents, list):
                for item in contents:
                    # Items are normally strings; skip anything unsliceable
                    # instead of aborting the whole rendering.  (Was a bare
                    # ``except:`` which also swallowed KeyboardInterrupt.)
                    try:
                        output += '%s: %s\n' % (key, item[:40])
                        output += out_block(item[40:])
                    except TypeError:
                        pass
            elif isinstance(contents, str):
                output += '%s: %s\n' % (key, contents[:40])
                output += out_block(contents[40:])
            else:
                # Unexpected attribute type: echo it to stdout for debugging
                # and attempt the same rendering as for strings (may raise
                # if the value does not support slicing, as before).
                print(contents)
                output += '%s: %s\n' % (key, contents[:40])
                output += out_block(contents[40:])
        col_keys = sorted(self.col_defs)
        output += 'Column Header Definitions\n'
        for key in col_keys:
            val = self.col_defs[key]
            output += '    %s: %s\n' % (key, val[:40])
            output += out_block(val[40:], '    ')
        # May have to display VERY large tables,
        # so only show the first 20 lines of data
        MAX_ROWS = 20 + 1  # include header in count
        # enumerate replaces table_rows.index(row), which was O(n) per row
        # and returned the wrong number for duplicate rows.
        for row_number, row in enumerate(self.table_rows[0:MAX_ROWS]):
            output += '%s: ' % row_number
            for col in row:
                output += '%s\t' % col
            output += '\n'
        if len(self.table_rows) > MAX_ROWS:
            output += '...\n'
            output += '%s: ' % (len(self.table_rows) - 1)
            for col in self.table_rows[-1]:
                output += '%s\t' % col
            output += '\n'
        return output
def out_block(text, prefix=''):
output = ''
for j in range(0, len(text), 80):
output += '%s%s\n' % (prefix, text[j:j + 80])
output += '\n'
return output
| 32.032967
| 71
| 0.540995
|
from __future__ import print_function
class Record(object):
def __init__(self):
self.entity_type = ''
self.entity_id = ''
self.entity_attributes = {}
self.col_defs = {}
self.table_rows = []
def __str__(self):
output = ''
output += 'GEO Type: %s\n' % self.entity_type
output += 'GEO Id: %s\n' % self.entity_id
att_keys = sorted(self.entity_attributes)
for key in att_keys:
contents = self.entity_attributes[key]
if isinstance(contents, list):
for item in contents:
try:
output += '%s: %s\n' % (key, item[:40])
output += out_block(item[40:])
except:
pass
elif isinstance(contents, str):
output += '%s: %s\n' % (key, contents[:40])
output += out_block(contents[40:])
else:
print(contents)
output += '%s: %s\n' % (key, contents[:40])
output += out_block(contents[40:])
col_keys = sorted(self.col_defs)
output += 'Column Header Definitions\n'
for key in col_keys:
val = self.col_defs[key]
output += ' %s: %s\n' % (key, val[:40])
output += out_block(val[40:], ' ')
MAX_ROWS = 20 + 1
for row in self.table_rows[0:MAX_ROWS]:
output += '%s: ' % self.table_rows.index(row)
for col in row:
output += '%s\t' % col
output += '\n'
if len(self.table_rows) > MAX_ROWS:
output += '...\n'
row = self.table_rows[-1]
output += '%s: ' % self.table_rows.index(row)
for col in row:
output += '%s\t' % col
output += '\n'
return output
def out_block(text, prefix=''):
output = ''
for j in range(0, len(text), 80):
output += '%s%s\n' % (prefix, text[j:j + 80])
output += '\n'
return output
| true
| true
|
f7176eb2dd972a167ede03275af13333e556edee
| 829
|
py
|
Python
|
setup.py
|
brandjon/iast
|
23961536c3bfb5d8fce39c28214ea88b8072450c
|
[
"PSF-2.0"
] | 11
|
2015-01-04T08:40:09.000Z
|
2021-03-24T03:56:34.000Z
|
setup.py
|
brandjon/iast
|
23961536c3bfb5d8fce39c28214ea88b8072450c
|
[
"PSF-2.0"
] | null | null | null |
setup.py
|
brandjon/iast
|
23961536c3bfb5d8fce39c28214ea88b8072450c
|
[
"PSF-2.0"
] | null | null | null |
from setuptools import setup
setup(
name = 'iAST',
version = '0.2.1',
url = 'https://github.com/brandjon/iast',
author = 'Jon Brandvein',
author_email = 'jon.brandvein@gmail.com',
license = 'MIT License',
description = 'A library for defining and manipulating ASTs',
classifiers = [
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Libraries :: Python Modules',
],
packages = ['iast', 'iast.asdl', 'iast.python'],
package_data = {'iast.asdl': ['*.asdl']},
test_suite = 'tests',
install_requires = ['simplestruct >=0.2.1'],
)
| 29.607143
| 71
| 0.546441
|
from setuptools import setup
setup(
name = 'iAST',
version = '0.2.1',
url = 'https://github.com/brandjon/iast',
author = 'Jon Brandvein',
author_email = 'jon.brandvein@gmail.com',
license = 'MIT License',
description = 'A library for defining and manipulating ASTs',
classifiers = [
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Libraries :: Python Modules',
],
packages = ['iast', 'iast.asdl', 'iast.python'],
package_data = {'iast.asdl': ['*.asdl']},
test_suite = 'tests',
install_requires = ['simplestruct >=0.2.1'],
)
| true
| true
|
f717706b17d80b336156b8f3eb5d703f5a5b7596
| 3,082
|
py
|
Python
|
airbyte-integrations/connectors/source-paypal-transaction/bin/fixture_helper.py
|
luizgribeiro/airbyte
|
71a96f5417b678c39b34e2e92234d8a51529e086
|
[
"MIT"
] | 2
|
2021-08-04T03:17:38.000Z
|
2021-11-15T10:16:08.000Z
|
airbyte-integrations/connectors/source-paypal-transaction/bin/fixture_helper.py
|
luizgribeiro/airbyte
|
71a96f5417b678c39b34e2e92234d8a51529e086
|
[
"MIT"
] | 52
|
2021-06-11T12:39:05.000Z
|
2022-03-30T04:59:35.000Z
|
airbyte-integrations/connectors/source-paypal-transaction/bin/fixture_helper.py
|
luizgribeiro/airbyte
|
71a96f5417b678c39b34e2e92234d8a51529e086
|
[
"MIT"
] | 2
|
2021-12-14T17:15:40.000Z
|
2021-12-14T17:18:03.000Z
|
#
# MIT License
#
# Copyright (c) 2020 Airbyte
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import logging
from pprint import pprint
# %%
import requests
logging.basicConfig(level=logging.DEBUG)
# %%
specification = {
"client_id": "REPLACE_ME",
"secret": "REPLACE_ME",
"start_date": "2021-06-01T00:00:00+00:00",
"end_date": "2021-06-30T00:00:00+00:00",
"is_sandbox": True,
}
# %% READ <client_id> and <secret>
client_id = specification.get("client_id")
secret = specification.get("secret")
# %% GET API_TOKEN
token_refresh_endpoint = "https://api-m.sandbox.paypal.com/v1/oauth2/token"
data = "grant_type=client_credentials"
headers = {
"Accept": "application/json",
"Accept-Language": "en_US",
}
response = requests.request(
method="POST",
url=token_refresh_endpoint,
data=data,
headers=headers,
auth=(client_id, secret),
)
response_json = response.json()
print(response_json)
API_TOKEN = response_json["access_token"]
# CREATE TRANSACTIONS
# for i in range(1000):
# create_response = requests.post(
# "https://api-m.sandbox.paypal.com/v2/checkout/orders",
# headers={'content-type': 'application/json', 'authorization': f'Bearer {API_TOKEN}', "prefer": "return=representation"},
# json={
# "intent": "CAPTURE",
# "purchase_units": [
# {
# "amount": {
# "currency_code": "USD",
# "value": f"{float(i)}"
# }
# }
# ]
# }
# )
#
# print(create_response.json())
# %% LIST TRANSACTIONS
url = "https://api-m.sandbox.paypal.com/v1/reporting/transactions"
params = {
"start_date": "2021-06-20T00:00:00+00:00",
"end_date": "2021-07-10T07:19:45Z",
"fields": "all",
"page_size": "100",
"page": "1",
}
headers = {
"Authorization": f"Bearer {API_TOKEN}",
"Content-Type": "application/json",
}
response = requests.get(
url,
headers=headers,
params=params,
)
pprint(response.json())
| 28.018182
| 130
| 0.658014
|
import logging
from pprint import pprint
import requests
logging.basicConfig(level=logging.DEBUG)
specification = {
"client_id": "REPLACE_ME",
"secret": "REPLACE_ME",
"start_date": "2021-06-01T00:00:00+00:00",
"end_date": "2021-06-30T00:00:00+00:00",
"is_sandbox": True,
}
client_id = specification.get("client_id")
secret = specification.get("secret")
token_refresh_endpoint = "https://api-m.sandbox.paypal.com/v1/oauth2/token"
data = "grant_type=client_credentials"
headers = {
"Accept": "application/json",
"Accept-Language": "en_US",
}
response = requests.request(
method="POST",
url=token_refresh_endpoint,
data=data,
headers=headers,
auth=(client_id, secret),
)
response_json = response.json()
print(response_json)
API_TOKEN = response_json["access_token"]
url = "https://api-m.sandbox.paypal.com/v1/reporting/transactions"
params = {
"start_date": "2021-06-20T00:00:00+00:00",
"end_date": "2021-07-10T07:19:45Z",
"fields": "all",
"page_size": "100",
"page": "1",
}
headers = {
"Authorization": f"Bearer {API_TOKEN}",
"Content-Type": "application/json",
}
response = requests.get(
url,
headers=headers,
params=params,
)
pprint(response.json())
| true
| true
|
f71770a671bd24aaba89e7db742a94eb08713f1f
| 2,095
|
py
|
Python
|
test/main_test.py
|
vaughn-johnson/talkspace-public-api
|
20eca278e8ac651f610d6afaff8fdc3fce2918fc
|
[
"MIT"
] | null | null | null |
test/main_test.py
|
vaughn-johnson/talkspace-public-api
|
20eca278e8ac651f610d6afaff8fdc3fce2918fc
|
[
"MIT"
] | 6
|
2020-11-19T04:25:05.000Z
|
2020-11-20T20:53:32.000Z
|
test/main_test.py
|
vaughn-johnson/talkspace-public-api
|
20eca278e8ac651f610d6afaff8fdc3fce2918fc
|
[
"MIT"
] | null | null | null |
from unittest.mock import MagicMock, Mock, patch
from .mock_mongo import MockPyMongo
import json
import sys
import os
### TEST SETUP ###
EXPECTED_DATA_FRAME_FILENAME = os.path.join(os.path.dirname(__file__),
'expected_response.json')
EXPECTED_DATA_FRAME = open(EXPECTED_DATA_FRAME_FILENAME).read()
sys.modules['pymongo'] = MockPyMongo
cloud_mock = MagicMock()
sys.modules['google.cloud'] = cloud_mock
##################
from src.main import _get_data, _refresh_data # noqa: E402
def test_snapshot():
expected = _pretty_print(EXPECTED_DATA_FRAME)
observed = _pretty_print(f'{_get_data().to_json()}')
assert expected == observed
def test_cold_cache_json():
_blob().return_value.exists = lambda: False
_blob().return_value.download_as_string.return_value = '{}'
find = MockPyMongo.MongoClient().talkspace.messages.find
find.reset_mock()
_refresh_data('json')
find.assert_called_once()
def test_cold_cache_csv():
_blob().return_value.exists = lambda: False
_blob().return_value.download_as_string.return_value = '{}'
find = MockPyMongo.MongoClient().talkspace.messages.find
find.reset_mock()
_refresh_data('csv')
find.assert_called_once()
def test_warm_cache_json():
_blob().return_value.exists = lambda: True
_blob().return_value.download_as_string.return_value = '{}'
find = MockPyMongo.MongoClient().talkspace.messages.find
find.reset_mock()
_refresh_data('json')
find.assert_not_called()
def test_warm_cache_csv():
_blob().return_value.exists = lambda: True
_blob().return_value.download_as_string.return_value = '{}'
find = MockPyMongo.MongoClient().talkspace.messages.find
find.reset_mock()
_refresh_data('csv')
find.assert_not_called()
def _pretty_print(json_string):
json.dumps(
json.loads(json_string),
indent=2,
sort_keys=True
)
def _blob():
return cloud_mock.storage\
.Client.return_value\
.bucket.return_value\
.blob
| 26.858974
| 70
| 0.686396
|
from unittest.mock import MagicMock, Mock, patch
from .mock_mongo import MockPyMongo
import json
import sys
import os
.join(os.path.dirname(__file__),
'expected_response.json')
EXPECTED_DATA_FRAME = open(EXPECTED_DATA_FRAME_FILENAME).read()
sys.modules['pymongo'] = MockPyMongo
cloud_mock = MagicMock()
sys.modules['google.cloud'] = cloud_mock
{_get_data().to_json()}')
assert expected == observed
def test_cold_cache_json():
_blob().return_value.exists = lambda: False
_blob().return_value.download_as_string.return_value = '{}'
find = MockPyMongo.MongoClient().talkspace.messages.find
find.reset_mock()
_refresh_data('json')
find.assert_called_once()
def test_cold_cache_csv():
_blob().return_value.exists = lambda: False
_blob().return_value.download_as_string.return_value = '{}'
find = MockPyMongo.MongoClient().talkspace.messages.find
find.reset_mock()
_refresh_data('csv')
find.assert_called_once()
def test_warm_cache_json():
_blob().return_value.exists = lambda: True
_blob().return_value.download_as_string.return_value = '{}'
find = MockPyMongo.MongoClient().talkspace.messages.find
find.reset_mock()
_refresh_data('json')
find.assert_not_called()
def test_warm_cache_csv():
_blob().return_value.exists = lambda: True
_blob().return_value.download_as_string.return_value = '{}'
find = MockPyMongo.MongoClient().talkspace.messages.find
find.reset_mock()
_refresh_data('csv')
find.assert_not_called()
def _pretty_print(json_string):
json.dumps(
json.loads(json_string),
indent=2,
sort_keys=True
)
def _blob():
return cloud_mock.storage\
.Client.return_value\
.bucket.return_value\
.blob
| true
| true
|
f7177131572435ea6057a138ae45fca472fca8a1
| 50,438
|
py
|
Python
|
train/comms/pt/comms.py
|
caogao/param
|
9de2602c894df264a004c352ee16abc14f93da76
|
[
"MIT"
] | null | null | null |
train/comms/pt/comms.py
|
caogao/param
|
9de2602c894df264a004c352ee16abc14f93da76
|
[
"MIT"
] | null | null | null |
train/comms/pt/comms.py
|
caogao/param
|
9de2602c894df264a004c352ee16abc14f93da76
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import logging
import time
import comms_utils
import numpy as np
# pytorch
import torch
from comms_utils import paramCommsBench, ensureTensorFlush
### TODO: add these to class variables?
supportedCollectives = [
"reduce",
"all_reduce",
"all_to_all",
"all_to_allv",
"all_gather",
"broadcast",
"reduce_scatter",
"reduce_scatter_base",
"all_gather_base",
"incast",
"multicast",
] # , "scatter", "gather"]
pt2ptPatterns = [
"one2one",
"pairwise",
]
logger = logging.getLogger(__name__)
class MultilineFormatter(argparse.ArgumentDefaultsHelpFormatter):
def _split_lines(self, text, width):
if text.startswith("R|"):
return text[2:].splitlines()
# this is the RawTextHelpFormatter._split_lines
return argparse.ArgumentDefaultsHelpFormatter._split_lines(self, text, width)
# define the collective benchmark
class commsCollBench(paramCommsBench):
def __init__(self):
super().__init__(supportedNwstacks=["pytorch-dist", "pytorch-xla-tpu"])
# def readCollArgs(self, parser):
def readArgs(self, parser):
# read the common/basic arguments
super().readArgs(parser)
parser.add_argument(
"--w", type=int, default=5, help="number of warmup iterations"
) # number of warmup-iterations
parser.add_argument(
"--n", type=int, default=5, help="number of iterations"
) # number of iterations
# experiment related parameters
parser.add_argument(
"--mode",
type=str,
default="comms",
help="benchmark mode",
choices=["comms", "compute", "dlrm", "comms-compute"],
) # alternative is DLRM mode or comm-compute mode
parser.add_argument(
"--b", type=str, default="8", help="minimum size, in bytes, to start with"
) # COMMS mode, begin the sweep at.
parser.add_argument(
"--e", type=str, default="64", help="maximum size, in bytes, to end at"
) # COMMS mode, end the sweep at.
parser.add_argument(
"--f", type=int, default=2, help="multiplication factor between sizes"
) # COMMS mode, multiplication factor.
parser.add_argument(
"--collective",
type=str,
default="all_reduce",
help="Collective operation to be evaluated",
choices=supportedCollectives,
) # collective op to benchmark
# For comm-compute or compute mode
parser.add_argument(
"--kernel",
type=str,
default="gemm",
help="Compute kernel, used for comms-compute or compute mode",
choices=["gemm", "emb_lookup"],
) # Compute kernel: "gemm"
parser.add_argument(
"--num-compute",
type=int,
default=100,
help="one collective for every NUM_COMPUTE compute kernels",
) # Launch one coll for every n compute kernels
# For GEMM
parser.add_argument(
"--mm-dim",
type=int,
default=100,
help="dimension size for GEMM compute kernel",
) # Matrix multiplication dim n, A[n,n] * B [n,n]
# For emb lookup
parser.add_argument(
"--emb-dim",
type=int,
default=128,
help="dimension size for Embedding table compute kernel",
) # Embedding table dimension
parser.add_argument(
"--num-embs",
type=int,
default=100000,
help="Embedding table hash size for Embedding table compute kernel",
) # Embedding table hash size
parser.add_argument(
"--avg-len",
type=int,
default=28,
help="Average lookup operations per sample",
) # Average #lookup per sample
parser.add_argument(
"--batch-size",
type=int,
default=512,
help="number of samples reading the table concurrently",
) # #Samples reading the table concurrently
parser.add_argument(
"--root", type=int, default=0, help="root process for reduce benchmark"
) # root process for reduce and bcast (and gather, scatter, etc., if support in the future)
# TODO: check the correctness of root, should be between 0 to [world_size -1]
parser.add_argument(
"--src-ranks",
type=str,
nargs="?",
help="R|src ranks for many-to-one incast pattern or pt2pt.\n"
"List of ranks separated by comma or a range specified by start:end.\n"
"Pt2pt one2one should set only one rank.\n"
"The default value of incast includes all ranks, pt2pt includes rank 0.",
) # optional: group of src ranks in many-to-one incast or pt2pt
parser.add_argument(
"--dst-ranks",
type=str,
nargs="?",
help="R|dst ranks for one-to-many multicast pattern or pt2pt.\n"
"List of ranks separated by comma or a range specified by start:end.\n"
"Pt2pt one2one should set only one rank\n"
"The default value of multicast includes all ranks, pt2pt includes rank 1.",
) # optional: group of dst ranks in one-to-many multicast or pt2pt
parser.add_argument(
"--pair",
action="store_true",
default=False,
help="Toggle to enable collective pair mode",
)
parser.add_argument(
"--collective-pair",
type=str,
default="all_reduce",
help="Collective pair operation to be evaluated",
choices=supportedCollectives,
) # collective op to pair with the other collective, --collective should be non-empty
parser.add_argument(
"--overlap-pair-pgs",
action="store_true",
default=False,
help="Toggle to enable overlapping collective pair with two pgs",
) # overlap collective pair with two pgs
parser.add_argument(
"--pt2pt",
type=str,
default=None,
help="point to point pattern",
choices=pt2ptPatterns,
) # point to point mode
parser.add_argument(
"--window",
type=int,
default=100,
help="window size for pt2pt throughput test",
) # optional: point to point throughput test window size
return parser.parse_known_args()
def checkArgs(self, args):
super().checkArgs(args)
if args.pt2pt is not None:
args.collective = "pt2pt"
if args.pt2pt not in pt2ptPatterns:
logger.error(
f"Specified pt2pt pattern: {args.pt2pt} is not one of the supported pt2pt patterns: {str(pt2ptPatterns)}"
)
comms_utils.gracefulExit()
args.b = comms_utils.parsesize(args.b)
args.e = comms_utils.parsesize(args.e)
args.dtype = self.dtypeMap[args.data_type]
if args.b < 1:
logger.warning(
f"Starting size (--b {args.b}) should be greater than 1 byte...fix and continue"
)
args.b = 1
if args.e < args.b:
logger.warning(
f"the begin-size (--b {args.b}) is larger than the end-size (--e {args.e})"
)
if args.device == "cpu" and args.backend == "nccl":
raise ValueError(f"NCCL is not supported for device type {args.device}")
if args.c == 1 and args.z == 0 and args.collective in ("all_reduce", "reduce", "reduce_scatter"):
logger.warning(
f"Data validation is not supported for {args.collective} in non-blocking mode, disabled and continue"
)
args.c = 0
# run a few sanity checks
if args.bitwidth < 32:
if args.device != "cuda":
logger.error(
f"collective quantization may not be fully supported for {args.device}"
)
comms_utils.checkQuantArgs(
args.collective,
args.dtype,
args.b,
args.quant_a2a_embedding_dim,
args.z,
)
def runColl(self, comm_fn=None, compute_fn=None, comm_fn_pair=None):
self.backendFuncs.complete_accel_ops(self.collectiveArgs, initOp=True)
self.backendFuncs.sync_barrier(self.collectiveArgs, desc="runColl_begin")
elapsedTimeNS = 0.0
is_blocking = not self.collectiveArgs.asyncOp
enable_comms = False if (comm_fn is None or comm_fn == self.backendFuncs.noop) else True
enable_compute = False if (compute_fn is None or compute_fn == self.backendFuncs.noop) else True
enable_comms_pair = False if (comm_fn_pair is None or comm_fn_pair == self.backendFuncs.noop) else True
# for comms pair mode, force async comms for overlapping evaluation
if enable_comms_pair:
self.collectiveArgs.asyncOp = True
for nIter in range(
self.collectiveArgs.numWarmupIters + self.collectiveArgs.numIters
):
if nIter == self.collectiveArgs.numWarmupIters:
# Flush non-blocking ops to ensure warmup is really complete
self.backendFuncs.complete_accel_ops(self.collectiveArgs)
ensureTensorFlush(self.collectiveArgs.opTensor)
if enable_comms_pair:
ensureTensorFlush(self.collectiveArgs.opTensor_pair)
# Start measuring time after warmup iterations
elapsedTimeNS = 0.0
self.collectiveArgs.quant_time.reset()
self.collectiveArgs.dequant_time.reset()
# reset tensor values for data validation check
if enable_comms:
self.setTensorVal(self.collectiveArgs.opTensor)
# for blocking mode, do barrier before starting collective
if is_blocking:
self.backendFuncs.sync_barrier(self.collectiveArgs)
start = time.monotonic() # available only in py3
self.collectiveArgs.group = self.backendFuncs.get_next_group()
comm_fn(self.collectiveArgs)
# post another collecitve if on comms pair mode, otherwise it's noop
self.collectiveArgs.group = self.backendFuncs.get_next_group()
comm_fn_pair(self.collectiveArgs, pair=enable_comms_pair)
if enable_compute:
for _ in range(self.collectiveArgs.numComputePerColl):
# TODO: investigate the cache effect
# Flush the cache
# _ = torch.rand(6 * 1024 * 1024 // 4).float() * 2 # V100 6MB L2 cache
compute_fn(self.collectiveArgs)
if is_blocking: # should be sychronous, wait for the collective
self.backendFuncs.complete_accel_ops(self.collectiveArgs)
# Measuring time.
elapsedTimeNS += (
time.monotonic() - start
) * 1e9 # keeping time in NS, helps in divising data by nanosecond
start = time.monotonic() # available only in py3
self.backendFuncs.complete_accel_ops(self.collectiveArgs)
end = time.monotonic() # available only in py3
ensureTensorFlush(self.collectiveArgs.opTensor)
if enable_comms_pair:
ensureTensorFlush(self.collectiveArgs.opTensor_pair)
elapsedTimeNS += (
end - start
) * 1e9 # keeping time in NS, helps in divising data by nanoseconds
memSize = self.backendFuncs.get_mem_size(self.collectiveArgs)
avgIterNS, algBW = comms_utils.getAlgBW(
elapsedTimeNS, memSize, self.collectiveArgs.numIters
)
busBW = self.backendFuncs.getBusBW(
self.collectiveArgs.collective,
algBW,
self.collectiveArgs,
)
if enable_comms_pair:
memSize_pair = self.backendFuncs.get_mem_size(
self.collectiveArgs, pair=enable_comms_pair
)
memSize += memSize_pair
_, algBW_pair = comms_utils.getAlgBW(
elapsedTimeNS, memSize_pair, self.collectiveArgs.numIters
)
algBW += algBW_pair
busBW += self.backendFuncs.getBusBW(
self.collectiveArgs.collective_pair,
algBW_pair,
self.collectiveArgs,
)
self.backendFuncs.sync_barrier(self.collectiveArgs, desc="runColl_end")
results = {
"timeUS": avgIterNS / 1e3,
"algBW": algBW,
"busBW": busBW,
"memSize": memSize,
}
return results
def runPt2Pt(self):
self.backendFuncs.complete_accel_ops(self.collectiveArgs, initOp=True)
# warm-up
memSize = self.backendFuncs.get_mem_size(self.collectiveArgs)
self.getPingLatency(self.collectiveArgs.numWarmupIters)
self.getPingPongLatency(self.collectiveArgs.numWarmupIters)
self.getUniBW(self.collectiveArgs.numWarmupIters, memSize)
self.getBiBW(self.collectiveArgs.numWarmupIters, memSize)
self.backendFuncs.sync_barrier(self.collectiveArgs, "runpt2pt_begin")
# pt2pt benchmark
pingPerIterNS = self.getPingLatency(self.collectiveArgs.numIters)
pingPongPerIterNS = self.getPingPongLatency(self.collectiveArgs.numIters)
avgUniBW = self.getUniBW(self.collectiveArgs.numIters, memSize)
avgBiBW = self.getBiBW(self.collectiveArgs.numIters, memSize)
self.backendFuncs.sync_barrier(self.collectiveArgs, "runpt2pt")
results = {
"pingPerIterNS": pingPerIterNS,
"pingPongPerIterNS": pingPongPerIterNS,
"avgUniBW": avgUniBW,
"avgBiBW": avgBiBW,
"memSize": memSize,
}
return results
def getPingLatency(self, numIters):
logger.debug(
"STATUS: begin ping test with src_ranks=%s, dst_ranks=%s."
% (self.collectiveArgs.src_ranks, self.collectiveArgs.dst_ranks)
)
self.collectiveArgs.asyncOp = False
# get one-way latency
pingLatencyNS = []
for _ in range(numIters):
self.backendFuncs.sync_barrier(self.collectiveArgs)
start = time.monotonic()
if self.collectiveArgs.global_rank in self.collectiveArgs.src_ranks:
idx = self.collectiveArgs.src_ranks.index(
self.collectiveArgs.global_rank
)
self.backendFuncs.send(
self.collectiveArgs, self.collectiveArgs.dst_ranks[idx]
)
elif self.collectiveArgs.global_rank in self.collectiveArgs.dst_ranks:
idx = self.collectiveArgs.dst_ranks.index(
self.collectiveArgs.global_rank
)
self.backendFuncs.recv(
self.collectiveArgs, self.collectiveArgs.src_ranks[idx]
)
self.backendFuncs.complete_accel_ops(self.collectiveArgs)
pingLatencyNS.append(
(time.monotonic() - start) * 1e9
) # keeping time in NS, helps in divising data by nanosecond
logger.debug("STATUS: end ping test.")
return pingLatencyNS
def getPingPongLatency(self, numIters):
logger.debug(
"STATUS: begin ping-pong with src_ranks=%s, dst_ranks=%s."
% (self.collectiveArgs.src_ranks, self.collectiveArgs.dst_ranks)
)
self.collectiveArgs.asyncOp = False
# get round-trip latency
pingPongLatencyNS = []
for _ in range(numIters):
self.backendFuncs.sync_barrier(self.collectiveArgs)
start = time.monotonic()
if self.collectiveArgs.global_rank in self.collectiveArgs.src_ranks:
idx = self.collectiveArgs.src_ranks.index(
self.collectiveArgs.global_rank
)
self.backendFuncs.send(
self.collectiveArgs, self.collectiveArgs.dst_ranks[idx]
)
self.backendFuncs.recv(
self.collectiveArgs, self.collectiveArgs.dst_ranks[idx]
)
elif self.collectiveArgs.global_rank in self.collectiveArgs.dst_ranks:
idx = self.collectiveArgs.dst_ranks.index(
self.collectiveArgs.global_rank
)
self.backendFuncs.recv(
self.collectiveArgs, self.collectiveArgs.src_ranks[idx]
)
self.backendFuncs.send(
self.collectiveArgs, self.collectiveArgs.src_ranks[idx]
)
self.backendFuncs.complete_accel_ops(self.collectiveArgs)
pingPongLatencyNS.append(
(time.monotonic() - start) * 1e9
) # keeping time in NS, helps in divising data by nanosecond
logger.debug("STATUS: end ping-pong test.")
return pingPongLatencyNS
def getUniBW(self, numIters, memSize):
logger.debug(
"STATUS: begin UniBW test with src_ranks=%s, dst_ranks=%s."
% (self.collectiveArgs.src_ranks, self.collectiveArgs.dst_ranks)
)
self.collectiveArgs.asyncOp = True
# get unidirectional bandwidth
uniLatencyNS = []
for _ in range(numIters):
self.backendFuncs.sync_barrier(self.collectiveArgs)
start = time.monotonic()
for w in range(self.collectiveArgs.window):
if self.collectiveArgs.global_rank in self.collectiveArgs.src_ranks:
idx = self.collectiveArgs.src_ranks.index(
self.collectiveArgs.global_rank
)
self.backendFuncs.isend(
self.collectiveArgs, self.collectiveArgs.dst_ranks[idx], tag=w
)
elif self.collectiveArgs.global_rank in self.collectiveArgs.dst_ranks:
idx = self.collectiveArgs.dst_ranks.index(
self.collectiveArgs.global_rank
)
self.backendFuncs.irecv(
self.collectiveArgs, self.collectiveArgs.src_ranks[idx], tag=w
)
self.backendFuncs.complete_accel_ops(self.collectiveArgs)
uniLatencyNS.append(
(time.monotonic() - start) * 1e9
) # keeping time in NS, helps in divising data by nanosecond
uniLatencyNS = [lat / self.collectiveArgs.window for lat in uniLatencyNS]
uniLatencyNS = np.mean(np.array(uniLatencyNS))
_, avgUniBW = comms_utils.getAlgBW(uniLatencyNS, memSize, 1)
logger.debug("STATUS: end UniBW test.")
return avgUniBW
def getBiBW(self, numIters, memSize):
logger.debug(
"STATUS: begin BiBW test with src_ranks=%s, dst_ranks=%s."
% (self.collectiveArgs.src_ranks, self.collectiveArgs.dst_ranks)
)
self.collectiveArgs.asyncOp = True
# get bidirectional bandwidth
biLatencyNS = []
for _ in range(numIters):
self.backendFuncs.sync_barrier(self.collectiveArgs)
start = time.monotonic()
for w in range(self.collectiveArgs.window):
if self.collectiveArgs.global_rank in self.collectiveArgs.src_ranks:
idx = self.collectiveArgs.src_ranks.index(
self.collectiveArgs.global_rank
)
self.backendFuncs.isend(
self.collectiveArgs, self.collectiveArgs.dst_ranks[idx], tag=w
)
self.backendFuncs.irecv(
self.collectiveArgs,
self.collectiveArgs.dst_ranks[idx],
tag=w + self.collectiveArgs.window,
)
elif self.collectiveArgs.global_rank in self.collectiveArgs.dst_ranks:
idx = self.collectiveArgs.dst_ranks.index(
self.collectiveArgs.global_rank
)
self.backendFuncs.irecv(
self.collectiveArgs, self.collectiveArgs.src_ranks[idx], tag=w
)
self.backendFuncs.isend(
self.collectiveArgs,
self.collectiveArgs.src_ranks[idx],
tag=w + self.collectiveArgs.window,
)
self.backendFuncs.complete_accel_ops(self.collectiveArgs)
biLatencyNS.append(
(time.monotonic() - start) * 1e9
) # keeping time in NS, helps in divising data by nanosecond
biLatencyNS = [lat / self.collectiveArgs.window for lat in biLatencyNS]
biLatencyNS = np.mean(np.array(biLatencyNS))
_, avgBiBW = comms_utils.getAlgBW(biLatencyNS, 2 * memSize, 1)
logger.debug("STATUS: end UniBW test.")
return avgBiBW
def checkPt2PtRanks(self):
# set default values
if not self.collectiveArgs.src_ranks:
self.collectiveArgs.src_ranks = [0]
if not self.collectiveArgs.dst_ranks:
self.collectiveArgs.dst_ranks = [1]
# sanity check
if self.collectiveArgs.pt2pt == "one2one":
if (
len(self.collectiveArgs.src_ranks) > 1
or len(self.collectiveArgs.dst_ranks) > 1
):
if self.global_rank == 0:
logger.error(
"One2one Pt2Pt requires only a single rank is specified in src_ranks and dst_ranks! "
)
comms_utils.gracefulExit()
elif self.collectiveArgs.pt2pt == "pairwise":
# pairwise pt2pt requires identical number of ranks in src_ranks and dst_ranks.
if len(self.collectiveArgs.src_ranks) != len(self.collectiveArgs.dst_ranks):
if self.global_rank == 0:
logger.error(
"Pairwise Pt2Pt requires identical number of members in src_ranks and dst_ranks! "
)
comms_utils.gracefulExit()
# pairwise pt2pt does not allow same rank to exist in both groups
if bool(
set(self.collectiveArgs.src_ranks).intersection(
self.collectiveArgs.dst_ranks
)
):
if self.global_rank == 0:
logger.error(
"Pairwise Pt2Pt requires distinct members in src_ranks and dst_ranks! "
)
comms_utils.gracefulExit()
if self.global_rank == 0:
print(
f"\t collective={self.collectiveArgs.collective}\t{self.collectiveArgs.pt2pt}, src_ranks={self.collectiveArgs.src_ranks}, dst_ranks={self.collectiveArgs.dst_ranks}"
)
def checkCollectiveRanks(self):
if self.collectiveArgs.collective == "incast":
# incast: set default value and exclude root
if not self.collectiveArgs.src_ranks:
self.collectiveArgs.src_ranks = [*range(self.comm_size)]
if self.collectiveArgs.srcOrDst in self.collectiveArgs.src_ranks:
self.collectiveArgs.src_ranks.remove(self.collectiveArgs.srcOrDst)
elif self.collectiveArgs.collective == "multicast":
# multicast: set default value and exclude root
if not self.collectiveArgs.dst_ranks:
self.collectiveArgs.dst_ranks = [*range(self.comm_size)]
if self.collectiveArgs.srcOrDst in self.collectiveArgs.dst_ranks:
self.collectiveArgs.dst_ranks.remove(self.collectiveArgs.srcOrDst)
if self.global_rank == 0:
print(
f"\t collective={self.collectiveArgs.collective}, src_ranks={self.collectiveArgs.src_ranks}, dst_ranks={self.collectiveArgs.dst_ranks}"
)
    def initCollectiveArgs(self, commsParams):
        """Populate self.collectiveArgs from commsParams and the backend.

        Queries rank/device details from the backend, computes the list of
        message sizes to sweep, wires all collective parameters into
        self.collectiveArgs, and (when not in comms-only mode) allocates the
        tensors needed by the gemm / emb_lookup compute kernels.

        Returns:
            (local_rank, global_rank, world_size, group, curDevice,
             curHwDevice, allSizes, computeFunc) — consumed by benchTime.
        """
        # lint was complaining that benchTime was too complex!
        (
            local_rank,
            global_rank,
            world_size,
            group,
            curDevice,
            curHwDevice,
        ) = comms_utils.get_rank_details(
            self.backendFuncs
        )  # Getting ranks from backednFuncs object, since we cannot use MPI (e.g.: TPU) to launch all the processes.
        self.backendFuncs.sayHello()  # Informs us where each process is running.
        groups = self.backendFuncs.get_groups()
        num_pgs = len(groups)
        self.comm_size = world_size
        self.global_rank = global_rank
        comms_utils.fixBeginSize(
            commsParams, world_size
        )  # Ensuring that all-reduce and all-to-all has atleast one member per rank.
        allSizes = comms_utils.getSizes(
            commsParams.beginSize, commsParams.endSize, commsParams.stepFactor
        )  # Given the begin-size, end-size, step-factor what are the message sizes to iterate on.
        if global_rank == 0:
            print(
                f"[Rank {global_rank:>3}] allSizes: {allSizes} local_rank: {local_rank} element_size: {commsParams.element_size}"
            )
        # Copy all per-run parameters into the shared collectiveArgs holder.
        self.collectiveArgs.group = group
        self.collectiveArgs.groups = groups
        self.collectiveArgs.num_pgs = num_pgs
        self.collectiveArgs.device = curDevice
        self.collectiveArgs.world_size = world_size
        self.collectiveArgs.numIters = commsParams.numIters
        self.collectiveArgs.numWarmupIters = commsParams.numWarmupIters
        self.collectiveArgs.global_rank = global_rank
        self.collectiveArgs.backendFuncs = self.backendFuncs
        self.collectiveArgs.collective = commsParams.collective
        op = self.backendFuncs.get_reduce_op("sum")
        self.collectiveArgs.op = op
        self.collectiveArgs.srcOrDst = commsParams.srcOrDst
        self.collectiveArgs.src_ranks = commsParams.src_ranks
        self.collectiveArgs.dst_ranks = commsParams.dst_ranks
        self.collectiveArgs.pair = commsParams.pair
        self.collectiveArgs.collective_pair = commsParams.collective_pair
        self.collectiveArgs.pt2pt = commsParams.pt2pt
        self.collectiveArgs.window = commsParams.window
        # blockingFlag == 1 means synchronous (blocking) collectives.
        self.collectiveArgs.asyncOp = False if commsParams.blockingFlag == 1 else True
        if commsParams.bitwidth < 32:
            comms_utils.initQuantCommCtx(self.collectiveArgs, commsParams)
        # Validate / default the src and dst rank lists for the chosen pattern.
        if self.collectiveArgs.collective == "pt2pt":
            self.checkPt2PtRanks()
        else:
            self.checkCollectiveRanks()
        computeFunc = self.backendFuncs.noop
        if (
            commsParams.mode != "comms"
        ):  # Compute mode related initialization if not in comms-only mode
            if commsParams.kernel == "gemm":
                computeFunc = self.backendFuncs.gemm
                mm_dim = commsParams.mm_dim
                in1 = np.random.rand(mm_dim, mm_dim)
                MMin1 = torch.FloatTensor(in1).to(curDevice)
                in2 = np.random.rand(mm_dim, mm_dim)
                MMin2 = torch.FloatTensor(in2).to(curDevice)
                in3 = np.random.rand(mm_dim, mm_dim)
                MMin3 = torch.FloatTensor(in3).to(curDevice)
                MMout = self.backendFuncs.alloc_empty(
                    [mm_dim, mm_dim], commsParams.dtype, curDevice
                )
                self.collectiveArgs.MMout = MMout
                self.collectiveArgs.MMin1 = MMin1
                self.collectiveArgs.MMin2 = MMin2
                self.collectiveArgs.MMin3 = MMin3
                self.collectiveArgs.numComputePerColl = commsParams.num_compute
            elif commsParams.kernel == "emb_lookup":
                computeFunc = self.backendFuncs.emb_lookup
                emb_dim = commsParams.emb_dim
                num_embeddings = commsParams.num_embs
                avg_length = commsParams.avg_len
                batch_size = commsParams.batch_size
                print(
                    f"emb_dim {emb_dim} num_embs {num_embeddings} avg_len {avg_length} bs {batch_size}"
                )
                self.collectiveArgs.EmbWeights = self.backendFuncs.alloc_empty(
                    [num_embeddings, emb_dim], torch.double, curDevice
                )
                self.collectiveArgs.TableOffsets = torch.LongTensor(
                    [0, num_embeddings]
                ).to(curDevice)
                self.collectiveArgs.Indices = torch.LongTensor(
                    np.random.randint(0, num_embeddings - 1, avg_length * batch_size)
                ).to(curDevice)
                # Every sample performs exactly avg_length lookups.
                lengths = np.ones((1, batch_size)) * avg_length
                flat_lengths = lengths.flatten()
                self.collectiveArgs.Offsets = torch.LongTensor(
                    [0] + np.cumsum(flat_lengths).tolist()
                ).to(curDevice)
                self.collectiveArgs.LookupOut = self.backendFuncs.alloc_empty(
                    [batch_size, emb_dim], torch.double, curDevice
                )
                self.collectiveArgs.AvgLengths = avg_length
                self.collectiveArgs.numComputePerColl = commsParams.num_compute
        return (
            local_rank,
            global_rank,
            world_size,
            group,
            curDevice,
            curHwDevice,
            allSizes,
            computeFunc,
        )
    def gatherBenchTime(self, collectiveArgs, commsParams, timeUsElapsedList):
        """All-gather each rank's timing list across the job.

        Returns a list of 1-D tensors, one per rank, each holding that rank's
        values from timeUsElapsedList.

        NOTE(review): on the "xla" backend the timeList preallocation is
        skipped, so `return timeList` would raise NameError — confirm the
        intended xla path before relying on it.
        """
        # Push the list to device, then do an all-gather.
        timeElapsedTensor = torch.tensor(
            timeUsElapsedList, device=self.backendFuncs.get_device()
        )
        collectiveArgs.opTensor = None
        if commsParams.backend != "xla":
            # Preallocate one output slot per rank for the gather result.
            timeList = list(torch.ones(
                (self.comm_size,) + timeElapsedTensor.shape,
                dtype=timeElapsedTensor.dtype,
                device=timeElapsedTensor.device,
            ).unbind(0))
            collectiveArgs.opTensor = timeList
        collectiveArgs.ipTensor = timeElapsedTensor
        collectiveArgs.asyncOp = False
        collectiveArgs.dataSize = (
            timeElapsedTensor.nelement() * timeElapsedTensor.element_size()
        )
        collectiveArgs.numElements = timeElapsedTensor.nelement()
        # use allgather as all process group should support it
        self.backendFuncs.all_gather(collectiveArgs)
        self.backendFuncs.complete_accel_ops(collectiveArgs)
        return timeList
    def printPreamble(self, commsParams):
        """Print the column-header row for the results table.

        The header layout depends on the run: pt2pt has its own columns,
        quantized runs (bitwidth < 32) add quant/de-quant columns, and
        pair mode adds the pair element count. Each header must match the
        corresponding row format in the reportBenchTime* methods.
        """
        logger.debug(f"\tcommsParams: {str(commsParams.__dict__)}")
        header = "\n\tCOMMS-RES"
        if self.collectiveArgs.collective == "pt2pt":
            header += "{:>15}{:>20}{:>10}{:>10}{:>25}{:>10}{:>10}{:>15}{:>15}{:>18}{:>18}".format(
                "size (B)",
                "pingLatency(us):p50",
                "p75",
                "p95",
                "pingPongLatency(us):p50",
                "p75",
                "p95",
                "avgUniBW(GB/s)",
                "avgBiBW(GB/s)",
                "totalUniBW(GB/s)",
                "totalBiBW(GB/s)",
            )
        else:
            if commsParams.bitwidth < 32:
                header += "-QUANT\t{:>15}{:>18}{:>25}{:>15}{:>15}{:>15}".format(
                    "size (B)",
                    "nElementsPerRank",
                    "P95 Latency(us): Quant",
                    "Comms",
                    "De-Quant",
                    "Overall",
                )
            elif not self.collectiveArgs.pair:
                header += (
                    "{:>15}{:>18}{:>18}{:>12}{:>12}{:>12}{:>12}{:>15}{:>12}".format(
                        "size (B)",
                        "nElementsPerRank",
                        "Latency(us):p50",
                        "p75",
                        "p95",
                        "Min",
                        "Max",
                        "AlgBW(GB/s)",
                        "BusBW(GB/s)",
                    )
                )
            else:
                header += "{:>15}{:>18}{:>22}{:>18}{:>12}{:>12}{:>12}{:>12}{:>15}{:>12}".format(
                    "total-size (B)",
                    "nElementsPerRank",
                    "nElementsPairPerRank",
                    "Latency(us):p50",
                    "p75",
                    "p95",
                    "Min",
                    "Max",
                    "AlgBW(GB/s)",
                    "BusBW(GB/s)",
                )
        print(header)
    def reportBenchTimeCollWithQuant(
        self,
        commsParams,
        results,
        tensorList,
        quantTimeTensorList,
        dequantTimeTensorList,
    ):
        """Print one results row for a quantized collective run.

        Reports the p95 quantization, communication (overall minus quant and
        de-quant), de-quantization, and overall latencies across ranks.
        """
        if commsParams.backend == "xla":
            # xla returns one flat tensor; reshape to a per-rank vector.
            latencyAcrossRanks = torch.transpose(tensorList.view(-1, 1), 0, 1)[0]
            latencyAcrossRanks = latencyAcrossRanks.cpu().detach().numpy()
            # quant tensor
            quantLatencyAcrossRanks = torch.transpose(
                quantTimeTensorList.view(-1, 1), 0, 1
            )[0]
            quantLatencyAcrossRanks = quantLatencyAcrossRanks.cpu().detach().numpy()
            # dequant tensor
            dequantLatencyAcrossRanks = torch.transpose(
                dequantTimeTensorList.view(-1, 1), 0, 1
            )[0]
            dequantLatencyAcrossRanks = dequantLatencyAcrossRanks.cpu().detach().numpy()
        else:
            if isinstance(tensorList, list):
                tensorList = [t.cpu().detach().numpy() for t in tensorList]
            latencyAcrossRanks = np.array(tensorList)
            # quant tensor
            quantLatencyAcrossRanks = np.array(quantTimeTensorList)
            # dequant tensor
            dequantLatencyAcrossRanks = np.array(dequantTimeTensorList)
        p95 = np.percentile(latencyAcrossRanks, 95)
        quant_p95 = np.percentile(quantLatencyAcrossRanks, 95)
        dequant_p95 = np.percentile(dequantLatencyAcrossRanks, 95)
        # Row format must match the header printed in printPreamble.
        print(
            "\tCOMMS-RES-QUANT\t{:>15}{:>18}{:>25}{:>15}{:>15}{:>15}".format(
                results["memSize"],
                str("%d" % (results["numElements"])),
                str("%.1f" % (quant_p95)),
                str("%.1f" % (p95 - quant_p95 - dequant_p95)),
                str("%.1f" % (dequant_p95)),
                str("%.1f" % (p95)),
            )
        )
def reportBenchTime(
self,
commsParams,
results,
tensorList,
quantTimeTensorList,
dequantTimeTensorList,
):
# convernt num_elements to # of elements per rank
if commsParams.collective in ("all_to_all", "all_to_allv"):
results["numElements"] = int(
results["numElements"] // commsParams.comms_world_info.world_size
)
if commsParams.collective == "pt2pt":
self.reportBenchTimePt2Pt(commsParams, tensorList, results)
elif commsParams.bitwidth < 32:
self.reportBenchTimeCollWithQuant(
commsParams,
results,
tensorList,
quantTimeTensorList,
dequantTimeTensorList,
)
else:
self.reportBenchTimeColl(commsParams, results, tensorList)
    def reportBenchTimeColl(self, commsParams, results, tensorList):
        """Print one results row for a (non-quantized) collective run.

        Computes latency percentiles over only the communicating ranks and
        scales busBW by the quantization bitwidth before printing.
        """
        if commsParams.backend == "xla":
            # xla returns one flat tensor; reshape to a per-rank vector.
            latencyAcrossRanks = torch.transpose(tensorList.view(-1, 1), 0, 1)[0]
            latencyAcrossRanks = latencyAcrossRanks.cpu().detach().numpy()
        else:
            if isinstance(tensorList, list):
                tensorList = [t.cpu().detach().numpy() for t in tensorList]
            latencyAcrossRanks = np.array(tensorList)
        logger.debug(f"Latency across all ranks: {latencyAcrossRanks}")
        # Include only communicating ranks
        if self.collectiveArgs.collective == "multicast":
            commRanks = [self.collectiveArgs.srcOrDst] + self.collectiveArgs.dst_ranks
        elif self.collectiveArgs.collective == "incast":
            commRanks = [self.collectiveArgs.srcOrDst] + self.collectiveArgs.src_ranks
        else:
            commRanks = range(self.collectiveArgs.world_size)
        latencyAcrossCommRanks = latencyAcrossRanks[commRanks]
        logger.debug(
            "Latency across communicating ranks (%s): %s"
            % (commRanks, latencyAcrossCommRanks)
        )
        p50 = np.percentile(latencyAcrossCommRanks, 50)
        p75 = np.percentile(latencyAcrossCommRanks, 75)
        p95 = np.percentile(latencyAcrossCommRanks, 95)
        minlat = np.amin(latencyAcrossCommRanks)
        maxlat = np.amax(latencyAcrossCommRanks)
        # adjust busBW for reduced-precision payloads
        busBW = results["busBW"] * (commsParams.bitwidth / 32.0)
        # Row format must match the header printed in printPreamble.
        if not self.collectiveArgs.pair:
            print(
                "\tCOMMS-RES{:>15}{:>18}{:>18}{:>12}{:>12}{:>12}{:>12}{:>15}{:>12}".format(
                    results["memSize"],
                    str("%d" % (results["numElements"])),
                    str("%.1f" % (p50)),
                    str("%.1f" % (p75)),
                    str("%.1f" % (p95)),
                    str("%.1f" % (minlat)),
                    str("%.1f" % (maxlat)),
                    str("%.3f" % (results["algBW"])),
                    str("%.3f" % (busBW)),
                )
            )
        else:
            # convert to # of elements per rank
            if commsParams.collective_pair in ("all_to_all", "all_to_allv"):
                results["numElements_pair"] = int(
                    results["numElements_pair"]
                    // commsParams.comms_world_info.world_size
                )
            print(
                "\tCOMMS-RES{:>15}{:>18}{:>22}{:>18}{:>12}{:>12}{:>12}{:>12}{:>15}{:>12}".format(
                    results["memSize"],
                    str("%d" % (results["numElements"])),
                    str("%d" % (results["numElements_pair"])),
                    str("%.1f" % (p50)),
                    str("%.1f" % (p75)),
                    str("%.1f" % (p95)),
                    str("%.1f" % (minlat)),
                    str("%.1f" % (maxlat)),
                    str("%.3f" % (results["algBW"])),
                    str("%.3f" % (busBW)),
                )
            )
    def reportBenchTimePt2Pt(self, commsParams, resultsAcrossRanks, results):
        """Print one results row for a pt2pt run.

        Unpacks the four per-rank metrics gathered in benchTime (ping latency,
        ping-pong latency, uni- and bi-directional BW), restricts them to the
        communicating ranks, and prints percentiles plus avg/total bandwidth.
        """
        pingLatencyAcrossRanks = []
        pingPongLatencyAcrossRanks = []
        uniBWAcrossRanks = []
        biBWAcrossRanks = []
        # Each rank's gathered tensor holds [ping, pingpong, uniBW, biBW].
        for curRankTensor in resultsAcrossRanks:
            pingLatencyAcrossRanks.append(curRankTensor[0].item())
            pingPongLatencyAcrossRanks.append(curRankTensor[1].item())
            uniBWAcrossRanks.append(curRankTensor[2].item())
            biBWAcrossRanks.append(curRankTensor[3].item())
        pingLatencyAcrossRanks = np.array(pingLatencyAcrossRanks)
        pingPongLatencyAcrossRanks = np.array(pingPongLatencyAcrossRanks)
        uniBWAcrossRanks = np.array(uniBWAcrossRanks)
        biBWAcrossRanks = np.array(biBWAcrossRanks)
        # Include only communicating ranks
        commRanks = self.collectiveArgs.src_ranks + self.collectiveArgs.dst_ranks
        pingLatencyAcrossCommRanks = pingLatencyAcrossRanks[commRanks]
        pingPongLatencyAcrossCommRanks = pingPongLatencyAcrossRanks[commRanks]
        uniBWAcrossCommRanks = uniBWAcrossRanks[commRanks]
        biBWAcrossCommRanks = biBWAcrossRanks[commRanks]
        logger.debug(
            "Ping latency across communicating ranks (%s): %s"
            % (commRanks, pingLatencyAcrossCommRanks)
        )
        logger.debug(
            "PingPong latency across communicating ranks (%s): %s"
            % (commRanks, pingPongLatencyAcrossCommRanks)
        )
        logger.debug(
            "UniBW across all communicating ranks (%s): %s"
            % (commRanks, uniBWAcrossCommRanks)
        )
        logger.debug(
            "BiBW across all communicating ranks (%s): %s"
            % (commRanks, biBWAcrossCommRanks)
        )
        avgUniBW = np.mean(uniBWAcrossCommRanks)
        avgBiBW = np.mean(biBWAcrossCommRanks)
        # Each transfer is counted by both endpoints, hence the halving.
        totalUniBW = np.sum(uniBWAcrossCommRanks) / 2
        totalBiBW = np.sum(biBWAcrossCommRanks) / 2
        ping_p50 = np.percentile(pingLatencyAcrossCommRanks, 50)
        ping_p75 = np.percentile(pingLatencyAcrossCommRanks, 75)
        ping_p95 = np.percentile(pingLatencyAcrossCommRanks, 95)
        ping_pong_p50 = np.percentile(pingPongLatencyAcrossCommRanks, 50)
        ping_pong_p75 = np.percentile(pingPongLatencyAcrossCommRanks, 75)
        ping_pong_p95 = np.percentile(pingPongLatencyAcrossCommRanks, 95)
        # Row format must match the pt2pt header printed in printPreamble.
        print(
            "\tCOMMS-RES{:>15}{:>20}{:>10}{:>10}{:>25}{:>10}{:>10}{:>15}{:>15}{:>18}{:>18}".format(
                results["memSize"],
                str("%.1f" % (ping_p50)),
                str("%.1f" % (ping_p75)),
                str("%.1f" % (ping_p95)),
                str("%.1f" % (ping_pong_p50)),
                str("%.1f" % (ping_pong_p75)),
                str("%.1f" % (ping_pong_p95)),
                str("%.3f" % (avgUniBW)),
                str("%.3f" % (avgBiBW)),
                str("%.3f" % (totalUniBW)),
                str("%.3f" % (totalBiBW)),
            )
        )
    def benchTime(self, index, commsParams, backendFuncs):
        """Main benchmark loop: run the configured experiment over all message sizes.

        For each size: prepare tensors, run the collective (or pt2pt suite,
        or compute kernel), optionally validate the output, gather per-rank
        timings, and have rank 0 print one results row.
        """
        # Get NW stack specific parameters
        (
            local_rank,
            global_rank,
            world_size,
            group,
            curDevice,
            curHwDevice,
            allSizes,
            computeFunc,
        ) = self.initCollectiveArgs(commsParams)
        backendFuncs.sync_barrier(self.collectiveArgs)
        if global_rank == 0:
            self.printPreamble(commsParams)
        for curSize in allSizes:
            results = {}
            timeUsElapsedList = []
            quantTimeElapsedList = []
            dequantTimeElapsedList = []
            numElements = int(curSize // commsParams.element_size)
            collectiveFunc = self.backendFuncs.noop
            collectiveFunc_pair = self.backendFuncs.noop
            if (
                commsParams.mode != "compute"
            ):  # comms specific initializations if not in compute-only mode
                # set corresponding function pointers
                if commsParams.collective != "pt2pt":
                    collectiveFunc = backendFuncs.collectiveFunc[commsParams.collective]
                (
                    self.collectiveArgs.ipTensor,
                    self.collectiveArgs.opTensor,
                ) = self.prepComm(
                    curComm={
                        "in_msg_size": numElements,
                        "out_msg_size": numElements,
                        "world_size": world_size,
                    },
                    commsParams=commsParams,
                )
            # Setup the arguments.
            self.collectiveArgs.dataSize = curSize
            self.collectiveArgs.numElements = numElements
            self.collectiveArgs.waitObj = []
            results["numElements"] = numElements
            if (
                commsParams.pair and commsParams.mode != "compute"
            ):  # comms-pair specific initializations if not in compute-only mode:
                # set corresponding function pointers
                collectiveFunc_pair = backendFuncs.collectiveFunc[
                    commsParams.collective_pair
                ]
                # TODO: allow user to set specific size
                # Setup the arguments.
                self.collectiveArgs.dataSize_pair = curSize
                self.collectiveArgs.numElements_pair = int(
                    self.collectiveArgs.dataSize_pair // commsParams.element_size
                )
                results["numElements_pair"] = self.collectiveArgs.numElements_pair
                (
                    self.collectiveArgs.ipTensor_pair,
                    self.collectiveArgs.opTensor_pair,
                ) = self.prepComm(
                    curComm={
                        "in_msg_size": self.collectiveArgs.numElements_pair,
                        "out_msg_size": self.collectiveArgs.numElements_pair,
                        "world_size": world_size,
                    },
                    commsParams=commsParams,
                )
            # self.collectiveArgs has all the information on the experiment.
            if commsParams.collective == "pt2pt":
                results.update(self.runPt2Pt())
                timeUsElapsedList = [
                    np.mean(np.array(results["pingPerIterNS"])) / 1e3,
                    np.mean(np.array(results["pingPongPerIterNS"])) / 1e3,
                    results["avgUniBW"],
                    results["avgBiBW"],
                ]  # time in US
                if (
                    global_rank in self.collectiveArgs.src_ranks
                    or global_rank in self.collectiveArgs.dst_ranks
                ):
                    logger.debug(timeUsElapsedList)
            else:
                results.update(
                    self.runColl(
                        comm_fn=collectiveFunc,
                        compute_fn=computeFunc,
                        comm_fn_pair=collectiveFunc_pair,
                    )
                )
                timeUsElapsedList = [results["timeUS"]]
            # perfom data validation check on the final opTensor
            if commsParams.dcheck == 1:
                self.dcheck(commsParams, curSize, self.collectiveArgs.opTensor)
            backendFuncs.clear_memory(self.collectiveArgs)
            # gather quantization overhead if enabled
            if commsParams.bitwidth < 32:
                # calculate average (de-)quantization overhead
                results["quantTimeUS"] = (
                    self.collectiveArgs.quant_time.getTimeUS()
                    / self.collectiveArgs.numIters
                )
                results["dequantTimeUS"] = (
                    self.collectiveArgs.dequant_time.getTimeUS()
                    / self.collectiveArgs.numIters
                )
                quantTimeElapsedList.append(results["quantTimeUS"])
                dequantTimeElapsedList.append(results["dequantTimeUS"])
                logger.debug(quantTimeElapsedList)
                quantTimeElapsedList = self.gatherBenchTime(
                    self.collectiveArgs, commsParams, quantTimeElapsedList
                )
                dequantTimeElapsedList = self.gatherBenchTime(
                    self.collectiveArgs, commsParams, dequantTimeElapsedList
                )
            # gather and report performance to stdout
            tensorList = self.gatherBenchTime(
                self.collectiveArgs, commsParams, timeUsElapsedList
            )
            if global_rank == 0:
                self.reportBenchTime(
                    commsParams,
                    results,
                    tensorList,
                    quantTimeElapsedList,
                    dequantTimeElapsedList,
                )
            self.backendFuncs.sync_barrier(
                self.collectiveArgs, desc=f"curSize_{curSize}"
            )
        comms_utils.clearQuantCommCtx(self.collectiveArgs)
        # wait rank 0 reports results to avoid other ranks mess up the output
        self.backendFuncs.sync_barrier(self.collectiveArgs, "benchtime")
def runBench(self, comms_world_info, commsParams):
# Init the desired backend
if commsParams.nw_stack == "pytorch-dist":
from pytorch_dist_backend import PyTorchDistBackend
backendObj = PyTorchDistBackend(comms_world_info, commsParams)
elif commsParams.nw_stack == "pytorch-xla-tpu":
from pytorch_tpu_backend import PyTorchTPUBackend
backendObj = PyTorchTPUBackend(comms_world_info, commsParams)
else:
logger.error("Unsupported NW stack! ")
comms_utils.gracefulExit()
self.backendFuncs = backendObj
try:
backendObj.benchmark_comms()
except ValueError as ve:
if commsParams.backend == "ucc":
logger.critical("PyTorch UCC not implemented? {}".format(repr(ve)))
raise
def main():
    """CLI entry point: parse arguments, build world/params holders, run the benchmark."""
    bench = commsCollBench()
    # Parse command-line arguments.
    parser = argparse.ArgumentParser(
        description="PARAM-Comm Benchmark",
        formatter_class=MultilineFormatter,
    )
    args, _leftovers = bench.readArgs(parser)
    bench.checkArgs(args)
    env_params = comms_utils.read_comms_env_vars()
    # Only rank 0 echoes the environment and key arguments.
    if env_params["global_rank"] == 0:
        print("\t MPI environment: %s " % (str(env_params)))
        print(
            "\t backend: %s nw-stack: %s mode: %s args.b: %d args.e: %d args.f: %d args.z: %s args.master_ip: %s "
            % (
                args.backend,
                args.nw_stack,
                args.mode,
                args.b,
                args.e,
                args.f,
                args.z,
                args.master_ip,
            )
        )
    element_size = torch.ones([1], dtype=args.dtype).element_size()
    comms_world_info = comms_utils.comms_world_info_holder(
        args.master_ip, args.master_port, args.num_tpu_cores, env_params
    )
    commsParams = comms_utils.commsParamsHolder(
        args, comms_world_info, element_size, bench.benchTime
    )
    # Overlapping the pair collective requires two process groups.
    if args.pair and args.overlap_pair_pgs:
        commsParams.num_pgs = 2
    bench.runBench(comms_world_info, commsParams)
# Run the benchmark when invoked as a script.
if __name__ == "__main__":
    main()
| 41.207516
| 180
| 0.567865
|
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import logging
import time
import comms_utils
import numpy as np
import torch
from comms_utils import paramCommsBench, ensureTensorFlush
"all_to_allv",
"all_gather",
"broadcast",
"reduce_scatter",
"reduce_scatter_base",
"all_gather_base",
"incast",
"multicast",
]
# Supported point-to-point traffic patterns (see --pt2pt).
pt2ptPatterns = [
    "one2one",
    "pairwise",
]
# Module-level logger for this benchmark.
logger = logging.getLogger(__name__)
class MultilineFormatter(argparse.ArgumentDefaultsHelpFormatter):
    """Help formatter that preserves explicit newlines in help texts prefixed with "R|"."""

    def _split_lines(self, text, width):
        # An "R|" prefix means the caller pre-formatted the lines; honor them verbatim.
        if not text.startswith("R|"):
            return argparse.ArgumentDefaultsHelpFormatter._split_lines(self, text, width)
        return text[2:].splitlines()
class commsCollBench(paramCommsBench):
    """Micro-benchmark driver for communication collectives and pt2pt patterns."""

    def __init__(self):
        # Only the PyTorch distributed and XLA/TPU network stacks are supported.
        super().__init__(supportedNwstacks=["pytorch-dist", "pytorch-xla-tpu"])
def readArgs(self, parser):
super().readArgs(parser)
parser.add_argument(
"--w", type=int, default=5, help="number of warmup iterations"
)
parser.add_argument(
"--n", type=int, default=5, help="number of iterations"
)
parser.add_argument(
"--mode",
type=str,
default="comms",
help="benchmark mode",
choices=["comms", "compute", "dlrm", "comms-compute"],
)
parser.add_argument(
"--b", type=str, default="8", help="minimum size, in bytes, to start with"
)
parser.add_argument(
"--e", type=str, default="64", help="maximum size, in bytes, to end at"
)
parser.add_argument(
"--f", type=int, default=2, help="multiplication factor between sizes"
)
parser.add_argument(
"--collective",
type=str,
default="all_reduce",
help="Collective operation to be evaluated",
choices=supportedCollectives,
)
parser.add_argument(
"--kernel",
type=str,
default="gemm",
help="Compute kernel, used for comms-compute or compute mode",
choices=["gemm", "emb_lookup"],
)
parser.add_argument(
"--num-compute",
type=int,
default=100,
help="one collective for every NUM_COMPUTE compute kernels",
)
parser.add_argument(
"--mm-dim",
type=int,
default=100,
help="dimension size for GEMM compute kernel",
)
parser.add_argument(
"--emb-dim",
type=int,
default=128,
help="dimension size for Embedding table compute kernel",
)
parser.add_argument(
"--num-embs",
type=int,
default=100000,
help="Embedding table hash size for Embedding table compute kernel",
)
parser.add_argument(
"--avg-len",
type=int,
default=28,
help="Average lookup operations per sample",
) d_argument(
"--batch-size",
type=int,
default=512,
help="number of samples reading the table concurrently",
) "--root", type=int, default=0, help="root process for reduce benchmark"
)
parser.add_argument(
"--src-ranks",
type=str,
nargs="?",
help="R|src ranks for many-to-one incast pattern or pt2pt.\n"
"List of ranks separated by comma or a range specified by start:end.\n"
"Pt2pt one2one should set only one rank.\n"
"The default value of incast includes all ranks, pt2pt includes rank 0.",
)
parser.add_argument(
"--dst-ranks",
type=str,
nargs="?",
help="R|dst ranks for one-to-many multicast pattern or pt2pt.\n"
"List of ranks separated by comma or a range specified by start:end.\n"
"Pt2pt one2one should set only one rank\n"
"The default value of multicast includes all ranks, pt2pt includes rank 1.",
)
parser.add_argument(
"--pair",
action="store_true",
default=False,
help="Toggle to enable collective pair mode",
)
parser.add_argument(
"--collective-pair",
type=str,
default="all_reduce",
help="Collective pair operation to be evaluated",
choices=supportedCollectives,
)
parser.add_argument(
"--overlap-pair-pgs",
action="store_true",
default=False,
help="Toggle to enable overlapping collective pair with two pgs",
)
parser.add_argument(
"--pt2pt",
type=str,
default=None,
help="point to point pattern",
choices=pt2ptPatterns,
)
parser.add_argument(
"--window",
type=int,
default=100,
help="window size for pt2pt throughput test",
)
return parser.parse_known_args()
    def checkArgs(self, args):
        """Validate and normalize parsed arguments in place.

        Converts size strings to bytes, maps the dtype name, clamps invalid
        size bounds, rejects unsupported device/backend combinations, and
        checks quantization settings.
        """
        super().checkArgs(args)
        if args.pt2pt is not None:
            # pt2pt mode overrides the collective selection.
            args.collective = "pt2pt"
            if args.pt2pt not in pt2ptPatterns:
                logger.error(
                    f"Specified pt2pt pattern: {args.pt2pt} is not one of the supported pt2pt patterns: {str(pt2ptPatterns)}"
                )
                comms_utils.gracefulExit()
        args.b = comms_utils.parsesize(args.b)
        args.e = comms_utils.parsesize(args.e)
        args.dtype = self.dtypeMap[args.data_type]
        if args.b < 1:
            logger.warning(
                f"Starting size (--b {args.b}) should be greater than 1 byte...fix and continue"
            )
            args.b = 1
        if args.e < args.b:
            logger.warning(
                f"the begin-size (--b {args.b}) is larger than the end-size (--e {args.e})"
            )
        if args.device == "cpu" and args.backend == "nccl":
            raise ValueError(f"NCCL is not supported for device type {args.device}")
        # Data validation is only meaningful for blocking reduction collectives.
        if args.c == 1 and args.z == 0 and args.collective in ("all_reduce", "reduce", "reduce_scatter"):
            logger.warning(
                f"Data validation is not supported for {args.collective} in non-blocking mode, disabled and continue"
            )
            args.c = 0
        if args.bitwidth < 32:
            if args.device != "cuda":
                logger.error(
                    f"collective quantization may not be fully supported for {args.device}"
                )
            comms_utils.checkQuantArgs(
                args.collective,
                args.dtype,
                args.b,
                args.quant_a2a_embedding_dim,
                args.z,
            )
    def runColl(self, comm_fn=None, compute_fn=None, comm_fn_pair=None):
        """Time the given collective (optionally overlapped with a pair collective
        and/or compute kernels) over warmup + measured iterations.

        Returns a dict with timeUS (avg per-iteration latency), algBW, busBW,
        and memSize.
        """
        self.backendFuncs.complete_accel_ops(self.collectiveArgs, initOp=True)
        self.backendFuncs.sync_barrier(self.collectiveArgs, desc="runColl_begin")
        elapsedTimeNS = 0.0
        is_blocking = not self.collectiveArgs.asyncOp
        # noop stands in for a disabled phase; detect which phases are active.
        enable_comms = False if (comm_fn is None or comm_fn == self.backendFuncs.noop) else True
        enable_compute = False if (compute_fn is None or compute_fn == self.backendFuncs.noop) else True
        enable_comms_pair = False if (comm_fn_pair is None or comm_fn_pair == self.backendFuncs.noop) else True
        # for comms pair mode, force async comms for overlapping evaluation
        if enable_comms_pair:
            self.collectiveArgs.asyncOp = True
        for nIter in range(
            self.collectiveArgs.numWarmupIters + self.collectiveArgs.numIters
        ):
            if nIter == self.collectiveArgs.numWarmupIters:
                # Warmup complete: flush outstanding work and reset the timers.
                self.backendFuncs.complete_accel_ops(self.collectiveArgs)
                ensureTensorFlush(self.collectiveArgs.opTensor)
                if enable_comms_pair:
                    ensureTensorFlush(self.collectiveArgs.opTensor_pair)
                elapsedTimeNS = 0.0
                self.collectiveArgs.quant_time.reset()
                self.collectiveArgs.dequant_time.reset()
            if enable_comms:
                self.setTensorVal(self.collectiveArgs.opTensor)
            if is_blocking:
                self.backendFuncs.sync_barrier(self.collectiveArgs)
            start = time.monotonic()
            self.collectiveArgs.group = self.backendFuncs.get_next_group()
            comm_fn(self.collectiveArgs)
            # comm_fn_pair is noop unless pair mode is enabled.
            self.collectiveArgs.group = self.backendFuncs.get_next_group()
            comm_fn_pair(self.collectiveArgs, pair=enable_comms_pair)
            if enable_compute:
                for _ in range(self.collectiveArgs.numComputePerColl):
                    # TODO: investigate the cache effect
                    # Flush the cache
                    # _ = torch.rand(6 * 1024 * 1024 // 4).float() * 2 # V100 6MB L2 cache
                    compute_fn(self.collectiveArgs)
            if is_blocking:  # should be sychronous, wait for the collective
                self.backendFuncs.complete_accel_ops(self.collectiveArgs)
            # Measuring time.
            elapsedTimeNS += (
                time.monotonic() - start
            ) * 1e9  # keeping time in NS, helps in divising data by nanosecond
        start = time.monotonic()  # available only in py3
        self.backendFuncs.complete_accel_ops(self.collectiveArgs)
        end = time.monotonic()  # available only in py3
        ensureTensorFlush(self.collectiveArgs.opTensor)
        if enable_comms_pair:
            ensureTensorFlush(self.collectiveArgs.opTensor_pair)
        elapsedTimeNS += (
            end - start
        ) * 1e9  # keeping time in NS, helps in divising data by nanoseconds
        memSize = self.backendFuncs.get_mem_size(self.collectiveArgs)
        avgIterNS, algBW = comms_utils.getAlgBW(
            elapsedTimeNS, memSize, self.collectiveArgs.numIters
        )
        busBW = self.backendFuncs.getBusBW(
            self.collectiveArgs.collective,
            algBW,
            self.collectiveArgs,
        )
        if enable_comms_pair:
            # Account the pair collective's traffic in the reported bandwidth.
            memSize_pair = self.backendFuncs.get_mem_size(
                self.collectiveArgs, pair=enable_comms_pair
            )
            memSize += memSize_pair
            _, algBW_pair = comms_utils.getAlgBW(
                elapsedTimeNS, memSize_pair, self.collectiveArgs.numIters
            )
            algBW += algBW_pair
            busBW += self.backendFuncs.getBusBW(
                self.collectiveArgs.collective_pair,
                algBW_pair,
                self.collectiveArgs,
            )
        self.backendFuncs.sync_barrier(self.collectiveArgs, desc="runColl_end")
        results = {
            "timeUS": avgIterNS / 1e3,
            "algBW": algBW,
            "busBW": busBW,
            "memSize": memSize,
        }
        return results
def runPt2Pt(self):
self.backendFuncs.complete_accel_ops(self.collectiveArgs, initOp=True)
# warm-up
memSize = self.backendFuncs.get_mem_size(self.collectiveArgs)
self.getPingLatency(self.collectiveArgs.numWarmupIters)
self.getPingPongLatency(self.collectiveArgs.numWarmupIters)
self.getUniBW(self.collectiveArgs.numWarmupIters, memSize)
self.getBiBW(self.collectiveArgs.numWarmupIters, memSize)
self.backendFuncs.sync_barrier(self.collectiveArgs, "runpt2pt_begin")
# pt2pt benchmark
pingPerIterNS = self.getPingLatency(self.collectiveArgs.numIters)
pingPongPerIterNS = self.getPingPongLatency(self.collectiveArgs.numIters)
avgUniBW = self.getUniBW(self.collectiveArgs.numIters, memSize)
avgBiBW = self.getBiBW(self.collectiveArgs.numIters, memSize)
self.backendFuncs.sync_barrier(self.collectiveArgs, "runpt2pt")
results = {
"pingPerIterNS": pingPerIterNS,
"pingPongPerIterNS": pingPongPerIterNS,
"avgUniBW": avgUniBW,
"avgBiBW": avgBiBW,
"memSize": memSize,
}
return results
    def getPingLatency(self, numIters):
        """Measure one-way (ping) latency per iteration.

        src ranks send, dst ranks receive; pairing is positional between
        src_ranks and dst_ranks. Returns a list of per-iteration latencies
        in nanoseconds.
        """
        logger.debug(
            "STATUS: begin ping test with src_ranks=%s, dst_ranks=%s."
            % (self.collectiveArgs.src_ranks, self.collectiveArgs.dst_ranks)
        )
        self.collectiveArgs.asyncOp = False
        # get one-way latency
        pingLatencyNS = []
        for _ in range(numIters):
            self.backendFuncs.sync_barrier(self.collectiveArgs)
            start = time.monotonic()
            if self.collectiveArgs.global_rank in self.collectiveArgs.src_ranks:
                idx = self.collectiveArgs.src_ranks.index(
                    self.collectiveArgs.global_rank
                )
                self.backendFuncs.send(
                    self.collectiveArgs, self.collectiveArgs.dst_ranks[idx]
                )
            elif self.collectiveArgs.global_rank in self.collectiveArgs.dst_ranks:
                idx = self.collectiveArgs.dst_ranks.index(
                    self.collectiveArgs.global_rank
                )
                self.backendFuncs.recv(
                    self.collectiveArgs, self.collectiveArgs.src_ranks[idx]
                )
            self.backendFuncs.complete_accel_ops(self.collectiveArgs)
            pingLatencyNS.append(
                (time.monotonic() - start) * 1e9
            )  # keeping time in NS, helps in divising data by nanosecond
        logger.debug("STATUS: end ping test.")
        return pingLatencyNS
    def getPingPongLatency(self, numIters):
        """Measure round-trip (ping-pong) latency per iteration.

        Each src sends then receives; the paired dst receives then sends.
        Returns a list of per-iteration latencies in nanoseconds.
        """
        logger.debug(
            "STATUS: begin ping-pong with src_ranks=%s, dst_ranks=%s."
            % (self.collectiveArgs.src_ranks, self.collectiveArgs.dst_ranks)
        )
        self.collectiveArgs.asyncOp = False
        # get round-trip latency
        pingPongLatencyNS = []
        for _ in range(numIters):
            self.backendFuncs.sync_barrier(self.collectiveArgs)
            start = time.monotonic()
            if self.collectiveArgs.global_rank in self.collectiveArgs.src_ranks:
                idx = self.collectiveArgs.src_ranks.index(
                    self.collectiveArgs.global_rank
                )
                self.backendFuncs.send(
                    self.collectiveArgs, self.collectiveArgs.dst_ranks[idx]
                )
                self.backendFuncs.recv(
                    self.collectiveArgs, self.collectiveArgs.dst_ranks[idx]
                )
            elif self.collectiveArgs.global_rank in self.collectiveArgs.dst_ranks:
                idx = self.collectiveArgs.dst_ranks.index(
                    self.collectiveArgs.global_rank
                )
                self.backendFuncs.recv(
                    self.collectiveArgs, self.collectiveArgs.src_ranks[idx]
                )
                self.backendFuncs.send(
                    self.collectiveArgs, self.collectiveArgs.src_ranks[idx]
                )
            self.backendFuncs.complete_accel_ops(self.collectiveArgs)
            pingPongLatencyNS.append(
                (time.monotonic() - start) * 1e9
            )  # keeping time in NS, helps in divising data by nanosecond
        logger.debug("STATUS: end ping-pong test.")
        return pingPongLatencyNS
    def getUniBW(self, numIters, memSize):
        """Measure average unidirectional bandwidth.

        Posts `window` async sends (src) / receives (dst) per iteration and
        divides the per-message latency into memSize to obtain bandwidth.
        """
        logger.debug(
            "STATUS: begin UniBW test with src_ranks=%s, dst_ranks=%s."
            % (self.collectiveArgs.src_ranks, self.collectiveArgs.dst_ranks)
        )
        self.collectiveArgs.asyncOp = True
        # get unidirectional bandwidth
        uniLatencyNS = []
        for _ in range(numIters):
            self.backendFuncs.sync_barrier(self.collectiveArgs)
            start = time.monotonic()
            for w in range(self.collectiveArgs.window):
                if self.collectiveArgs.global_rank in self.collectiveArgs.src_ranks:
                    idx = self.collectiveArgs.src_ranks.index(
                        self.collectiveArgs.global_rank
                    )
                    self.backendFuncs.isend(
                        self.collectiveArgs, self.collectiveArgs.dst_ranks[idx], tag=w
                    )
                elif self.collectiveArgs.global_rank in self.collectiveArgs.dst_ranks:
                    idx = self.collectiveArgs.dst_ranks.index(
                        self.collectiveArgs.global_rank
                    )
                    self.backendFuncs.irecv(
                        self.collectiveArgs, self.collectiveArgs.src_ranks[idx], tag=w
                    )
            self.backendFuncs.complete_accel_ops(self.collectiveArgs)
            uniLatencyNS.append(
                (time.monotonic() - start) * 1e9
            )  # keeping time in NS, helps in divising data by nanosecond
        # Normalize to a per-message latency before converting to bandwidth.
        uniLatencyNS = [lat / self.collectiveArgs.window for lat in uniLatencyNS]
        uniLatencyNS = np.mean(np.array(uniLatencyNS))
        _, avgUniBW = comms_utils.getAlgBW(uniLatencyNS, memSize, 1)
        logger.debug("STATUS: end UniBW test.")
        return avgUniBW
def getBiBW(self, numIters, memSize):
logger.debug(
"STATUS: begin BiBW test with src_ranks=%s, dst_ranks=%s."
% (self.collectiveArgs.src_ranks, self.collectiveArgs.dst_ranks)
)
self.collectiveArgs.asyncOp = True
# get bidirectional bandwidth
biLatencyNS = []
for _ in range(numIters):
self.backendFuncs.sync_barrier(self.collectiveArgs)
start = time.monotonic()
for w in range(self.collectiveArgs.window):
if self.collectiveArgs.global_rank in self.collectiveArgs.src_ranks:
idx = self.collectiveArgs.src_ranks.index(
self.collectiveArgs.global_rank
)
self.backendFuncs.isend(
self.collectiveArgs, self.collectiveArgs.dst_ranks[idx], tag=w
)
self.backendFuncs.irecv(
self.collectiveArgs,
self.collectiveArgs.dst_ranks[idx],
tag=w + self.collectiveArgs.window,
)
elif self.collectiveArgs.global_rank in self.collectiveArgs.dst_ranks:
idx = self.collectiveArgs.dst_ranks.index(
self.collectiveArgs.global_rank
)
self.backendFuncs.irecv(
self.collectiveArgs, self.collectiveArgs.src_ranks[idx], tag=w
)
self.backendFuncs.isend(
self.collectiveArgs,
self.collectiveArgs.src_ranks[idx],
tag=w + self.collectiveArgs.window,
)
self.backendFuncs.complete_accel_ops(self.collectiveArgs)
biLatencyNS.append(
(time.monotonic() - start) * 1e9
) # keeping time in NS, helps in divising data by nanosecond
biLatencyNS = [lat / self.collectiveArgs.window for lat in biLatencyNS]
biLatencyNS = np.mean(np.array(biLatencyNS))
_, avgBiBW = comms_utils.getAlgBW(biLatencyNS, 2 * memSize, 1)
logger.debug("STATUS: end UniBW test.")
return avgBiBW
def checkPt2PtRanks(self):
# set default values
if not self.collectiveArgs.src_ranks:
self.collectiveArgs.src_ranks = [0]
if not self.collectiveArgs.dst_ranks:
self.collectiveArgs.dst_ranks = [1]
# sanity check
if self.collectiveArgs.pt2pt == "one2one":
if (
len(self.collectiveArgs.src_ranks) > 1
or len(self.collectiveArgs.dst_ranks) > 1
):
if self.global_rank == 0:
logger.error(
"One2one Pt2Pt requires only a single rank is specified in src_ranks and dst_ranks! "
)
comms_utils.gracefulExit()
elif self.collectiveArgs.pt2pt == "pairwise":
# pairwise pt2pt requires identical number of ranks in src_ranks and dst_ranks.
if len(self.collectiveArgs.src_ranks) != len(self.collectiveArgs.dst_ranks):
if self.global_rank == 0:
logger.error(
"Pairwise Pt2Pt requires identical number of members in src_ranks and dst_ranks! "
)
comms_utils.gracefulExit()
# pairwise pt2pt does not allow same rank to exist in both groups
if bool(
set(self.collectiveArgs.src_ranks).intersection(
self.collectiveArgs.dst_ranks
)
):
if self.global_rank == 0:
logger.error(
"Pairwise Pt2Pt requires distinct members in src_ranks and dst_ranks! "
)
comms_utils.gracefulExit()
if self.global_rank == 0:
print(
f"\t collective={self.collectiveArgs.collective}\t{self.collectiveArgs.pt2pt}, src_ranks={self.collectiveArgs.src_ranks}, dst_ranks={self.collectiveArgs.dst_ranks}"
)
def checkCollectiveRanks(self):
if self.collectiveArgs.collective == "incast":
# incast: set default value and exclude root
if not self.collectiveArgs.src_ranks:
self.collectiveArgs.src_ranks = [*range(self.comm_size)]
if self.collectiveArgs.srcOrDst in self.collectiveArgs.src_ranks:
self.collectiveArgs.src_ranks.remove(self.collectiveArgs.srcOrDst)
elif self.collectiveArgs.collective == "multicast":
# multicast: set default value and exclude root
if not self.collectiveArgs.dst_ranks:
self.collectiveArgs.dst_ranks = [*range(self.comm_size)]
if self.collectiveArgs.srcOrDst in self.collectiveArgs.dst_ranks:
self.collectiveArgs.dst_ranks.remove(self.collectiveArgs.srcOrDst)
if self.global_rank == 0:
print(
f"\t collective={self.collectiveArgs.collective}, src_ranks={self.collectiveArgs.src_ranks}, dst_ranks={self.collectiveArgs.dst_ranks}"
)
    def initCollectiveArgs(self, commsParams):
        """Populate self.collectiveArgs from commsParams and the backend.

        Queries rank/device details from the backend, computes the list of
        message sizes to sweep, copies all run parameters into
        self.collectiveArgs, validates the rank lists, and (when not in
        comms-only mode) allocates the tensors used by the overlapped compute
        kernel ("gemm" or "emb_lookup").

        :param commsParams: parsed benchmark parameters (commsParamsHolder)
        :return: tuple (local_rank, global_rank, world_size, group, curDevice,
                 curHwDevice, allSizes, computeFunc)
        """
        # lint was complaining that benchTime was too complex!
        (
            local_rank,
            global_rank,
            world_size,
            group,
            curDevice,
            curHwDevice,
        ) = comms_utils.get_rank_details(
            self.backendFuncs
        )  # Get ranks from the backendFuncs object, since we cannot use MPI (e.g.: TPU) to launch all the processes.
        self.backendFuncs.sayHello()  # Informs us where each process is running.
        groups = self.backendFuncs.get_groups()
        num_pgs = len(groups)
        self.comm_size = world_size
        self.global_rank = global_rank
        comms_utils.fixBeginSize(
            commsParams, world_size
        )  # Ensure that all-reduce and all-to-all have at least one element per rank.
        allSizes = comms_utils.getSizes(
            commsParams.beginSize, commsParams.endSize, commsParams.stepFactor
        )  # Given begin-size, end-size and step-factor, compute the message sizes to iterate on.
        if global_rank == 0:
            print(
                f"[Rank {global_rank:>3}] allSizes: {allSizes} local_rank: {local_rank} element_size: {commsParams.element_size}"
            )
        # copy everything the collective loop needs into the shared args object
        self.collectiveArgs.group = group
        self.collectiveArgs.groups = groups
        self.collectiveArgs.num_pgs = num_pgs
        self.collectiveArgs.device = curDevice
        self.collectiveArgs.world_size = world_size
        self.collectiveArgs.numIters = commsParams.numIters
        self.collectiveArgs.numWarmupIters = commsParams.numWarmupIters
        self.collectiveArgs.global_rank = global_rank
        self.collectiveArgs.backendFuncs = self.backendFuncs
        self.collectiveArgs.collective = commsParams.collective
        op = self.backendFuncs.get_reduce_op("sum")
        self.collectiveArgs.op = op
        self.collectiveArgs.srcOrDst = commsParams.srcOrDst
        self.collectiveArgs.src_ranks = commsParams.src_ranks
        self.collectiveArgs.dst_ranks = commsParams.dst_ranks
        self.collectiveArgs.pair = commsParams.pair
        self.collectiveArgs.collective_pair = commsParams.collective_pair
        self.collectiveArgs.pt2pt = commsParams.pt2pt
        self.collectiveArgs.window = commsParams.window
        # blockingFlag == 1 means synchronous (blocking) collectives
        self.collectiveArgs.asyncOp = False if commsParams.blockingFlag == 1 else True
        if commsParams.bitwidth < 32:
            # sub-32-bit runs need quantization context (timers, codecs)
            comms_utils.initQuantCommCtx(self.collectiveArgs, commsParams)
        # validate/default the rank lists for the chosen pattern
        if self.collectiveArgs.collective == "pt2pt":
            self.checkPt2PtRanks()
        else:
            self.checkCollectiveRanks()
        computeFunc = self.backendFuncs.noop
        if (
            commsParams.mode != "comms"
        ):  # Compute-mode related initialization if not in comms-only mode
            if commsParams.kernel == "gemm":
                # pre-allocate square operands for the overlapped GEMM kernel
                computeFunc = self.backendFuncs.gemm
                mm_dim = commsParams.mm_dim
                in1 = np.random.rand(mm_dim, mm_dim)
                MMin1 = torch.FloatTensor(in1).to(curDevice)
                in2 = np.random.rand(mm_dim, mm_dim)
                MMin2 = torch.FloatTensor(in2).to(curDevice)
                in3 = np.random.rand(mm_dim, mm_dim)
                MMin3 = torch.FloatTensor(in3).to(curDevice)
                MMout = self.backendFuncs.alloc_empty(
                    [mm_dim, mm_dim], commsParams.dtype, curDevice
                )
                self.collectiveArgs.MMout = MMout
                self.collectiveArgs.MMin1 = MMin1
                self.collectiveArgs.MMin2 = MMin2
                self.collectiveArgs.MMin3 = MMin3
                self.collectiveArgs.numComputePerColl = commsParams.num_compute
            elif commsParams.kernel == "emb_lookup":
                # pre-allocate embedding table, indices and offsets for the
                # overlapped embedding-lookup kernel
                computeFunc = self.backendFuncs.emb_lookup
                emb_dim = commsParams.emb_dim
                num_embeddings = commsParams.num_embs
                avg_length = commsParams.avg_len
                batch_size = commsParams.batch_size
                print(
                    f"emb_dim {emb_dim} num_embs {num_embeddings} avg_len {avg_length} bs {batch_size}"
                )
                self.collectiveArgs.EmbWeights = self.backendFuncs.alloc_empty(
                    [num_embeddings, emb_dim], torch.double, curDevice
                )
                self.collectiveArgs.TableOffsets = torch.LongTensor(
                    [0, num_embeddings]
                ).to(curDevice)
                self.collectiveArgs.Indices = torch.LongTensor(
                    np.random.randint(0, num_embeddings - 1, avg_length * batch_size)
                ).to(curDevice)
                # every sample looks up exactly avg_length indices
                lengths = np.ones((1, batch_size)) * avg_length
                flat_lengths = lengths.flatten()
                self.collectiveArgs.Offsets = torch.LongTensor(
                    [0] + np.cumsum(flat_lengths).tolist()
                ).to(curDevice)
                self.collectiveArgs.LookupOut = self.backendFuncs.alloc_empty(
                    [batch_size, emb_dim], torch.double, curDevice
                )
                self.collectiveArgs.AvgLengths = avg_length
                self.collectiveArgs.numComputePerColl = commsParams.num_compute
        return (
            local_rank,
            global_rank,
            world_size,
            group,
            curDevice,
            curHwDevice,
            allSizes,
            computeFunc,
        )
    def gatherBenchTime(self, collectiveArgs, commsParams, timeUsElapsedList):
        """All-gather per-rank timing results so every rank sees all timings.

        :param collectiveArgs: shared collective-args object (mutated in place)
        :param commsParams: benchmark parameters; only `backend` is consulted
        :param timeUsElapsedList: this rank's timing values (microseconds)
        :return: list of per-rank result tensors (one entry per rank)

        NOTE(review): when commsParams.backend == "xla", `timeList` is never
        assigned, so the final `return timeList` would raise NameError —
        confirm whether the xla path is expected to reach this method.
        """
        # Push the list to device, then do an all-gather.
        timeElapsedTensor = torch.tensor(
            timeUsElapsedList, device=self.backendFuncs.get_device()
        )
        collectiveArgs.opTensor = None
        if commsParams.backend != "xla":
            # pre-allocate one destination tensor per rank for all_gather
            timeList = list(torch.ones(
                (self.comm_size,) + timeElapsedTensor.shape,
                dtype=timeElapsedTensor.dtype,
                device=timeElapsedTensor.device,
            ).unbind(0))
            collectiveArgs.opTensor = timeList
        collectiveArgs.ipTensor = timeElapsedTensor
        collectiveArgs.asyncOp = False
        collectiveArgs.dataSize = (
            timeElapsedTensor.nelement() * timeElapsedTensor.element_size()
        )
        collectiveArgs.numElements = timeElapsedTensor.nelement()
        # use allgather as all process groups should support it
        self.backendFuncs.all_gather(collectiveArgs)
        self.backendFuncs.complete_accel_ops(collectiveArgs)
        return timeList
    def printPreamble(self, commsParams):
        """Print the column-header row that matches the result layout used by
        the reportBenchTime* methods (only called on rank 0 by benchTime).

        Four header variants exist: pt2pt, quantized collective
        (bitwidth < 32), plain collective, and paired collective; the column
        widths here must stay in sync with the corresponding print calls in
        the report methods.
        """
        logger.debug(f"\tcommsParams: {str(commsParams.__dict__)}")
        header = "\n\tCOMMS-RES"
        if self.collectiveArgs.collective == "pt2pt":
            # pt2pt: latency percentiles plus uni-/bi-directional bandwidths
            header += "{:>15}{:>20}{:>10}{:>10}{:>25}{:>10}{:>10}{:>15}{:>15}{:>18}{:>18}".format(
                "size (B)",
                "pingLatency(us):p50",
                "p75",
                "p95",
                "pingPongLatency(us):p50",
                "p75",
                "p95",
                "avgUniBW(GB/s)",
                "avgBiBW(GB/s)",
                "totalUniBW(GB/s)",
                "totalBiBW(GB/s)",
            )
        else:
            if commsParams.bitwidth < 32:
                # quantized run: break p95 latency into quant/comms/de-quant
                header += "-QUANT\t{:>15}{:>18}{:>25}{:>15}{:>15}{:>15}".format(
                    "size (B)",
                    "nElementsPerRank",
                    "P95 Latency(us): Quant",
                    "Comms",
                    "De-Quant",
                    "Overall",
                )
            elif not self.collectiveArgs.pair:
                # plain single-collective run
                header += (
                    "{:>15}{:>18}{:>18}{:>12}{:>12}{:>12}{:>12}{:>15}{:>12}".format(
                        "size (B)",
                        "nElementsPerRank",
                        "Latency(us):p50",
                        "p75",
                        "p95",
                        "Min",
                        "Max",
                        "AlgBW(GB/s)",
                        "BusBW(GB/s)",
                    )
                )
            else:
                # paired-collective run: adds the pair's element count column
                header += "{:>15}{:>18}{:>22}{:>18}{:>12}{:>12}{:>12}{:>12}{:>15}{:>12}".format(
                    "total-size (B)",
                    "nElementsPerRank",
                    "nElementsPairPerRank",
                    "Latency(us):p50",
                    "p75",
                    "p95",
                    "Min",
                    "Max",
                    "AlgBW(GB/s)",
                    "BusBW(GB/s)",
                )
        print(header)
    def reportBenchTimeCollWithQuant(
        self,
        commsParams,
        results,
        tensorList,
        quantTimeTensorList,
        dequantTimeTensorList,
    ):
        """Print one result row for a quantized (bitwidth < 32) collective.

        Converts the gathered per-rank latency, quantization and
        de-quantization timings to numpy, takes the p95 of each, and prints
        them in the COMMS-RES-QUANT column layout from printPreamble. The
        comms column is derived as p95 - quant_p95 - dequant_p95.
        """
        if commsParams.backend == "xla":
            # xla all_gather returns one flattened tensor; reshape to per-rank rows
            latencyAcrossRanks = torch.transpose(tensorList.view(-1, 1), 0, 1)[0]
            latencyAcrossRanks = latencyAcrossRanks.cpu().detach().numpy()
            # quant tensor
            quantLatencyAcrossRanks = torch.transpose(
                quantTimeTensorList.view(-1, 1), 0, 1
            )[0]
            quantLatencyAcrossRanks = quantLatencyAcrossRanks.cpu().detach().numpy()
            # dequant tensor
            dequantLatencyAcrossRanks = torch.transpose(
                dequantTimeTensorList.view(-1, 1), 0, 1
            )[0]
            dequantLatencyAcrossRanks = dequantLatencyAcrossRanks.cpu().detach().numpy()
        else:
            if isinstance(tensorList, list):
                tensorList = [t.cpu().detach().numpy() for t in tensorList]
            latencyAcrossRanks = np.array(tensorList)
            # quant tensor
            quantLatencyAcrossRanks = np.array(quantTimeTensorList)
            # dequant tensor
            dequantLatencyAcrossRanks = np.array(dequantTimeTensorList)
        p95 = np.percentile(latencyAcrossRanks, 95)
        quant_p95 = np.percentile(quantLatencyAcrossRanks, 95)
        dequant_p95 = np.percentile(dequantLatencyAcrossRanks, 95)
        print(
            "\tCOMMS-RES-QUANT\t{:>15}{:>18}{:>25}{:>15}{:>15}{:>15}".format(
                results["memSize"],
                str("%d" % (results["numElements"])),
                str("%.1f" % (quant_p95)),
                str("%.1f" % (p95 - quant_p95 - dequant_p95)),
                str("%.1f" % (dequant_p95)),
                str("%.1f" % (p95)),
                # str("%.3f" % (algBW)),
                # str("%.3f" % (busBW)),
            )
        )
def reportBenchTime(
self,
commsParams,
results,
tensorList,
quantTimeTensorList,
dequantTimeTensorList,
):
# convernt num_elements to # of elements per rank
if commsParams.collective in ("all_to_all", "all_to_allv"):
results["numElements"] = int(
results["numElements"] // commsParams.comms_world_info.world_size
)
if commsParams.collective == "pt2pt":
self.reportBenchTimePt2Pt(commsParams, tensorList, results)
elif commsParams.bitwidth < 32:
self.reportBenchTimeCollWithQuant(
commsParams,
results,
tensorList,
quantTimeTensorList,
dequantTimeTensorList,
)
else:
self.reportBenchTimeColl(commsParams, results, tensorList)
    def reportBenchTimeColl(self, commsParams, results, tensorList):
        """Print one COMMS-RES result row for a (non-quantized) collective.

        Converts the gathered per-rank latencies to numpy, restricts them to
        the ranks that actually communicated (root + dst for multicast,
        root + src for incast, all ranks otherwise), computes latency
        percentiles/min/max, rescales busBW by bitwidth/32, and prints either
        the plain or the paired-collective row layout from printPreamble.
        """
        if commsParams.backend == "xla":
            # xla all_gather returns one flattened tensor; reshape to per-rank rows
            latencyAcrossRanks = torch.transpose(tensorList.view(-1, 1), 0, 1)[0]
            latencyAcrossRanks = latencyAcrossRanks.cpu().detach().numpy()
        else:
            if isinstance(tensorList, list):
                tensorList = [t.cpu().detach().numpy() for t in tensorList]
            latencyAcrossRanks = np.array(tensorList)
        logger.debug(f"Latency across all ranks: {latencyAcrossRanks}")
        # Include only communicating ranks
        if self.collectiveArgs.collective == "multicast":
            commRanks = [self.collectiveArgs.srcOrDst] + self.collectiveArgs.dst_ranks
        elif self.collectiveArgs.collective == "incast":
            commRanks = [self.collectiveArgs.srcOrDst] + self.collectiveArgs.src_ranks
        else:
            commRanks = range(self.collectiveArgs.world_size)
        latencyAcrossCommRanks = latencyAcrossRanks[commRanks]
        logger.debug(
            "Latency across communicating ranks (%s): %s"
            % (commRanks, latencyAcrossCommRanks)
        )
        p50 = np.percentile(latencyAcrossCommRanks, 50)
        p75 = np.percentile(latencyAcrossCommRanks, 75)
        p95 = np.percentile(latencyAcrossCommRanks, 95)
        minlat = np.amin(latencyAcrossCommRanks)
        maxlat = np.amax(latencyAcrossCommRanks)
        # adjust busBW for reduced bitwidth (quantized traffic moves fewer bytes)
        busBW = results["busBW"] * (commsParams.bitwidth / 32.0)
        if not self.collectiveArgs.pair:
            print(
                "\tCOMMS-RES{:>15}{:>18}{:>18}{:>12}{:>12}{:>12}{:>12}{:>15}{:>12}".format(
                    results["memSize"],
                    str("%d" % (results["numElements"])),
                    str("%.1f" % (p50)),
                    str("%.1f" % (p75)),
                    str("%.1f" % (p95)),
                    str("%.1f" % (minlat)),
                    str("%.1f" % (maxlat)),
                    str("%.3f" % (results["algBW"])),
                    str("%.3f" % (busBW)),
                )
            )
        else:
            # convert the pair collective's count to elements per rank
            if commsParams.collective_pair in ("all_to_all", "all_to_allv"):
                results["numElements_pair"] = int(
                    results["numElements_pair"]
                    // commsParams.comms_world_info.world_size
                )
            print(
                "\tCOMMS-RES{:>15}{:>18}{:>22}{:>18}{:>12}{:>12}{:>12}{:>12}{:>15}{:>12}".format(
                    results["memSize"],
                    str("%d" % (results["numElements"])),
                    str("%d" % (results["numElements_pair"])),
                    str("%.1f" % (p50)),
                    str("%.1f" % (p75)),
                    str("%.1f" % (p95)),
                    str("%.1f" % (minlat)),
                    str("%.1f" % (maxlat)),
                    str("%.3f" % (results["algBW"])),
                    str("%.3f" % (busBW)),
                )
            )
    def reportBenchTimePt2Pt(self, commsParams, resultsAcrossRanks, results):
        """Print one COMMS-RES result row for a pt2pt run.

        Each rank contributed a 4-element tensor
        [pingLatency, pingPongLatency, uniBW, biBW]; this unpacks them,
        restricts to the communicating ranks (src + dst), computes latency
        percentiles and average/total bandwidths, and prints the pt2pt row
        layout from printPreamble. Totals are halved because each transfer is
        counted once by the sender and once by the receiver.
        """
        pingLatencyAcrossRanks = []
        pingPongLatencyAcrossRanks = []
        uniBWAcrossRanks = []
        biBWAcrossRanks = []
        # unpack the per-rank 4-tuples gathered by gatherBenchTime
        for curRankTensor in resultsAcrossRanks:
            pingLatencyAcrossRanks.append(curRankTensor[0].item())
            pingPongLatencyAcrossRanks.append(curRankTensor[1].item())
            uniBWAcrossRanks.append(curRankTensor[2].item())
            biBWAcrossRanks.append(curRankTensor[3].item())
        pingLatencyAcrossRanks = np.array(pingLatencyAcrossRanks)
        pingPongLatencyAcrossRanks = np.array(pingPongLatencyAcrossRanks)
        uniBWAcrossRanks = np.array(uniBWAcrossRanks)
        biBWAcrossRanks = np.array(biBWAcrossRanks)
        # Include only communicating ranks
        commRanks = self.collectiveArgs.src_ranks + self.collectiveArgs.dst_ranks
        pingLatencyAcrossCommRanks = pingLatencyAcrossRanks[commRanks]
        pingPongLatencyAcrossCommRanks = pingPongLatencyAcrossRanks[commRanks]
        uniBWAcrossCommRanks = uniBWAcrossRanks[commRanks]
        biBWAcrossCommRanks = biBWAcrossRanks[commRanks]
        logger.debug(
            "Ping latency across communicating ranks (%s): %s"
            % (commRanks, pingLatencyAcrossCommRanks)
        )
        logger.debug(
            "PingPong latency across communicating ranks (%s): %s"
            % (commRanks, pingPongLatencyAcrossCommRanks)
        )
        logger.debug(
            "UniBW across all communicating ranks (%s): %s"
            % (commRanks, uniBWAcrossCommRanks)
        )
        logger.debug(
            "BiBW across all communicating ranks (%s): %s"
            % (commRanks, biBWAcrossCommRanks)
        )
        avgUniBW = np.mean(uniBWAcrossCommRanks)
        avgBiBW = np.mean(biBWAcrossCommRanks)
        # each transfer is measured on both ends, so halve the summed totals
        totalUniBW = np.sum(uniBWAcrossCommRanks) / 2
        totalBiBW = np.sum(biBWAcrossCommRanks) / 2
        ping_p50 = np.percentile(pingLatencyAcrossCommRanks, 50)
        ping_p75 = np.percentile(pingLatencyAcrossCommRanks, 75)
        ping_p95 = np.percentile(pingLatencyAcrossCommRanks, 95)
        ping_pong_p50 = np.percentile(pingPongLatencyAcrossCommRanks, 50)
        ping_pong_p75 = np.percentile(pingPongLatencyAcrossCommRanks, 75)
        ping_pong_p95 = np.percentile(pingPongLatencyAcrossCommRanks, 95)
        print(
            "\tCOMMS-RES{:>15}{:>20}{:>10}{:>10}{:>25}{:>10}{:>10}{:>15}{:>15}{:>18}{:>18}".format(
                results["memSize"],
                str("%.1f" % (ping_p50)),
                str("%.1f" % (ping_p75)),
                str("%.1f" % (ping_p95)),
                str("%.1f" % (ping_pong_p50)),
                str("%.1f" % (ping_pong_p75)),
                str("%.1f" % (ping_pong_p95)),
                str("%.3f" % (avgUniBW)),
                str("%.3f" % (avgBiBW)),
                str("%.3f" % (totalUniBW)),
                str("%.3f" % (totalBiBW)),
            )
        )
    def benchTime(self, index, commsParams, backendFuncs):
        """Main benchmark loop: sweep all message sizes and report results.

        For each size: prepare input/output tensors (and optionally the paired
        collective's tensors), run either the pt2pt tests or the collective
        (optionally overlapped with a compute kernel), gather per-rank timings
        via all_gather, and have rank 0 print one result row.

        :param index: benchmark index (unused here; part of the callback signature)
        :param commsParams: parsed benchmark parameters
        :param backendFuncs: backend implementation (same object as self.backendFuncs)
        """
        # Get NW stack specific parameters
        (
            local_rank,
            global_rank,
            world_size,
            group,
            curDevice,
            curHwDevice,
            allSizes,
            computeFunc,
        ) = self.initCollectiveArgs(commsParams)
        backendFuncs.sync_barrier(self.collectiveArgs)
        if global_rank == 0:
            self.printPreamble(commsParams)
        for curSize in allSizes:
            results = {}
            timeUsElapsedList = []
            quantTimeElapsedList = []
            dequantTimeElapsedList = []
            numElements = int(curSize // commsParams.element_size)
            # default to no-op; replaced below unless in compute-only mode
            collectiveFunc = self.backendFuncs.noop
            collectiveFunc_pair = self.backendFuncs.noop
            if (
                commsParams.mode != "compute"
            ):  # comms specific initializations if not in compute-only mode
                # set corresponding function pointers
                if commsParams.collective != "pt2pt":
                    collectiveFunc = backendFuncs.collectiveFunc[commsParams.collective]
                (
                    self.collectiveArgs.ipTensor,
                    self.collectiveArgs.opTensor,
                ) = self.prepComm(
                    curComm={
                        "in_msg_size": numElements,
                        "out_msg_size": numElements,
                        "world_size": world_size,
                    },
                    commsParams=commsParams,
                )
            # Setup the arguments.
            self.collectiveArgs.dataSize = curSize
            self.collectiveArgs.numElements = numElements
            self.collectiveArgs.waitObj = []
            results["numElements"] = numElements
            if (
                commsParams.pair and commsParams.mode != "compute"
            ):  # comms-pair specific initializations if not in compute-only mode:
                # set corresponding function pointers
                collectiveFunc_pair = backendFuncs.collectiveFunc[
                    commsParams.collective_pair
                ]
                # TODO: allow user to set specific size
                # Setup the arguments.
                self.collectiveArgs.dataSize_pair = curSize
                self.collectiveArgs.numElements_pair = int(
                    self.collectiveArgs.dataSize_pair // commsParams.element_size
                )
                results["numElements_pair"] = self.collectiveArgs.numElements_pair
                (
                    self.collectiveArgs.ipTensor_pair,
                    self.collectiveArgs.opTensor_pair,
                ) = self.prepComm(
                    curComm={
                        "in_msg_size": self.collectiveArgs.numElements_pair,
                        "out_msg_size": self.collectiveArgs.numElements_pair,
                        "world_size": world_size,
                    },
                    commsParams=commsParams,
                )
            # self.collectiveArgs has all the information on the experiment.
            if commsParams.collective == "pt2pt":
                # pt2pt produces four metrics per rank (ping, ping-pong, uniBW, biBW)
                results.update(self.runPt2Pt())
                timeUsElapsedList = [
                    np.mean(np.array(results["pingPerIterNS"])) / 1e3,
                    np.mean(np.array(results["pingPongPerIterNS"])) / 1e3,
                    results["avgUniBW"],
                    results["avgBiBW"],
                ]  # time in US
                if (
                    global_rank in self.collectiveArgs.src_ranks
                    or global_rank in self.collectiveArgs.dst_ranks
                ):
                    logger.debug(timeUsElapsedList)
            else:
                # collectives produce a single latency metric per rank
                results.update(
                    self.runColl(
                        comm_fn=collectiveFunc,
                        compute_fn=computeFunc,
                        comm_fn_pair=collectiveFunc_pair,
                    )
                )
                timeUsElapsedList = [results["timeUS"]]
            # perform data validation check on the final opTensor
            if commsParams.dcheck == 1:
                self.dcheck(commsParams, curSize, self.collectiveArgs.opTensor)
            backendFuncs.clear_memory(self.collectiveArgs)
            # gather quantization overhead if enabled
            if commsParams.bitwidth < 32:
                # calculate average (de-)quantization overhead
                results["quantTimeUS"] = (
                    self.collectiveArgs.quant_time.getTimeUS()
                    / self.collectiveArgs.numIters
                )
                results["dequantTimeUS"] = (
                    self.collectiveArgs.dequant_time.getTimeUS()
                    / self.collectiveArgs.numIters
                )
                quantTimeElapsedList.append(results["quantTimeUS"])
                dequantTimeElapsedList.append(results["dequantTimeUS"])
                logger.debug(quantTimeElapsedList)
                quantTimeElapsedList = self.gatherBenchTime(
                    self.collectiveArgs, commsParams, quantTimeElapsedList
                )
                dequantTimeElapsedList = self.gatherBenchTime(
                    self.collectiveArgs, commsParams, dequantTimeElapsedList
                )
            # gather and report performance to stdout
            tensorList = self.gatherBenchTime(
                self.collectiveArgs, commsParams, timeUsElapsedList
            )
            if global_rank == 0:
                self.reportBenchTime(
                    commsParams,
                    results,
                    tensorList,
                    quantTimeElapsedList,
                    dequantTimeElapsedList,
                )
            self.backendFuncs.sync_barrier(
                self.collectiveArgs, desc=f"curSize_{curSize}"
            )
        comms_utils.clearQuantCommCtx(self.collectiveArgs)
        # wait until rank 0 reports results, so other ranks don't mess up the output
        self.backendFuncs.sync_barrier(self.collectiveArgs, "benchtime")
    def runBench(self, comms_world_info, commsParams):
        """Instantiate the requested backend and launch the benchmark.

        :param comms_world_info: world/rank topology information
        :param commsParams: parsed benchmark parameters; `nw_stack` selects the
            backend ("pytorch-dist" or "pytorch-xla-tpu"); anything else exits
        :raises ValueError: re-raised from the backend; for the "ucc" backend a
            critical hint is logged first
        """
        # Init the desired backend (imports are local so only the chosen
        # backend's dependencies need to be installed)
        if commsParams.nw_stack == "pytorch-dist":
            from pytorch_dist_backend import PyTorchDistBackend
            backendObj = PyTorchDistBackend(comms_world_info, commsParams)
        elif commsParams.nw_stack == "pytorch-xla-tpu":
            from pytorch_tpu_backend import PyTorchTPUBackend
            backendObj = PyTorchTPUBackend(comms_world_info, commsParams)
        else:
            logger.error("Unsupported NW stack! ")
            comms_utils.gracefulExit()
        self.backendFuncs = backendObj
        try:
            backendObj.benchmark_comms()
        except ValueError as ve:
            if commsParams.backend == "ucc":
                logger.critical("PyTorch UCC not implemented? {}".format(repr(ve)))
            raise
def main():
    """Entry point: parse CLI arguments, build the world/params holders, and
    run the collective benchmark."""
    collBenchObj = commsCollBench()
    ### parse arguments ###
    parser = argparse.ArgumentParser(
        description="PARAM-Comm Benchmark",
        formatter_class=MultilineFormatter,
    )
    args, leftovers = collBenchObj.readArgs(parser)
    collBenchObj.checkArgs(args)
    comms_env_params = comms_utils.read_comms_env_vars()
    # only rank 0 echoes the configuration
    if comms_env_params["global_rank"] == 0:
        print("\t MPI environment: %s " % (str(comms_env_params)))
        print(
            "\t backend: %s nw-stack: %s mode: %s args.b: %d args.e: %d args.f: %d args.z: %s args.master_ip: %s "
            % (
                args.backend,
                args.nw_stack,
                args.mode,
                args.b,
                args.e,
                args.f,
                args.z,
                args.master_ip,
            )
        )
    # bytes per element for the requested dtype
    element_size = torch.ones([1], dtype=args.dtype).element_size()
    comms_world_info = comms_utils.comms_world_info_holder(
        args.master_ip, args.master_port, args.num_tpu_cores, comms_env_params
    )
    commsParams = comms_utils.commsParamsHolder(
        args, comms_world_info, element_size, collBenchObj.benchTime
    )
    # overlapped pair collectives use a second process group
    if args.pair and args.overlap_pair_pgs:
        commsParams.num_pgs = 2
    collBenchObj.runBench(comms_world_info, commsParams)
if __name__ == "__main__":
    main()
| true
| true
|
f717719b096d0afc9e9656a8a3d67b6ed3d90cd0
| 3,338
|
py
|
Python
|
mime/agent/script_agent_augmented.py
|
rjgpinel/mime-release
|
26a850c4ba5b702b86d068995614163338fb01df
|
[
"MIT"
] | null | null | null |
mime/agent/script_agent_augmented.py
|
rjgpinel/mime-release
|
26a850c4ba5b702b86d068995614163338fb01df
|
[
"MIT"
] | null | null | null |
mime/agent/script_agent_augmented.py
|
rjgpinel/mime-release
|
26a850c4ba5b702b86d068995614163338fb01df
|
[
"MIT"
] | null | null | null |
import itertools
import types
import numpy as np
import torch
import click
import gym
import time
import yaml
from robos2r.model import build_model
from .agent import Agent
from .script_agent import ScriptAgent, make_noised
from .utils import Rate
from PIL import Image
from pathlib import Path
from einops import rearrange
from torchvision import transforms as T
@click.command(help="script_agent env_name [options]")
@click.argument("env_name", type=str)
@click.option("-s", "--seed", default=0, help="seed")
@click.option("-t", "--times-repeat", default=1, help="times to repeat the script")
@click.option("-n", "--add-noise", is_flag=True, help="adding noise to actions or not")
@click.option(
    "-sc",
    "--skill-collection/--no-skill-collection",
    is_flag=True,
    help="whether to show the skills collection",
)
@click.option(
    "-m",
    "--diffaug-model-path",
    default="/home/rgarciap/Remote/models/diffs2r_new/resnet_adam_lr_1e-3_lraug0.01_bs_64_L8/",
    help="directory holding the trained DiffAug model (config.yml + best_checkpoint.pth)",
)
def main(env_name, seed, times_repeat, add_noise, skill_collection, diffaug_model_path):
    """Replay a scripted agent in *env_name*, passing every RGB observation
    (and its mask) through a trained DiffAug augmentor and saving the
    augmented frames as ``0/output<idx>.jpeg``.

    The model path was previously hard-coded; it is now a CLI option whose
    default preserves the old value, so existing invocations are unaffected.
    """
    print("Loading Augmentor model...")
    diffaug_model_path = Path(diffaug_model_path)
    diffaug_cfg_path = diffaug_model_path / "config.yml"
    with open(str(diffaug_cfg_path), "rb") as f:
        diffaug_cfg = yaml.load(f, Loader=yaml.FullLoader)
    # rebuild the augmentation model with the exact training-time configuration
    model_cfg = dict(
        name="diffaug",
        reg_output_size=3,
        aug_pipeline=diffaug_cfg["aug_pipeline"],
        multi=diffaug_cfg["multi_pipeline"],
        num_layers=diffaug_cfg["num_layers"],
        gumbel=diffaug_cfg["gumbel"],
        backbone_name=diffaug_cfg["backbone_name"],
    )
    diffaug_model = build_model(model_cfg)
    diffaug_ckp_path = diffaug_model_path / "best_checkpoint.pth"
    checkpoint = torch.load(str(diffaug_ckp_path), map_location="cpu")
    diffaug_model.load_state_dict(checkpoint["model"])
    # only the augmentor sub-module is needed at inference time
    augmentor = diffaug_model.augmentor
    augmentor.to("cpu")
    augmentor.eval()
    print("Model loaded")
    env = gym.make(env_name)
    scene = env.unwrapped.scene
    scene.renders(True)
    if skill_collection:
        scene.skill_data_collection = True
    env.seed(seed)
    for _ in range(times_repeat):
        obs = env.reset()
        agent = ScriptAgent(env)
        done = False
        action = agent.get_action()
        if add_noise:
            make_noised(action)
        frame_idx = 0
        # defensive default: if the script yields no action at all, the loop
        # body never runs and `info` would otherwise be unbound below
        info = {"success": False, "failure_message": ""}
        while not done and action is not None:
            obs, reward, done, info = env.step(action)
            # augment the current RGB observation together with its mask
            im = T.ToTensor()(obs["rgb0"]).unsqueeze(0)
            mask = torch.tensor(obs["mask0"]).unsqueeze(0)
            im, mask = augmentor((im, mask))
            # a single detach suffices to drop the autograd graph
            # (the original called .detach() twice)
            im = rearrange(im.detach().squeeze(0).numpy(), "c h w -> h w c")
            im = Image.fromarray((im * 255).astype(np.uint8))
            im.save(f"0/output{frame_idx}.jpeg")
            frame_idx += 1
            action = agent.get_action()
            if add_noise and action is not None:
                make_noised(action)
        if action is None:
            info["failure_message"] = "End of Script."
        if not info["success"]:
            click.secho(
                "Failure Seed {}: {}".format(seed, info["failure_message"]), fg="red"
            )
        print("Success", info["success"])
if __name__ == "__main__":
    main()
| 31.490566
| 107
| 0.641402
|
import itertools
import types
import numpy as np
import torch
import click
import gym
import time
import yaml
from robos2r.model import build_model
from .agent import Agent
from .script_agent import ScriptAgent, make_noised
from .utils import Rate
from PIL import Image
from pathlib import Path
from einops import rearrange
from torchvision import transforms as T
@click.command(help="script_agent env_name [options]")
@click.argument("env_name", type=str)
@click.option("-s", "--seed", default=0, help="seed")
@click.option("-t", "--times-repeat", default=1, help="times to repeat the script")
@click.option("-n", "--add-noise", is_flag=True, help="adding noise to actions or not")
@click.option(
    "-sc",
    "--skill-collection/--no-skill-collection",
    is_flag=True,
    help="whether to show the skills collection",
)
def main(env_name, seed, times_repeat, add_noise, skill_collection):
    """Replay a scripted agent in *env_name*, passing every RGB observation
    (and its mask) through a trained DiffAug augmentor and saving the
    augmented frames as ``0/output<j>.jpeg``.
    """
    print("Loading Augmentor model...")
    # NOTE(review): hard-coded, user-specific model path — consider a CLI option
    diffaug_model_path = "/home/rgarciap/Remote/models/diffs2r_new/resnet_adam_lr_1e-3_lraug0.01_bs_64_L8/"
    diffaug_model_path = Path(diffaug_model_path)
    diffaug_cfg_path = diffaug_model_path / "config.yml"
    with open(str(diffaug_cfg_path), "rb") as f:
        diffaug_cfg = yaml.load(f, Loader=yaml.FullLoader)
    # rebuild the augmentation model with the training-time configuration
    model_cfg = dict(
        name="diffaug",
        reg_output_size=3,
        aug_pipeline=diffaug_cfg["aug_pipeline"],
        multi=diffaug_cfg["multi_pipeline"],
        num_layers=diffaug_cfg["num_layers"],
        gumbel=diffaug_cfg["gumbel"],
        backbone_name=diffaug_cfg["backbone_name"],
    )
    diffaug_model = build_model(model_cfg)
    diffaug_ckp_path = diffaug_model_path / "best_checkpoint.pth"
    checkpoint = torch.load(str(diffaug_ckp_path), map_location="cpu")
    diffaug_model.load_state_dict(checkpoint["model"])
    # only the augmentor sub-module is needed at inference time
    augmentor = diffaug_model.augmentor
    augmentor.to("cpu")
    augmentor.eval()
    print("Model loaded")
    env = gym.make(env_name)
    scene = env.unwrapped.scene
    scene.renders(True)
    if skill_collection:
        scene.skill_data_collection = True
    env.seed(seed)
    for _ in range(times_repeat):
        obs = env.reset()
        agent = ScriptAgent(env)
        # NOTE(review): plt, i, rate and frames are never used below
        import matplotlib.pyplot as plt
        done = False
        i = 0
        rate = Rate(scene.dt)
        action = agent.get_action()
        if add_noise:
            make_noised(action)
        frames = []
        j = 0
        while not done and action is not None:
            obs, reward, done, info = env.step(action)
            im = T.ToTensor()(obs["rgb0"]).unsqueeze(0)
            mask = torch.tensor(obs["mask0"]).unsqueeze(0)
            im, mask = augmentor((im, mask))
            # NOTE(review): .detach().detach() is redundant; one detach suffices
            im = rearrange(im.detach().detach().squeeze(0).numpy(), "c h w -> h w c")
            im = Image.fromarray((im * 255).astype(np.uint8))
            im.save(f"0/output{j}.jpeg")
            j += 1
            action = agent.get_action()
            if add_noise and action is not None:
                make_noised(action)
        if action is None:
            info["failure_message"] = "End of Script."
        if not info["success"]:
            click.secho(
                "Failure Seed {}: {}".format(seed, info["failure_message"]), fg="red"
            )
        print("Success", info["success"])
if __name__ == "__main__":
    main()
| true
| true
|
f71771ce6064be5fc44ff24790cce8db6106923c
| 8,762
|
py
|
Python
|
benchmark_runner/common/clouds/shared/s3/s3_operations.py
|
kpouget/benchmark-runner
|
eecdb57d12f8c17268800632722af8fe8046185a
|
[
"Apache-2.0"
] | 10
|
2021-07-21T21:44:20.000Z
|
2022-02-24T22:01:13.000Z
|
benchmark_runner/common/clouds/shared/s3/s3_operations.py
|
kpouget/benchmark-runner
|
eecdb57d12f8c17268800632722af8fe8046185a
|
[
"Apache-2.0"
] | 83
|
2021-07-20T14:37:44.000Z
|
2022-03-24T13:48:04.000Z
|
benchmark_runner/common/clouds/shared/s3/s3_operations.py
|
kpouget/benchmark-runner
|
eecdb57d12f8c17268800632722af8fe8046185a
|
[
"Apache-2.0"
] | 6
|
2021-07-14T21:12:48.000Z
|
2022-02-15T12:48:27.000Z
|
import os
import boto3
import typeguard
from botocore.exceptions import ClientError
from os import listdir
from os.path import isfile, join
from benchmark_runner.common.clouds.shared.s3.s3_operations_exceptions import S3FileNotUploaded, S3FileNotDownloaded, S3FileNotDeleted, S3KeyNotCreated, S3FileNotExist, S3FailedCreatePresingedURL
from benchmark_runner.main.environment_variables import environment_variables
class S3Operations:
""" This class is responsible for S3 operations """
    def __init__(self, region_name: str = '', endpoint_url: str = None, aws_access_key_id: str = None, aws_secret_access_key: str = None):
        """Create a boto3 S3 client.

        When *region_name* is given, the explicit arguments are used
        (this path exists for pytest); otherwise region/endpoint/credentials
        are read from the environment-variables dictionary.

        :param region_name: AWS region; empty string means "use environment"
        :param endpoint_url: optional custom endpoint (None for the AWS default)
        :param aws_access_key_id: access key id (only used with region_name)
        :param aws_secret_access_key: secret key (only used with region_name)
        """
        # environment variables
        self.__environment_variables_dict = environment_variables.environment_variables_dict
        # must add region for pytest
        if region_name:
            self.__region = region_name
            self.__endpoint_url = endpoint_url
            self.__aws_access_key_id = aws_access_key_id
            self.__aws_secret_access_key = aws_secret_access_key
        else:
            self.__region = self.__environment_variables_dict.get('region_name', '')
            # must be None for pytest
            self.__endpoint_url = self.__environment_variables_dict.get('endpoint_url', None)
            self.__aws_access_key_id = self.__environment_variables_dict.get('access_key_id', '')
            self.__aws_secret_access_key = self.__environment_variables_dict.get('secret_access_key', '')
        self.__s3_client = boto3.client(service_name='s3',
                                        region_name=self.__region,
                                        endpoint_url=self.__endpoint_url,
                                        aws_access_key_id=self.__aws_access_key_id,
                                        aws_secret_access_key=self.__aws_secret_access_key)
@typeguard.typechecked
def upload_file(self, file_name_path: str, bucket: str, key: str, upload_file: str):
"""
This method upload file to s3
:param file_name_path:'/home/user/test.txt'
:param bucket:'benchmark'
:param key:'test-data'
:param upload_file:'test.txt'
:return:
"""
try:
self.__s3_client.upload_file(Filename=file_name_path,
Bucket=bucket,
Key=f'{key}/{upload_file}',
ExtraArgs={'ServerSideEncryption': 'AES256'})
except ClientError:
raise
except Exception:
raise S3FileNotUploaded
@typeguard.typechecked
def download_file(self, bucket: str, key: str, download_file: str, file_name_path: str):
"""
This method download file from s3
:param bucket:'benchmark'
:param key:'logs/ec2-idle/2021/01/19/18'
:param download_file: 'test.txt'
:param file_name_path:'D:\\Performance\\Projects\\py-image-service\\data\\rt_results\\test.txt'
:return:
"""
try:
if download_file:
self.__s3_client.download_file(Bucket=bucket, Key=f'{key}/{download_file}', Filename=file_name_path)
else:
self.__s3_client.download_file(Bucket=bucket, Key=key, Filename=file_name_path)
except ClientError:
raise
except Exception:
raise S3FileNotDownloaded
@typeguard.typechecked
def delete_file(self, bucket: str, key: str, file_name: str):
"""
This method delete file from s3
:param bucket:'benchmark'
:param key:'test-data'
:param file_name: 'test.txt'
:return:
"""
try:
self.__s3_client.delete_object(Bucket=bucket, Key=f'{key}/{file_name}')
except ClientError:
raise
except Exception:
raise S3FileNotDeleted
@typeguard.typechecked
def delete_folder(self, bucket: str, key: str):
"""
This method delete folder from s3
:param bucket:'benchmark'
:param key:'framework/test'
:return:
"""
try:
objects_to_delete = self.__s3_client.list_objects(Bucket=bucket, Prefix=key)
delete_keys = {
'Objects': [{'Key': k} for k in [obj['Key'] for obj in objects_to_delete.get('Contents', [])]]}
if delete_keys['Objects']:
self.__s3_client.delete_objects(Bucket=bucket, Delete=delete_keys)
except ClientError:
raise
except Exception:
raise S3FileNotDeleted
@typeguard.typechecked
def create_folder(self, bucket: str, key: str):
"""
This method download file from s3
:param bucket:'benchmark'
:param key:'framework/test'
:return:
"""
try:
self.__s3_client.put_object(Bucket=bucket, Key=key)
except ClientError:
raise
except Exception:
raise S3KeyNotCreated
@typeguard.typechecked
def file_exist(self, bucket: str, key: str, file_name: str):
"""
This method check if file exist
:param bucket:'benchmark'
:param key:'framework/test'
:param file_name:'file.txt'
:return:
"""
try:
response = self.__s3_client.list_objects_v2(Bucket=bucket, Prefix=key)
if response.get('Contents'):
for item in response['Contents']:
if file_name in item['Key']:
return True
return False
# Todo add custom error
except ClientError:
raise
except Exception:
raise S3FileNotExist
@typeguard.typechecked
def upload_objects(self, local_source: str, s3_target: str):
"""
This method upload local data folder to s3 target path
:param local_source: local data folder i.e. '/home/user/'
:param s3_target: target s3 path i.e. 'data_store/calc_image_data/'
:return:
"""
try:
if '/' in s3_target:
targets = s3_target.split('/')
bucket = targets[0]
key = '/'.join(targets[1:])
else:
bucket = s3_target
key = ''
files = [f for f in listdir(local_source) if isfile(join(local_source, f))]
for file in files:
filename = os.path.join(local_source, file)
self.upload_file(file_name_path=filename, bucket=bucket, key=key, upload_file=file)
except ClientError as err:
raise
except Exception:
raise S3FileNotUploaded
@typeguard.typechecked
def download_objects(self, s3_target: str, local_source: str):
"""
This method download from s3 target to local data folder
:param local_source: local data folder i.e. '/home/user/
:param s3_target: target s3 path i.e. 'data_store/calc_image_data/'
:return:
"""
files = []
try:
if '/' in s3_target:
targets = s3_target.split('/')
bucket = targets[0]
key = '/'.join(targets[1:])
else:
bucket = s3_target
key = ''
response = self.__s3_client.list_objects_v2(Bucket=bucket, Prefix=key)
if response.get('Contents'):
for item in response['Contents']:
if item['Key'].split('/')[-1]:
files.append(item['Key'].split('/')[-1])
else:
files.append(item['Key'])
for file in files:
file_name = os.path.join(local_source, file)
self.download_file(bucket=bucket, key=key, download_file=file, file_name_path=file_name)
except ClientError as err:
raise
except Exception:
raise S3FileNotDownloaded
@typeguard.typechecked
def generate_presigned_url(self, bucket: str, key: str, file_name: str):
"""
This method generate presigned url for specific uploaded object, default 7 days
:param bucket:'benchmark'
:param key:'logs/test-data'
:param file_name:'file.txt'
:return:
"""
try:
return self.__s3_client.generate_presigned_url('get_object',
Params={'Bucket': bucket, 'Key': f'{key}/{file_name}'},
ExpiresIn=604800)
# Todo add custom error
except ClientError:
raise
except Exception:
raise S3FailedCreatePresingedURL
| 37.127119
| 195
| 0.57738
|
import os
import boto3
import typeguard
from botocore.exceptions import ClientError
from os import listdir
from os.path import isfile, join
from benchmark_runner.common.clouds.shared.s3.s3_operations_exceptions import S3FileNotUploaded, S3FileNotDownloaded, S3FileNotDeleted, S3KeyNotCreated, S3FileNotExist, S3FailedCreatePresingedURL
from benchmark_runner.main.environment_variables import environment_variables
class S3Operations:
def __init__(self, region_name: str = '', endpoint_url: str = None, aws_access_key_id: str = None, aws_secret_access_key: str = None):
self.__environment_variables_dict = environment_variables.environment_variables_dict
if region_name:
self.__region = region_name
self.__endpoint_url = endpoint_url
self.__aws_access_key_id = aws_access_key_id
self.__aws_secret_access_key = aws_secret_access_key
else:
self.__region = self.__environment_variables_dict.get('region_name', '')
self.__endpoint_url = self.__environment_variables_dict.get('endpoint_url', None)
self.__aws_access_key_id = self.__environment_variables_dict.get('access_key_id', '')
self.__aws_secret_access_key = self.__environment_variables_dict.get('secret_access_key', '')
self.__s3_client = boto3.client(service_name='s3',
region_name=self.__region,
endpoint_url=self.__endpoint_url,
aws_access_key_id=self.__aws_access_key_id,
aws_secret_access_key=self.__aws_secret_access_key)
@typeguard.typechecked
def upload_file(self, file_name_path: str, bucket: str, key: str, upload_file: str):
try:
self.__s3_client.upload_file(Filename=file_name_path,
Bucket=bucket,
Key=f'{key}/{upload_file}',
ExtraArgs={'ServerSideEncryption': 'AES256'})
except ClientError:
raise
except Exception:
raise S3FileNotUploaded
@typeguard.typechecked
def download_file(self, bucket: str, key: str, download_file: str, file_name_path: str):
try:
if download_file:
self.__s3_client.download_file(Bucket=bucket, Key=f'{key}/{download_file}', Filename=file_name_path)
else:
self.__s3_client.download_file(Bucket=bucket, Key=key, Filename=file_name_path)
except ClientError:
raise
except Exception:
raise S3FileNotDownloaded
@typeguard.typechecked
def delete_file(self, bucket: str, key: str, file_name: str):
try:
self.__s3_client.delete_object(Bucket=bucket, Key=f'{key}/{file_name}')
except ClientError:
raise
except Exception:
raise S3FileNotDeleted
@typeguard.typechecked
def delete_folder(self, bucket: str, key: str):
try:
objects_to_delete = self.__s3_client.list_objects(Bucket=bucket, Prefix=key)
delete_keys = {
'Objects': [{'Key': k} for k in [obj['Key'] for obj in objects_to_delete.get('Contents', [])]]}
if delete_keys['Objects']:
self.__s3_client.delete_objects(Bucket=bucket, Delete=delete_keys)
except ClientError:
raise
except Exception:
raise S3FileNotDeleted
@typeguard.typechecked
def create_folder(self, bucket: str, key: str):
try:
self.__s3_client.put_object(Bucket=bucket, Key=key)
except ClientError:
raise
except Exception:
raise S3KeyNotCreated
@typeguard.typechecked
def file_exist(self, bucket: str, key: str, file_name: str):
try:
response = self.__s3_client.list_objects_v2(Bucket=bucket, Prefix=key)
if response.get('Contents'):
for item in response['Contents']:
if file_name in item['Key']:
return True
return False
except ClientError:
raise
except Exception:
raise S3FileNotExist
@typeguard.typechecked
def upload_objects(self, local_source: str, s3_target: str):
try:
if '/' in s3_target:
targets = s3_target.split('/')
bucket = targets[0]
key = '/'.join(targets[1:])
else:
bucket = s3_target
key = ''
files = [f for f in listdir(local_source) if isfile(join(local_source, f))]
for file in files:
filename = os.path.join(local_source, file)
self.upload_file(file_name_path=filename, bucket=bucket, key=key, upload_file=file)
except ClientError as err:
raise
except Exception:
raise S3FileNotUploaded
@typeguard.typechecked
def download_objects(self, s3_target: str, local_source: str):
files = []
try:
if '/' in s3_target:
targets = s3_target.split('/')
bucket = targets[0]
key = '/'.join(targets[1:])
else:
bucket = s3_target
key = ''
response = self.__s3_client.list_objects_v2(Bucket=bucket, Prefix=key)
if response.get('Contents'):
for item in response['Contents']:
if item['Key'].split('/')[-1]:
files.append(item['Key'].split('/')[-1])
else:
files.append(item['Key'])
for file in files:
file_name = os.path.join(local_source, file)
self.download_file(bucket=bucket, key=key, download_file=file, file_name_path=file_name)
except ClientError as err:
raise
except Exception:
raise S3FileNotDownloaded
@typeguard.typechecked
def generate_presigned_url(self, bucket: str, key: str, file_name: str):
try:
return self.__s3_client.generate_presigned_url('get_object',
Params={'Bucket': bucket, 'Key': f'{key}/{file_name}'},
ExpiresIn=604800)
except ClientError:
raise
except Exception:
raise S3FailedCreatePresingedURL
| true
| true
|
f71772584a21b6b069b86c34169c1b8debf5cb25
| 476
|
py
|
Python
|
erinn/python/models/__init__.py
|
swcjack6931677/ERINN
|
a4f3d0ad213515bc86e2a18575537d6affd472ac
|
[
"MIT"
] | null | null | null |
erinn/python/models/__init__.py
|
swcjack6931677/ERINN
|
a4f3d0ad213515bc86e2a18575537d6affd472ac
|
[
"MIT"
] | null | null | null |
erinn/python/models/__init__.py
|
swcjack6931677/ERINN
|
a4f3d0ad213515bc86e2a18575537d6affd472ac
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from . import CNN
from . import CNN1D
from . import CNN1D_Rx
from . import CNN1D_Tx
from . import DFN
# Globally-importable models.
from .CNN import get_cnn_relu
from .CNN1D import get_cnn1d_relu
from .CNN1D_Rx import get_cnn1d_rx_relu
from .CNN1D_Tx import get_cnn1d_tx
from .CNN1D_Tx import get_cnn1d_tx_relu
from .DFN import get_dfn_relu
| 25.052632
| 39
| 0.813025
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from . import CNN
from . import CNN1D
from . import CNN1D_Rx
from . import CNN1D_Tx
from . import DFN
from .CNN import get_cnn_relu
from .CNN1D import get_cnn1d_relu
from .CNN1D_Rx import get_cnn1d_rx_relu
from .CNN1D_Tx import get_cnn1d_tx
from .CNN1D_Tx import get_cnn1d_tx_relu
from .DFN import get_dfn_relu
| true
| true
|
f717729d6712f5bbd8b1c9f44dde5e6c1bc8107e
| 411
|
py
|
Python
|
server/config.py
|
ahnaf-zamil/flask-react-session-authenticaton-tutorial
|
88c454af9932435d0bd9ad1c16718beb6fc0e1c1
|
[
"MIT"
] | 1
|
2021-11-01T10:46:16.000Z
|
2021-11-01T10:46:16.000Z
|
server/config.py
|
ahnaf-zamil/flask-react-session-authenticaton-tutorial
|
88c454af9932435d0bd9ad1c16718beb6fc0e1c1
|
[
"MIT"
] | null | null | null |
server/config.py
|
ahnaf-zamil/flask-react-session-authenticaton-tutorial
|
88c454af9932435d0bd9ad1c16718beb6fc0e1c1
|
[
"MIT"
] | null | null | null |
from dotenv import load_dotenv
import os
import redis
# Load variables from a local .env file into os.environ before the config
# class body reads them at import time.
load_dotenv()
class ApplicationConfig:
    """Flask configuration: secrets from the environment, SQLite via
    SQLAlchemy, and Redis-backed server-side sessions (flask-session)."""
    # Raises KeyError at import time if SECRET_KEY is unset -- fails fast.
    SECRET_KEY = os.environ["SECRET_KEY"]
    SQLALCHEMY_TRACK_MODIFICATIONS = False  # disable event system overhead
    SQLALCHEMY_ECHO = True  # logs every SQL statement; NOTE(review): noisy outside development
    SQLALCHEMY_DATABASE_URI = r"sqlite:///./db.sqlite"
    SESSION_TYPE = "redis"  # store session data server-side in Redis
    SESSION_PERMANENT = False  # sessions expire when the browser closes
    SESSION_USE_SIGNER = True  # sign the session-id cookie
    # assumes a local Redis on the default port -- TODO confirm for deployment
    SESSION_REDIS = redis.from_url("redis://127.0.0.1:6379")
| 24.176471
| 60
| 0.734793
|
from dotenv import load_dotenv
import os
import redis
load_dotenv()
class ApplicationConfig:
SECRET_KEY = os.environ["SECRET_KEY"]
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_ECHO = True
SQLALCHEMY_DATABASE_URI = r"sqlite:///./db.sqlite"
SESSION_TYPE = "redis"
SESSION_PERMANENT = False
SESSION_USE_SIGNER = True
SESSION_REDIS = redis.from_url("redis://127.0.0.1:6379")
| true
| true
|
f71772cc3a746eb0432c3f9d0877b4f980c06a5f
| 1,344
|
py
|
Python
|
Embed.py
|
zhengxiawu/Transformer
|
8cad013913254ea4e06c4a8d460d9f2cf42df086
|
[
"Apache-2.0"
] | null | null | null |
Embed.py
|
zhengxiawu/Transformer
|
8cad013913254ea4e06c4a8d460d9f2cf42df086
|
[
"Apache-2.0"
] | null | null | null |
Embed.py
|
zhengxiawu/Transformer
|
8cad013913254ea4e06c4a8d460d9f2cf42df086
|
[
"Apache-2.0"
] | null | null | null |
import torch
import torch.nn as nn
import math
from torch.autograd import Variable
class Embedder(nn.Module):
    """Learned token-embedding lookup table."""

    def __init__(self, vocab_size, d_model):
        """
        :param vocab_size: number of distinct token ids
        :param d_model: embedding dimensionality
        """
        super().__init__()
        self.embed = nn.Embedding(vocab_size, d_model)
        self.d_model = d_model

    def forward(self, x):
        """Map a tensor of token ids to (..., d_model) embedding vectors."""
        return self.embed(x)
class PositionalEncoder(nn.Module):
    """
    Adds the sinusoidal positional encodings of "Attention Is All You Need"
    (Vaswani et al., 2017) to a batch of embeddings, then applies dropout.

        PE(pos, 2i)   = sin(pos / 10000^(2i / d_model))
        PE(pos, 2i+1) = cos(pos / 10000^(2i / d_model))

    Fixes over the previous version: the exponent used 2*i/d_model with i
    already being the even embedding index (squaring the intended wavelength),
    the cos of each pair used a different frequency than its sin, the
    discarded ``pe.cuda()`` call caused a device mismatch on CUDA inputs, and
    odd d_model raised an IndexError.
    """

    def __init__(self, d_model, max_seq_len=200, dropout=0.1):
        """
        :param d_model: embedding dimensionality
        :param max_seq_len: longest sequence the table is precomputed for
        :param dropout: dropout probability applied after adding the encoding
        """
        super().__init__()
        self.d_model = d_model
        self.dropout = nn.Dropout(dropout)
        # Precompute the constant table once; register_buffer makes it move
        # with the module across devices and be saved as non-trainable state.
        pe = torch.zeros(max_seq_len, d_model)
        for pos in range(max_seq_len):
            for i in range(0, d_model, 2):
                # i is the even embedding index, i.e. the paper's 2i; the
                # sin/cos pair shares one frequency.
                div = 10000 ** (i / d_model)
                pe[pos, i] = math.sin(pos / div)
                if i + 1 < d_model:  # guard for odd d_model
                    pe[pos, i + 1] = math.cos(pos / div)
        pe = pe.unsqueeze(0)
        self.register_buffer('pe', pe)

    def forward(self, x):
        """Scale embeddings by sqrt(d_model), add positional encodings,
        and apply dropout.  x is (batch, seq_len, d_model)."""
        x = x * math.sqrt(self.d_model)
        seq_len = x.size(1)
        # The buffer already lives on the module's device; slicing keeps it there.
        x = x + self.pe[:, :seq_len]
        return self.dropout(x)
| 30.545455
| 70
| 0.563244
|
import torch
import torch.nn as nn
import math
from torch.autograd import Variable
class Embedder(nn.Module):
def __init__(self, vocab_size, d_model):
super().__init__()
self.d_model = d_model
self.embed = nn.Embedding(vocab_size, d_model)
def forward(self, x):
return self.embed(x)
class PositionalEncoder(nn.Module):
def __init__(self, d_model, max_seq_len=200, dropout=0.1):
super().__init__()
self.d_model = d_model
self.dropout = nn.Dropout(dropout)
pe = torch.zeros(max_seq_len, d_model)
for pos in range(max_seq_len):
for i in range(0, d_model, 2):
pe[pos, i] = \
math.sin(pos / (10000 ** ((2 * i)/d_model)))
pe[pos, i + 1] = \
math.cos(pos / (10000 ** ((2 * (i + 1))/d_model)))
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
def forward(self, x):
x = x * math.sqrt(self.d_model)
seq_len = x.size(1)
pe = Variable(self.pe[:, :seq_len], requires_grad=False)
if x.is_cuda:
pe.cuda()
x = x + pe
return self.dropout(x)
| true
| true
|
f71772d3409ea5f7a2383df7d2159cca0dc08add
| 8,729
|
py
|
Python
|
rl/train_a2c_mc.py
|
ds4dm/GraphRL
|
b5b1519f6dd92b401625d51add9ae5829004a30b
|
[
"MIT"
] | 2
|
2021-02-26T18:51:01.000Z
|
2021-07-12T05:20:18.000Z
|
rl/train_a2c_mc.py
|
pandat8/GraphRL
|
b5b1519f6dd92b401625d51add9ae5829004a30b
|
[
"MIT"
] | 3
|
2019-05-09T20:59:10.000Z
|
2020-05-13T14:03:50.000Z
|
rl/train_a2c_mc.py
|
pandat8/GraphRL
|
b5b1519f6dd92b401625d51add9ae5829004a30b
|
[
"MIT"
] | 3
|
2018-08-13T20:43:29.000Z
|
2020-05-13T14:00:57.000Z
|
import torch
import torch.optim as optm
import torch.nn.functional as F
import numpy as np
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader
from data.graph import Graph
from collections import namedtuple
# Per-step rollout record: log probability of the chosen action and the
# critic's value estimate of the state it was chosen in.
SavedAction = namedtuple('SavedAction', ['log_prob', 'value_current'])
# Monte Carlo methods
class TrainModel_MC:
    """
    Monte-Carlo (full-episode) actor-critic trainer for graph elimination
    ordering.

    For each training graph the policy eliminates nodes one by one; the
    per-step reward is the number of fill-in edges added.  The accumulated
    episode reward is compared against two baselines computed on copies of
    the same graph: min-degree ordering and random ordering.
    """
    def __init__(self, model, train_dataset, val_dataset, max_grad_norm=2, use_cuda=False):
        """
        :param model: actor-critic network; calling model(graph) must return
            (action, log_prob, reward, value_current, value_next, graph) and
            expose .actor, .critic plus .rewards/.actions/.saved_actions
            lists that are appended to during a rollout
        :param train_dataset: graphs used for training
        :param val_dataset: graphs used for validation (loader built, unused here)
        :param max_grad_norm: NOTE(review): stored but never applied -- no
            gradient clipping happens in train_and_validate
        :param use_cuda: put the no-critic baseline tensor on the GPU
        """
        self.model = model
        self.train_dataset = train_dataset
        self.val_dataset = val_dataset
        self.max_grad_norm = max_grad_norm
        self.use_cuda = use_cuda
        # batch_size=1 with an identity collate_fn: each batch is a plain
        # Python list containing one graph object.
        self.train_loader = DataLoader(train_dataset, shuffle=True, num_workers=1, batch_size=1, collate_fn=lambda x: x)
        self.val_loader = DataLoader(val_dataset, shuffle=True, num_workers=1, batch_size=1, collate_fn=lambda x: x)
        self.epochs = 0
        self.beta = 0.9  # NOTE(review): never used below
        self.eps = np.finfo(np.float32).eps.item()  # guards return normalisation against std == 0
    def train_and_validate(self, n_epochs, lr_actor, lr_critic, gamma=0.99, use_critic=True):
        """
        Run REINFORCE-style training over the training loader.

        :param n_epochs: number of rollouts performed per graph
        :param lr_actor: Adam learning rate for the actor head
        :param lr_critic: Adam learning rate for the critic head
        :param gamma: discount factor for the Monte-Carlo returns
        :param use_critic: if True the critic's value estimate is the
            baseline; otherwise a fixed zero baseline tensor is used
        """
        self.actor_optim = optm.Adam(self.model.actor.parameters(), lr=lr_actor)
        print(use_critic)  # debug output
        if use_critic:
            self.critic_optim = optm.Adam(self.model.critic.parameters(), lr=lr_critic)
            # NOTE(review): this MSE criterion is created but never used --
            # the critic loss below is -value * advantage instead.
            self.critic_loss_criterion = torch.nn.MSELoss()
        else:
            baseline = torch.zeros(1)
            if self.use_cuda:
                baseline = baseline.cuda()
        # NOTE(review): this outer `epoch` is immediately shadowed by the
        # per-graph rollout loop variable below.
        for epoch in range(1):
            n_graphs_proceed = 0
            for X in self.train_loader:
                for x in X:
                    self.model.train()
                    ratio_gcn2mind = []
                    ratio_gcn2rand = []
                    for epoch in range(n_epochs):
                        rewards_mindegree = 0  # number of added edges
                        rewards_random = 0
                        # independent copies so the three orderings do not
                        # interfere with one another
                        x_mind = Graph(x.M)
                        x_rand = Graph(x.M)
                        x_rl = Graph(x.M)
                        # loop for training while eliminating a graph iteratively
                        for i in range(x.n - 2):
                            # baseline1: compute return of min degree
                            if i % 100 == 0:
                                print('iterations {}'.format(i))
                            node_mind, d_min = x_mind.min_degree(x_mind.M)
                            rewards_mindegree += x_mind.eliminate_node(node_mind, reduce=True)
                            # baseline2: compute return of random
                            rewards_random += x_rand.eliminate_node(np.random.randint(low=0, high=x_rand.n), reduce=True)
                            # call actor-critic model; action: node selected, reward: nb edges added
                            action, log_prob, reward, value_current, value_next, x_rl = self.model(x_rl)
                            self.model.rewards.append(reward)
                            self.model.actions.append(action)
                            self.model.saved_actions.append(SavedAction(log_prob, value_current))
                        R = 0
                        actor_losses = []
                        critic_losses = []
                        returns = []
                        # compute discounted Monte-Carlo return for each step
                        for r in self.model.rewards[::-1]:
                            R = r + gamma * R
                            returns.insert(0, R)
                        returns = torch.tensor(returns)
                        # normalise returns to zero mean / unit variance
                        returns = (returns - returns.mean()) / (returns.std() + self.eps)
                        saved_actions = self.model.saved_actions
                        # accumulate actor and critic losses over the episode
                        for (log_prob, value_current), R in zip(saved_actions, returns):
                            if use_critic:
                                advantage = R - value_current
                                critic_losses.append(-value_current* advantage)
                            else:
                                advantage = R - baseline
                            # NOTE(review): this minimises log_prob * advantage;
                            # confirm the sign convention matches the reward
                            # definition (rewards here count added edges, a cost).
                            actor_losses.append(log_prob * advantage.detach())
                        # step update of actor
                        self.actor_optim.zero_grad()
                        actor_loss = torch.stack(actor_losses).sum()
                        actor_loss.backward(retain_graph=True)
                        self.actor_optim.step()
                        # step update of critic
                        if use_critic:
                            self.critic_optim.zero_grad()
                            critic_closs = torch.stack(critic_losses).sum()
                            critic_closs.backward()
                            self.critic_optim.step()
                        else:
                            baseline = baseline.detach()
                        rewards_gcn = sum(self.model.rewards)
                        _ratio_gcn2mind = rewards_gcn / rewards_mindegree
                        _ratio_gcn2rand = rewards_gcn / rewards_random
                        print('graph {:04d}'.format(n_graphs_proceed), 'epoch {:04d}'.format(epoch),
                              'gcn2mind ratio {}'.format(_ratio_gcn2mind),
                              'value {}'.format(saved_actions[0].value_current),
                              'R {}'.format(returns[0]))
                        print('graph {:04d}'.format(n_graphs_proceed), 'epoch {:04d}'.format(epoch),
                              'gcn2rand ratio {}'.format(_ratio_gcn2rand))
                        ratio_gcn2mind.append(_ratio_gcn2mind)
                        ratio_gcn2rand.append(_ratio_gcn2rand)
                        # clear the model's episode buffers before the next rollout
                        del self.model.rewards[:]
                        del self.model.actions[:]
                        del self.model.saved_actions[:]
                    # per-graph summary statistics over the n_epochs rollouts
                    ratio_gcn2mind = np.array(ratio_gcn2mind).reshape(-1)
                    ratio_gcn2rand = np.array(ratio_gcn2rand).reshape(-1)
                    min_ratio_gcn2mind = np.min(ratio_gcn2mind)
                    max_ratio_gcn2mind = np.max(ratio_gcn2mind)
                    av_ratio_gcn2mind = np.sum(ratio_gcn2mind)/ n_epochs
                    min_ratio_gcn2rand = np.min(ratio_gcn2rand)
                    max_ratio_gcn2rand = np.max(ratio_gcn2rand)
                    av_ratio_gcn2rand = np.sum(ratio_gcn2rand) / n_epochs
                    print('graph {:04d}'.format(n_graphs_proceed), 'gcn2mind{:04d}',
                          'min_ratio {}'.format(min_ratio_gcn2mind),
                          'max_ratio {}'.format(max_ratio_gcn2mind),
                          'av_ratio {}'.format(av_ratio_gcn2mind))
                    print('graph {:04d}'.format(n_graphs_proceed), 'gcn2rand{:04d}',
                          'min_ratio {}'.format(min_ratio_gcn2rand),
                          'max_ratio {}'.format(max_ratio_gcn2rand),
                          'av_ratio {}'.format(av_ratio_gcn2rand),
                          'nb graph proceeded {}'.format(n_graphs_proceed))
                    n_graphs_proceed += len(X)
| 47.961538
| 172
| 0.520907
|
import torch
import torch.optim as optm
import torch.nn.functional as F
import numpy as np
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader
from data.graph import Graph
from collections import namedtuple
SavedAction = namedtuple('SavedAction', ['log_prob', 'value_current'])
class TrainModel_MC:
def __init__(self, model, train_dataset, val_dataset, max_grad_norm=2, use_cuda=False):
self.model = model
self.train_dataset = train_dataset
self.val_dataset = val_dataset
self.max_grad_norm = max_grad_norm
self.use_cuda = use_cuda
self.train_loader = DataLoader(train_dataset, shuffle=True, num_workers=1, batch_size=1, collate_fn=lambda x: x)
self.val_loader = DataLoader(val_dataset, shuffle=True, num_workers=1, batch_size=1, collate_fn=lambda x: x)
self.epochs = 0
self.beta = 0.9
self.eps = np.finfo(np.float32).eps.item()
def train_and_validate(self, n_epochs, lr_actor, lr_critic, gamma=0.99, use_critic=True):
self.actor_optim = optm.Adam(self.model.actor.parameters(), lr=lr_actor)
print(use_critic)
if use_critic:
self.critic_optim = optm.Adam(self.model.critic.parameters(), lr=lr_critic)
self.critic_loss_criterion = torch.nn.MSELoss()
else:
baseline = torch.zeros(1)
if self.use_cuda:
baseline = baseline.cuda()
for epoch in range(1):
n_graphs_proceed = 0
for X in self.train_loader:
for x in X:
self.model.train()
ratio_gcn2mind = []
ratio_gcn2rand = []
for epoch in range(n_epochs):
rewards_mindegree = 0
rewards_random = 0
x_mind = Graph(x.M)
x_rand = Graph(x.M)
x_rl = Graph(x.M)
for i in range(x.n - 2):
if i % 100 == 0:
print('iterations {}'.format(i))
node_mind, d_min = x_mind.min_degree(x_mind.M)
rewards_mindegree += x_mind.eliminate_node(node_mind, reduce=True)
rewards_random += x_rand.eliminate_node(np.random.randint(low=0, high=x_rand.n), reduce=True)
action, log_prob, reward, value_current, value_next, x_rl = self.model(x_rl)
self.model.rewards.append(reward)
self.model.actions.append(action)
self.model.saved_actions.append(SavedAction(log_prob, value_current))
R = 0
actor_losses = []
critic_losses = []
returns = []
for r in self.model.rewards[::-1]:
R = r + gamma * R
returns.insert(0, R)
returns = torch.tensor(returns)
returns = (returns - returns.mean()) / (returns.std() + self.eps)
saved_actions = self.model.saved_actions
for (log_prob, value_current), R in zip(saved_actions, returns):
if use_critic:
advantage = R - value_current
critic_losses.append(-value_current* advantage)
else:
advantage = R - baseline
actor_losses.append(log_prob * advantage.detach())
self.actor_optim.zero_grad()
actor_loss = torch.stack(actor_losses).sum()
actor_loss.backward(retain_graph=True)
self.actor_optim.step()
if use_critic:
self.critic_optim.zero_grad()
critic_closs = torch.stack(critic_losses).sum()
critic_closs.backward()
self.critic_optim.step()
else:
baseline = baseline.detach()
rewards_gcn = sum(self.model.rewards)
_ratio_gcn2mind = rewards_gcn / rewards_mindegree
_ratio_gcn2rand = rewards_gcn / rewards_random
print('graph {:04d}'.format(n_graphs_proceed), 'epoch {:04d}'.format(epoch),
'gcn2mind ratio {}'.format(_ratio_gcn2mind),
'value {}'.format(saved_actions[0].value_current),
'R {}'.format(returns[0]))
print('graph {:04d}'.format(n_graphs_proceed), 'epoch {:04d}'.format(epoch),
'gcn2rand ratio {}'.format(_ratio_gcn2rand))
ratio_gcn2mind.append(_ratio_gcn2mind)
ratio_gcn2rand.append(_ratio_gcn2rand)
del self.model.rewards[:]
del self.model.actions[:]
del self.model.saved_actions[:]
ratio_gcn2mind = np.array(ratio_gcn2mind).reshape(-1)
ratio_gcn2rand = np.array(ratio_gcn2rand).reshape(-1)
min_ratio_gcn2mind = np.min(ratio_gcn2mind)
max_ratio_gcn2mind = np.max(ratio_gcn2mind)
av_ratio_gcn2mind = np.sum(ratio_gcn2mind)/ n_epochs
min_ratio_gcn2rand = np.min(ratio_gcn2rand)
max_ratio_gcn2rand = np.max(ratio_gcn2rand)
av_ratio_gcn2rand = np.sum(ratio_gcn2rand) / n_epochs
print('graph {:04d}'.format(n_graphs_proceed), 'gcn2mind{:04d}',
'min_ratio {}'.format(min_ratio_gcn2mind),
'max_ratio {}'.format(max_ratio_gcn2mind),
'av_ratio {}'.format(av_ratio_gcn2mind))
print('graph {:04d}'.format(n_graphs_proceed), 'gcn2rand{:04d}',
'min_ratio {}'.format(min_ratio_gcn2rand),
'max_ratio {}'.format(max_ratio_gcn2rand),
'av_ratio {}'.format(av_ratio_gcn2rand),
'nb graph proceeded {}'.format(n_graphs_proceed))
n_graphs_proceed += len(X)
| true
| true
|
f71774d3bb18a2b3e70569ab8a10dcf140e26e81
| 5,989
|
py
|
Python
|
submit_data.py
|
dimagi/submission_api_example
|
266eb36c6ef6b331ea894298cbacfd2752410f80
|
[
"Apache-2.0"
] | null | null | null |
submit_data.py
|
dimagi/submission_api_example
|
266eb36c6ef6b331ea894298cbacfd2752410f80
|
[
"Apache-2.0"
] | 1
|
2021-12-06T20:29:54.000Z
|
2021-12-13T20:32:37.000Z
|
submit_data.py
|
dimagi/submission_api_example
|
266eb36c6ef6b331ea894298cbacfd2752410f80
|
[
"Apache-2.0"
] | 1
|
2021-12-06T20:27:00.000Z
|
2021-12-06T20:27:00.000Z
|
#!/usr/bin/env python3
"""
An example script to send data to CommCare using the Submission API
Usage:
$ export CCHQ_PROJECT_SPACE=my-project-space
$ export CCHQ_CASE_TYPE=person
$ export CCHQ_USERNAME=user@example.com
$ export CCHQ_PASSWORD=MijByG_se3EcKr.t
$ export CCHQ_USER_ID=c0ffeeeeeb574eb8b5d5036c9a61a483
$ export CCHQ_OWNER_ID=c0ffeeeee1e34b12bb5da0dc838e8406
$ ./submit_data.py sample_data.csv
"""
# (Optional) Configure the following settings with your values
# An XML namespace to identify your XForm submission
FORM_XMLNS = 'http://example.com/submission-api-example-form/'
# A string to identify the origin of your data
DEVICE_ID = "submission_api_example"
# End of configurable settings
import csv
import os
import sys
import uuid
from dataclasses import dataclass
from datetime import datetime, timezone
from http.client import responses as http_responses
from typing import Any, Iterable, List, Optional, Tuple
from xml.etree import ElementTree as ET
import requests
from jinja2 import Template
COMMCARE_URL = 'https://www.commcarehq.org/'
@dataclass
class CaseProperty:
    """A single name/value case property taken from one CSV column."""
    name: str
    value: Any
@dataclass
class Case:
    """One CommCare case to be created/updated via the Submission API."""
    id: str  # A UUID. Generated if not given in the data.
    name: str  # Required
    type: str  # A name for the case type. e.g. "person" or "site"
    modified_on: str  # Generated if not given. e.g. "2020-06-08T18:41:33.207Z"
    owner_id: str  # ID of the user or location that cases must be assigned to
    properties: List[CaseProperty]  # All other given data
    server_modified_on: Optional[str]  # Taken from the data when present
def main(filename):
    """Submit the rows of the given CSV file to CommCare HQ.

    Returns the (success, message) pair produced by the Submission API.
    """
    rows = get_data(filename)
    payload = render_xform(as_cases(rows))
    return submit_xform(payload)
def get_data(csv_filename) -> Iterable[dict]:
    """Yield each row of the given CSV file as a dict keyed by header."""
    with open(csv_filename) as csv_file:
        for row in csv.DictReader(csv_file):
            yield row
def as_cases(data: Iterable[dict]) -> Iterable[Case]:
    """Convert row dicts into Case instances.

    Known field names are mapped onto Case attributes (generating an id and
    timestamp when absent); every remaining column becomes a CaseProperty.
    """
    reserved = ('id', 'name', 'case_type', 'modified_on', 'server_modified_on')
    for row in data:
        extras = [
            CaseProperty(name=column, value=cell)
            for column, cell in row.items()
            if column not in reserved
        ]
        yield Case(
            id=row.get('id', str(uuid.uuid4())),
            name=row['name'],
            type=os.environ['CCHQ_CASE_TYPE'],
            modified_on=row.get('modified_on', now_utc()),
            owner_id=os.environ['CCHQ_OWNER_ID'],
            server_modified_on=row.get('server_modified_on'),
            properties=extras,
        )
def render_xform(cases: Iterable[Case]) -> str:
    """Render the XForm submission payload from the xform.xml.j2 template."""
    context = dict(
        form_xmlns=FORM_XMLNS,
        device_id=DEVICE_ID,
        now_utc=now_utc(),
        cchq_username=os.environ['CCHQ_USERNAME'],
        cchq_user_id=os.environ['CCHQ_USER_ID'],
        submission_id=uuid.uuid4().hex,
        cases=list(cases),
    )
    with open('xform.xml.j2') as template_file:
        source = template_file.read()
    return Template(source).render(**context)
def submit_xform(xform: str) -> Tuple[bool, str]:
    """
    POST the given XForm to the CommCare HQ submission endpoint.

    Returns (True, success_message) on success, or (False,
    failure_message) on failure.
    """
    endpoint = f'/a/{os.environ["CCHQ_PROJECT_SPACE"]}/receiver/api/'
    url = join_url(COMMCARE_URL, endpoint)
    credentials = (os.environ['CCHQ_USERNAME'], os.environ['CCHQ_PASSWORD'])
    headers = {'Content-Type': 'text/html; charset=UTF-8'}
    response = requests.post(url, xform.encode('utf-8'),
                             headers=headers, auth=credentials)
    if 200 <= response.status_code < 300:
        return parse_response(response.text)
    return False, http_responses[response.status_code]
def parse_response(text: str) -> Tuple[bool, str]:
    """
    Parse an OpenRosa Submission API response document.

    Returns (True, message_text) when the message's ``nature`` attribute is
    ``submit_success``, otherwise (False, message_text).
    """
    root = ET.XML(text)
    message = root.find('{http://openrosa.org/http/response}message')
    succeeded = message.attrib['nature'] == 'submit_success'
    return succeeded, message.text
def join_url(base_url: str, endpoint: str) -> str:
    """Join ``base_url`` and ``endpoint`` with exactly one slash between them."""
    return base_url.rstrip('/') + '/' + endpoint.lstrip('/')
def now_utc() -> str:
    """Return the current UTC time in ISO-8601 with a trailing "Z" offset,
    e.g. "2020-06-08T18:41:33.207Z"."""
    stamp = datetime.now(tz=timezone.utc).isoformat(timespec='milliseconds')
    return stamp.replace('+00:00', 'Z')
def missing_env_vars():
    """Return the required CCHQ_* environment variables that are not set."""
    required = (
        'CCHQ_PROJECT_SPACE',
        'CCHQ_CASE_TYPE',
        'CCHQ_USERNAME',
        'CCHQ_PASSWORD',
        'CCHQ_USER_ID',
        'CCHQ_OWNER_ID',
    )
    return [name for name in required if name not in os.environ]
if __name__ == '__main__':
    # Require exactly one positional argument: the CSV file to submit.
    if len(sys.argv) != 2:
        print(__doc__)
        sys.exit()
    # Fail early if any required CCHQ_* environment variable is unset.
    if missing := missing_env_vars():
        print('Missing environment variables:', ', '.join(missing))
        sys.exit(1)
    success, message = main(sys.argv[1])
    print(message)
    # Non-zero exit code when the server reported a submission failure.
    if not success:
        sys.exit(1)
| 29.357843
| 79
| 0.646185
|
FORM_XMLNS = 'http://example.com/submission-api-example-form/'
DEVICE_ID = "submission_api_example"
import csv
import os
import sys
import uuid
from dataclasses import dataclass
from datetime import datetime, timezone
from http.client import responses as http_responses
from typing import Any, Iterable, List, Optional, Tuple
from xml.etree import ElementTree as ET
import requests
from jinja2 import Template
COMMCARE_URL = 'https://www.commcarehq.org/'
@dataclass
class CaseProperty:
name: str
value: Any
@dataclass
class Case:
id: str
name: str
type: str
modified_on: str
owner_id: str
properties: List[CaseProperty]
server_modified_on: Optional[str]
def main(filename):
data = get_data(filename)
cases = as_cases(data)
xform_str = render_xform(cases)
success, message = submit_xform(xform_str)
return success, message
def get_data(csv_filename) -> Iterable[dict]:
with open(csv_filename) as csv_file:
reader = csv.DictReader(csv_file)
yield from reader
def as_cases(data: Iterable[dict]) -> Iterable[Case]:
reserved = ('id', 'name', 'case_type', 'modified_on', 'server_modified_on')
for dict_ in data:
properties = [CaseProperty(name=key, value=value)
for key, value in dict_.items()
if key not in reserved]
yield Case(
id=dict_.get('id', str(uuid.uuid4())),
name=dict_['name'],
type=os.environ['CCHQ_CASE_TYPE'],
modified_on=dict_.get('modified_on', now_utc()),
owner_id=os.environ['CCHQ_OWNER_ID'],
server_modified_on=dict_.get('server_modified_on'),
properties=properties,
)
def render_xform(cases: Iterable[Case]) -> str:
context = {
'form_xmlns': FORM_XMLNS,
'device_id': DEVICE_ID,
'now_utc': now_utc(),
'cchq_username': os.environ['CCHQ_USERNAME'],
'cchq_user_id': os.environ['CCHQ_USER_ID'],
'submission_id': uuid.uuid4().hex,
'cases': list(cases),
}
with open('xform.xml.j2') as template_file:
template = Template(template_file.read())
xform = template.render(**context)
return xform
def submit_xform(xform: str) -> Tuple[bool, str]:
url = join_url(COMMCARE_URL,
f'/a/{os.environ["CCHQ_PROJECT_SPACE"]}/receiver/api/')
auth = (os.environ['CCHQ_USERNAME'], os.environ['CCHQ_PASSWORD'])
headers = {'Content-Type': 'text/html; charset=UTF-8'}
response = requests.post(url, xform.encode('utf-8'),
headers=headers, auth=auth)
if not 200 <= response.status_code < 300:
return False, http_responses[response.status_code]
return parse_response(response.text)
def parse_response(text: str) -> Tuple[bool, str]:
xml = ET.XML(text)
message = xml.find('{http://openrosa.org/http/response}message')
success = message.attrib['nature'] == 'submit_success'
return success, message.text
def join_url(base_url: str, endpoint: str) -> str:
return '/'.join((base_url.rstrip('/'), endpoint.lstrip('/')))
def now_utc() -> str:
now = datetime.now(tz=timezone.utc)
now_iso = now.isoformat(timespec='milliseconds')
now_iso_z = now_iso.replace('+00:00', 'Z')
return now_iso_z
def missing_env_vars():
    """Return the names of required ``CCHQ_*`` environment variables not set."""
    required = (
        'CCHQ_PROJECT_SPACE',
        'CCHQ_CASE_TYPE',
        'CCHQ_USERNAME',
        'CCHQ_PASSWORD',
        'CCHQ_USER_ID',
        'CCHQ_OWNER_ID',
    )
    return [name for name in required if name not in os.environ]
if __name__ == '__main__':
    # Usage: exactly one argument (the CSV filename); otherwise show the
    # module docstring as help text.
    if len(sys.argv) != 2:
        print(__doc__)
        sys.exit()
    # Fail fast if any required CCHQ_* environment variable is unset.
    if missing := missing_env_vars():
        print('Missing environment variables:', ', '.join(missing))
        sys.exit(1)
    # main() returns (success, message); exit non-zero on failure.
    success, message = main(sys.argv[1])
    print(message)
    if not success:
        sys.exit(1)
| true
| true
|
f717755be13370d96e8eff2b66e83a3b18716be8
| 515
|
py
|
Python
|
setup.py
|
Krozark/meteofrance-py
|
7328a857022f263d1609c939851f612c5ed13d08
|
[
"MIT"
] | null | null | null |
setup.py
|
Krozark/meteofrance-py
|
7328a857022f263d1609c939851f612c5ed13d08
|
[
"MIT"
] | null | null | null |
setup.py
|
Krozark/meteofrance-py
|
7328a857022f263d1609c939851f612c5ed13d08
|
[
"MIT"
] | null | null | null |
from setuptools import setup

# Package metadata for the Meteo-France scraping library.
setup(
    name='krozark-meteofrance',
    version='0.3.9',
    description='Meteo-France weather forecast',
    author='victorcerutti',
    author_email='maxime.barbier1991+meteofrance@gmail.com',
    url='https://github.com/Krozark/meteofrance-py',
    packages=['meteofrance'],
    install_requires=['requests', 'beautifulsoup4', 'pytz'],
    license='MIT',
    long_description='Extract Meteo-France current weather and 1 hour rain forecast',
)
| 27.105263
| 85
| 0.664078
|
from setuptools import setup
setup(
name='krozark-meteofrance',
version='0.3.9',
description = 'Meteo-France weather forecast',
author = 'victorcerutti',
author_email = 'maxime.barbier1991+meteofrance@gmail.com',
url = 'https://github.com/Krozark/meteofrance-py',
packages=['meteofrance',],
install_requires=[
'requests',
'beautifulsoup4',
'pytz'
],
license='MIT',
long_description='Extract Meteo-France current weather and 1 hour rain forecast',
)
| true
| true
|
f7177676b64b016a2006776e619b093446b0ff41
| 5,353
|
py
|
Python
|
test/language/choice_types/python/UInt64ParamChoiceTest.py
|
PeachOS/zserio
|
ea01f6906c125a6baab7e8ed865eeb08cd46c37c
|
[
"BSD-3-Clause"
] | 2
|
2019-02-06T17:50:24.000Z
|
2019-11-20T16:51:34.000Z
|
test/language/choice_types/python/UInt64ParamChoiceTest.py
|
PeachOS/zserio
|
ea01f6906c125a6baab7e8ed865eeb08cd46c37c
|
[
"BSD-3-Clause"
] | 1
|
2019-11-25T16:25:51.000Z
|
2019-11-25T18:09:39.000Z
|
test/language/choice_types/python/UInt64ParamChoiceTest.py
|
PeachOS/zserio
|
ea01f6906c125a6baab7e8ed865eeb08cd46c37c
|
[
"BSD-3-Clause"
] | null | null | null |
import unittest
import zserio
from testutils import getZserioApi
class UInt64ParamChoiceTest(unittest.TestCase):
    """Tests for the zserio-generated UInt64ParamChoice type.

    Per the reference serializer below: selector 1 selects field A (8 bits),
    selectors 2-4 select field B (16 bits), 5-6 select an empty case, and any
    other selector selects field C (32 bits).
    """
    @classmethod
    def setUpClass(cls):
        # Obtain the generated package for choice_types.zs once, shared by
        # every test method.
        cls.api = getZserioApi(__file__, "choice_types.zs").uint64_param_choice
    def testSelectorConstructor(self):
        uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_A_SELECTOR)
        self.assertEqual(self.VARIANT_A_SELECTOR, uint64ParamChoice.getSelector())
    def testFromReader(self):
        # Serialize by hand, then let the generated fromReader() parse it back.
        selector = self.VARIANT_B_SELECTOR
        value = 234
        writer = zserio.BitStreamWriter()
        UInt64ParamChoiceTest._writeUInt64ParamChoiceToStream(writer, selector, value)
        reader = zserio.BitStreamReader(writer.getByteArray())
        uint64ParamChoice = self.api.UInt64ParamChoice.fromReader(reader, selector)
        self.assertEqual(selector, uint64ParamChoice.getSelector())
        self.assertEqual(value, uint64ParamChoice.getB())
    def testEq(self):
        # Equality depends on both the selected field's value being set and
        # the values matching.
        uint64ParamChoice1 = self.api.UInt64ParamChoice(self.VARIANT_A_SELECTOR)
        uint64ParamChoice2 = self.api.UInt64ParamChoice(self.VARIANT_A_SELECTOR)
        self.assertTrue(uint64ParamChoice1 == uint64ParamChoice2)
        value = 99
        uint64ParamChoice1.setA(value)
        self.assertFalse(uint64ParamChoice1 == uint64ParamChoice2)
        uint64ParamChoice2.setA(value)
        self.assertTrue(uint64ParamChoice1 == uint64ParamChoice2)
        diffValue = value + 1
        uint64ParamChoice2.setA(diffValue)
        self.assertFalse(uint64ParamChoice1 == uint64ParamChoice2)
    def testHash(self):
        # Hash must agree with equality through the same set/compare sequence.
        uint64ParamChoice1 = self.api.UInt64ParamChoice(self.VARIANT_A_SELECTOR)
        uint64ParamChoice2 = self.api.UInt64ParamChoice(self.VARIANT_A_SELECTOR)
        self.assertEqual(hash(uint64ParamChoice1), hash(uint64ParamChoice2))
        value = 99
        uint64ParamChoice1.setA(value)
        self.assertTrue(hash(uint64ParamChoice1) != hash(uint64ParamChoice2))
        uint64ParamChoice2.setA(value)
        self.assertEqual(hash(uint64ParamChoice1), hash(uint64ParamChoice2))
        diffValue = value + 1
        uint64ParamChoice2.setA(diffValue)
        self.assertTrue(hash(uint64ParamChoice1) != hash(uint64ParamChoice2))
    def testGetSelector(self):
        uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_C_SELECTOR)
        self.assertEqual(self.VARIANT_C_SELECTOR, uint64ParamChoice.getSelector())
    def testGetSetA(self):
        uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_A_SELECTOR)
        value = 99
        uint64ParamChoice.setA(value)
        self.assertEqual(value, uint64ParamChoice.getA())
    def testGetSetB(self):
        uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_B_SELECTOR)
        value = 234
        uint64ParamChoice.setB(value)
        self.assertEqual(value, uint64ParamChoice.getB())
    def testGetSetC(self):
        uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_C_SELECTOR)
        value = 23456
        uint64ParamChoice.setC(value)
        self.assertEqual(value, uint64ParamChoice.getC())
    def testBitSizeOf(self):
        # Field widths: A is 8 bits, B is 16 bits.
        uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_A_SELECTOR)
        self.assertEqual(8, uint64ParamChoice.bitSizeOf())
        uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_B_SELECTOR)
        self.assertEqual(16, uint64ParamChoice.bitSizeOf())
    def testInitializeOffsets(self):
        # End bit position = start bit position + field width.
        uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_A_SELECTOR)
        bitPosition = 1
        self.assertEqual(9, uint64ParamChoice.initializeOffsets(bitPosition))
        uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_B_SELECTOR)
        self.assertEqual(17, uint64ParamChoice.initializeOffsets(bitPosition))
    def testReadWrite(self):
        # Round-trip through write()/read() for the A and B variants.
        uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_A_SELECTOR)
        byteValue = 99
        uint64ParamChoice.setA(byteValue)
        writer = zserio.BitStreamWriter()
        uint64ParamChoice.write(writer)
        readUInt64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_A_SELECTOR)
        reader = zserio.BitStreamReader(writer.getByteArray())
        readUInt64ParamChoice.read(reader)
        self.assertEqual(byteValue, readUInt64ParamChoice.getA())
        self.assertEqual(uint64ParamChoice, readUInt64ParamChoice)
        uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_B_SELECTOR)
        shortValue = 234
        uint64ParamChoice.setB(shortValue)
        writer = zserio.BitStreamWriter()
        uint64ParamChoice.write(writer)
        readUInt64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_B_SELECTOR)
        reader = zserio.BitStreamReader(writer.getByteArray())
        readUInt64ParamChoice.read(reader)
        self.assertEqual(shortValue, readUInt64ParamChoice.getB())
        self.assertEqual(uint64ParamChoice, readUInt64ParamChoice)
    @staticmethod
    def _writeUInt64ParamChoiceToStream(writer, selector, value):
        # Reference serializer mirroring the schema's selector -> field mapping.
        if selector == 1:
            writer.writeSignedBits(value, 8)
        elif selector in (2, 3, 4):
            writer.writeSignedBits(value, 16)
        elif selector in (5, 6):
            pass
        else:
            writer.writeSignedBits(value, 32)
    # Selector values used by the tests above (one per exercised variant).
    VARIANT_A_SELECTOR = 1
    VARIANT_B_SELECTOR = 2
    VARIANT_C_SELECTOR = 7
| 40.862595
| 86
| 0.721838
|
import unittest
import zserio
from testutils import getZserioApi
class UInt64ParamChoiceTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.api = getZserioApi(__file__, "choice_types.zs").uint64_param_choice
def testSelectorConstructor(self):
uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_A_SELECTOR)
self.assertEqual(self.VARIANT_A_SELECTOR, uint64ParamChoice.getSelector())
def testFromReader(self):
selector = self.VARIANT_B_SELECTOR
value = 234
writer = zserio.BitStreamWriter()
UInt64ParamChoiceTest._writeUInt64ParamChoiceToStream(writer, selector, value)
reader = zserio.BitStreamReader(writer.getByteArray())
uint64ParamChoice = self.api.UInt64ParamChoice.fromReader(reader, selector)
self.assertEqual(selector, uint64ParamChoice.getSelector())
self.assertEqual(value, uint64ParamChoice.getB())
def testEq(self):
uint64ParamChoice1 = self.api.UInt64ParamChoice(self.VARIANT_A_SELECTOR)
uint64ParamChoice2 = self.api.UInt64ParamChoice(self.VARIANT_A_SELECTOR)
self.assertTrue(uint64ParamChoice1 == uint64ParamChoice2)
value = 99
uint64ParamChoice1.setA(value)
self.assertFalse(uint64ParamChoice1 == uint64ParamChoice2)
uint64ParamChoice2.setA(value)
self.assertTrue(uint64ParamChoice1 == uint64ParamChoice2)
diffValue = value + 1
uint64ParamChoice2.setA(diffValue)
self.assertFalse(uint64ParamChoice1 == uint64ParamChoice2)
def testHash(self):
uint64ParamChoice1 = self.api.UInt64ParamChoice(self.VARIANT_A_SELECTOR)
uint64ParamChoice2 = self.api.UInt64ParamChoice(self.VARIANT_A_SELECTOR)
self.assertEqual(hash(uint64ParamChoice1), hash(uint64ParamChoice2))
value = 99
uint64ParamChoice1.setA(value)
self.assertTrue(hash(uint64ParamChoice1) != hash(uint64ParamChoice2))
uint64ParamChoice2.setA(value)
self.assertEqual(hash(uint64ParamChoice1), hash(uint64ParamChoice2))
diffValue = value + 1
uint64ParamChoice2.setA(diffValue)
self.assertTrue(hash(uint64ParamChoice1) != hash(uint64ParamChoice2))
def testGetSelector(self):
uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_C_SELECTOR)
self.assertEqual(self.VARIANT_C_SELECTOR, uint64ParamChoice.getSelector())
def testGetSetA(self):
uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_A_SELECTOR)
value = 99
uint64ParamChoice.setA(value)
self.assertEqual(value, uint64ParamChoice.getA())
def testGetSetB(self):
uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_B_SELECTOR)
value = 234
uint64ParamChoice.setB(value)
self.assertEqual(value, uint64ParamChoice.getB())
def testGetSetC(self):
uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_C_SELECTOR)
value = 23456
uint64ParamChoice.setC(value)
self.assertEqual(value, uint64ParamChoice.getC())
def testBitSizeOf(self):
uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_A_SELECTOR)
self.assertEqual(8, uint64ParamChoice.bitSizeOf())
uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_B_SELECTOR)
self.assertEqual(16, uint64ParamChoice.bitSizeOf())
def testInitializeOffsets(self):
uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_A_SELECTOR)
bitPosition = 1
self.assertEqual(9, uint64ParamChoice.initializeOffsets(bitPosition))
uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_B_SELECTOR)
self.assertEqual(17, uint64ParamChoice.initializeOffsets(bitPosition))
def testReadWrite(self):
uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_A_SELECTOR)
byteValue = 99
uint64ParamChoice.setA(byteValue)
writer = zserio.BitStreamWriter()
uint64ParamChoice.write(writer)
readUInt64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_A_SELECTOR)
reader = zserio.BitStreamReader(writer.getByteArray())
readUInt64ParamChoice.read(reader)
self.assertEqual(byteValue, readUInt64ParamChoice.getA())
self.assertEqual(uint64ParamChoice, readUInt64ParamChoice)
uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_B_SELECTOR)
shortValue = 234
uint64ParamChoice.setB(shortValue)
writer = zserio.BitStreamWriter()
uint64ParamChoice.write(writer)
readUInt64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_B_SELECTOR)
reader = zserio.BitStreamReader(writer.getByteArray())
readUInt64ParamChoice.read(reader)
self.assertEqual(shortValue, readUInt64ParamChoice.getB())
self.assertEqual(uint64ParamChoice, readUInt64ParamChoice)
@staticmethod
def _writeUInt64ParamChoiceToStream(writer, selector, value):
if selector == 1:
writer.writeSignedBits(value, 8)
elif selector in (2, 3, 4):
writer.writeSignedBits(value, 16)
elif selector in (5, 6):
pass
else:
writer.writeSignedBits(value, 32)
VARIANT_A_SELECTOR = 1
VARIANT_B_SELECTOR = 2
VARIANT_C_SELECTOR = 7
| true
| true
|
f717772bd33c93521c158ce38b78042fe52c2ff5
| 52
|
py
|
Python
|
subjects/__init__.py
|
ankit0tech/Research-Productivity-Tool
|
c08e39daaaa8dfa08f5eb2607986d9f6bf9f02fa
|
[
"MIT"
] | 1
|
2021-12-12T04:54:05.000Z
|
2021-12-12T04:54:05.000Z
|
subjects/__init__.py
|
ankit0tech/Research-Productivity-Tool
|
c08e39daaaa8dfa08f5eb2607986d9f6bf9f02fa
|
[
"MIT"
] | null | null | null |
subjects/__init__.py
|
ankit0tech/Research-Productivity-Tool
|
c08e39daaaa8dfa08f5eb2607986d9f6bf9f02fa
|
[
"MIT"
] | null | null | null |
# Django app-config hook: points Django at this app's AppConfig class.
default_app_config = 'subjects.apps.SubjectsConfig'
| 26
| 51
| 0.846154
|
default_app_config = 'subjects.apps.SubjectsConfig'
| true
| true
|
f71777438e6b24d7bdde702a1788bd674bd9b0a3
| 589
|
py
|
Python
|
Appium_learning/02_ChangeApp.py
|
yeyuning1/AutoTT
|
1ce88e9e73d71fa11d4d8ad12bd6741aa71f97d2
|
[
"MIT"
] | null | null | null |
Appium_learning/02_ChangeApp.py
|
yeyuning1/AutoTT
|
1ce88e9e73d71fa11d4d8ad12bd6741aa71f97d2
|
[
"MIT"
] | 1
|
2021-06-02T00:24:41.000Z
|
2021-06-02T00:24:41.000Z
|
Appium_learning/02_ChangeApp.py
|
yeyuning1/AutoTT
|
1ce88e9e73d71fa11d4d8ad12bd6741aa71f97d2
|
[
"MIT"
] | null | null | null |
import time
from appium import webdriver
from Appium_learning import app_settings
# Connect to the local Appium server and start the session described by
# app_settings.desired_caps.
driver = webdriver.Remote('http://localhost:4723/wd/hub', app_settings.desired_caps)
# Show which package/activity/context currently has focus.
print(driver.current_package)
print(driver.current_activity)
print(driver.context)
time.sleep(5)
# Manual equivalent: adb shell dumpsys window windows | findstr(grep) mFocusedApp
# Switch to the Messaging app's conversation-list activity.
driver.start_activity('com.android.messaging', '.ui.conversationlist.ConversationListActivity')
print(driver.current_package)
print(driver.current_activity)
print(driver.context)
time.sleep(5)
# driver.quit() would destroy the driver object --> stop_client;
# close_app() is used here instead.
driver.close_app()
| 31
| 95
| 0.814941
|
import time
from appium import webdriver
from Appium_learning import app_settings
driver = webdriver.Remote('http://localhost:4723/wd/hub', app_settings.desired_caps)
print(driver.current_package)
print(driver.current_activity)
print(driver.context)
time.sleep(5)
driver.start_activity('com.android.messaging', '.ui.conversationlist.ConversationListActivity')
print(driver.current_package)
print(driver.current_activity)
print(driver.context)
time.sleep(5)
driver.close_app()
| true
| true
|
f717798fa86a8765f50f6a661d1e837315188e97
| 2,151
|
py
|
Python
|
setup.py
|
deliri/ChatterBot
|
8d95c43371bf8b7b1a1c44f77827b239bf38dc4e
|
[
"BSD-3-Clause"
] | 1
|
2021-03-06T00:28:20.000Z
|
2021-03-06T00:28:20.000Z
|
setup.py
|
deliri/ChatterBot
|
8d95c43371bf8b7b1a1c44f77827b239bf38dc4e
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
deliri/ChatterBot
|
8d95c43371bf8b7b1a1c44f77827b239bf38dc4e
|
[
"BSD-3-Clause"
] | 2
|
2017-05-30T02:18:30.000Z
|
2021-02-21T18:15:25.000Z
|
#!/usr/bin/env python
"""
ChatterBot setup file.
"""
from setuptools import setup

# Pull release metadata straight from the chatterbot package so it is
# declared in exactly one place.
CHATTERBOT = __import__('chatterbot')
VERSION = CHATTERBOT.__version__
AUTHOR = CHATTERBOT.__author__
AUTHOR_EMAIL = CHATTERBOT.__email__
URL = CHATTERBOT.__url__
DESCRIPTION = CHATTERBOT.__doc__

# Runtime dependencies are maintained in requirements.txt.
with open('requirements.txt') as requirements_file:
    REQUIREMENTS = list(requirements_file)

setup(
    name='ChatterBot',
    version=VERSION,
    url=URL,
    download_url='{}/tarball/{}'.format(URL, VERSION),
    setup_requires=['setuptools-markdown'],
    long_description_markdown_filename='readme.md',
    description=DESCRIPTION,
    author=AUTHOR,
    author_email=AUTHOR_EMAIL,
    packages=[
        'chatterbot',
        'chatterbot.input',
        'chatterbot.output',
        'chatterbot.storage',
        'chatterbot.logic',
        'chatterbot.corpus',
        'chatterbot.conversation',
        'chatterbot.ext',
        'chatterbot.ext.django_chatterbot',
        'chatterbot.ext.django_chatterbot.migrations',
        'chatterbot.ext.django_chatterbot.management',
        'chatterbot.ext.django_chatterbot.management.commands',
    ],
    package_dir={'chatterbot': 'chatterbot'},
    include_package_data=True,
    install_requires=REQUIREMENTS,
    license='BSD',
    zip_safe=False,
    platforms=['any'],
    keywords=['ChatterBot', 'chatbot', 'chat', 'bot'],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Environment :: Console',
        'Environment :: Web Environment',
        'Operating System :: OS Independent',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Communications :: Chat',
        'Topic :: Internet',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],
    test_suite='tests',
    tests_require=['mock'],
)
| 31.173913
| 73
| 0.653185
|
from setuptools import setup
CHATTERBOT = __import__('chatterbot')
VERSION = CHATTERBOT.__version__
AUTHOR = CHATTERBOT.__author__
AUTHOR_EMAIL = CHATTERBOT.__email__
URL = CHATTERBOT.__url__
DESCRIPTION = CHATTERBOT.__doc__
with open('requirements.txt') as requirements:
REQUIREMENTS = requirements.readlines()
setup(
name='ChatterBot',
version=VERSION,
url=URL,
download_url='{}/tarball/{}'.format(URL, VERSION),
setup_requires=['setuptools-markdown'],
long_description_markdown_filename='readme.md',
description=DESCRIPTION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
packages=[
'chatterbot',
'chatterbot.input',
'chatterbot.output',
'chatterbot.storage',
'chatterbot.logic',
'chatterbot.corpus',
'chatterbot.conversation',
'chatterbot.ext',
'chatterbot.ext.django_chatterbot',
'chatterbot.ext.django_chatterbot.migrations',
'chatterbot.ext.django_chatterbot.management',
'chatterbot.ext.django_chatterbot.management.commands'
],
package_dir={'chatterbot': 'chatterbot'},
include_package_data=True,
install_requires=REQUIREMENTS,
license='BSD',
zip_safe=False,
platforms=['any'],
keywords=['ChatterBot', 'chatbot', 'chat', 'bot'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Environment :: Console',
'Environment :: Web Environment',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Communications :: Chat',
'Topic :: Internet',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
test_suite='tests',
tests_require=['mock']
)
| true
| true
|
f71779abb101df0998a79dffc26322edc971e6e8
| 10,423
|
py
|
Python
|
seleniumbase/core/jqc_helper.py
|
mdmintz/seleniumspot
|
f5c225aa4fcd0b4124fc990e3892c36736290ce8
|
[
"MIT"
] | 1
|
2015-06-17T10:16:26.000Z
|
2015-06-17T10:16:26.000Z
|
seleniumbase/core/jqc_helper.py
|
mdmintz/seleniumspot
|
f5c225aa4fcd0b4124fc990e3892c36736290ce8
|
[
"MIT"
] | null | null | null |
seleniumbase/core/jqc_helper.py
|
mdmintz/seleniumspot
|
f5c225aa4fcd0b4124fc990e3892c36736290ce8
|
[
"MIT"
] | null | null | null |
"""
This module contains methods for opening jquery-confirm boxes.
These helper methods SHOULD NOT be called directly from tests.
"""
from seleniumbase.fixtures import constants
from seleniumbase.fixtures import js_utils
form_code = """'<form align="center" action="" class="jqc_form">' +
'<div class="form-group">' +
'<input style="font-size:20px; background-color: #f8fdfd; ' +
' width: 84%%; border: 1px solid blue; ' +
' box-shadow:inset 0 0 2px 2px #f4fafa;"' +
' type="text" class="jqc_input" />' +
'</div>' +
'</form>'"""
def jquery_confirm_button_dialog(driver, message, buttons, options=None):
    """Open a jquery-confirm dialog with one clickable button per entry.

    :param driver: Selenium WebDriver used to inject the dialog.
    :param message: HTML message shown in the dialog body.
    :param buttons: list of (text, color) tuples, one per button.
    :param options: optional list of (name, value) tuples; supported names
        are "theme", "color" and "width".
    :raises Exception: on an unrecognized option name.
    """
    js_utils.activate_jquery_confirm(driver)
    # Library defaults; may be overridden via ``options`` below.
    theme = constants.JqueryConfirm.DEFAULT_THEME
    border_color = constants.JqueryConfirm.DEFAULT_COLOR
    width = constants.JqueryConfirm.DEFAULT_WIDTH
    if options:
        for option in options:
            if option[0].lower() == "theme":
                theme = option[1]
            elif option[0].lower() == "color":
                border_color = option[1]
            elif option[0].lower() == "width":
                width = option[1]
            else:
                raise Exception('Unknown option: "%s"' % option[0])
    if not message:
        message = ""
    b_html = """button_%s: {
                btnClass: 'btn-%s',
                text: '<b>%s</b>',
                %s
                action: function(){
                    jqc_status = '%s';
                    $jqc_status = jqc_status;
                    jconfirm.lastButtonText = jqc_status;
                }
            },"""
    all_buttons = ""
    btn_count = 0
    for button in buttons:
        btn_count += 1
        text = js_utils.escape_quotes_if_needed(button[0])
        # Keyboard shortcuts: a lone button answers to Enter/Y/1; in a
        # multi-button dialog "Yes"/"No" get y/n (plus their number when
        # single-digit) and other buttons get their number when single-digit.
        # BUGFIX: key_row is reset each iteration so a shortcut computed for
        # an earlier button can no longer leak onto a later one (previously,
        # buttons past the 9th silently reused the last assigned shortcut).
        key_row = ""
        if len(buttons) == 1:
            key_row = "keys: ['enter', 'y', '1'],"  # Shortcut: "Enter","Y","1"
        elif text.lower() == "yes":
            key_row = "keys: ['y'],"
            if btn_count < 10:
                key_row = "keys: ['y', '%s']," % btn_count
        elif text.lower() == "no":
            key_row = "keys: ['n'],"
            if btn_count < 10:
                key_row = "keys: ['n', '%s']," % btn_count
        elif btn_count < 10:
            key_row = "keys: ['%s']," % btn_count
        color = button[1]
        if not color:
            color = "blue"
        all_buttons += b_html % (btn_count, color, text, key_row, text)
    content = '<div></div><font color="#0066ee">%s</font>' % (message)
    content = js_utils.escape_quotes_if_needed(content)
    # Theme-specific dialog overlay opacity (default 0.32).
    overlay_opacity = {
        "supervan": "0.56",
        "bootstrap": "0.64",
        "modern": "0.5",
        "material": "0.4",
    }.get(theme.lower(), "0.32")
    jqcd = """jconfirm({
            boxWidth: '%s',
            useBootstrap: false,
            containerFluid: true,
            bgOpacity: %s,
            type: '%s',
            theme: '%s',
            animationBounce: 1,
            typeAnimated: true,
            animation: 'scale',
            draggable: true,
            dragWindowGap: 1,
            container: 'body',
            title: '%s',
            content: '<div></div>',
            buttons: {
                %s
            }
        });""" % (
        width,
        overlay_opacity,
        border_color,
        theme,
        content,
        all_buttons,
    )
    driver.execute_script(jqcd)
def jquery_confirm_text_dialog(driver, message, button=None, options=None):
    """Open a jquery-confirm dialog containing a single text-input form.

    :param driver: Selenium WebDriver used to inject the dialog.
    :param message: HTML message shown above the input field.
    :param button: optional (text, color) tuple for the submit button;
        defaults to ("Submit", "blue").
    :param options: optional list of (name, value) tuples; supported names
        are "theme", "color" and "width".
    :raises Exception: on a malformed *button* or an unknown option name.
    """
    js_utils.activate_jquery_confirm(driver)
    # Start from the library defaults; ``options`` may override them below.
    theme = constants.JqueryConfirm.DEFAULT_THEME
    border_color = constants.JqueryConfirm.DEFAULT_COLOR
    width = constants.JqueryConfirm.DEFAULT_WIDTH
    if not message:
        message = ""
    if button:
        if type(button) is not list and type(button) is not tuple:
            raise Exception('"button" should be a (text, color) tuple!')
        if len(button) != 2:
            raise Exception('"button" should be a (text, color) tuple!')
    else:
        button = ("Submit", "blue")
    if options:
        for option in options:
            opt_name = option[0].lower()
            if opt_name == "theme":
                theme = option[1]
            elif opt_name == "color":
                border_color = option[1]
            elif opt_name == "width":
                width = option[1]
            else:
                raise Exception('Unknown option: "%s"' % option[0])
    btn_text = button[0]
    btn_color = button[1] or "blue"
    content = '<div></div><font color="#0066ee">%s</font>' % (message)
    content = js_utils.escape_quotes_if_needed(content)
    # Theme-specific dialog overlay opacity (default 0.32).
    overlay_opacity = {
        "supervan": "0.56",
        "bootstrap": "0.64",
        "modern": "0.5",
        "material": "0.4",
    }.get(theme.lower(), "0.32")
    jqcd = """jconfirm({
            boxWidth: '%s',
            useBootstrap: false,
            containerFluid: true,
            bgOpacity: %s,
            type: '%s',
            theme: '%s',
            animationBounce: 1,
            typeAnimated: true,
            animation: 'scale',
            draggable: true,
            dragWindowGap: 1,
            container: 'body',
            title: '%s',
            content: '<div></div>' +
                %s,
            buttons: {
                formSubmit: {
                    btnClass: 'btn-%s',
                    text: '%s',
                    action: function () {
                        jqc_input = this.$content.find('.jqc_input').val();
                        $jqc_input = this.$content.find('.jqc_input').val();
                        jconfirm.lastInputText = jqc_input;
                        $jqc_status = '%s'; // There is only one button
                    },
                },
            },
            onContentReady: function () {
                var jc = this;
                this.$content.find('form.jqc_form').on('submit', function (e) {
                    // User submits the form by pressing "Enter" in the field
                    e.preventDefault();
                    jc.$$formSubmit.trigger('click'); // Click the button
                });
            }
        });""" % (
        width,
        overlay_opacity,
        border_color,
        theme,
        content,
        form_code,
        btn_color,
        btn_text,
        btn_text,
    )
    driver.execute_script(jqcd)
def jquery_confirm_full_dialog(driver, message, buttons, options=None):
    """Open a jquery-confirm dialog combining a text-input form with buttons.

    :param driver: Selenium WebDriver used to inject the dialog.
    :param message: HTML message shown above the input field.
    :param buttons: list of (text, color) tuples, one per button.
    :param options: optional list of (name, value) tuples; supported names
        are "theme", "color" and "width".
    :raises Exception: on an unrecognized option name.
    """
    js_utils.activate_jquery_confirm(driver)
    # These defaults will be overwritten later if set
    theme = constants.JqueryConfirm.DEFAULT_THEME
    border_color = constants.JqueryConfirm.DEFAULT_COLOR
    width = constants.JqueryConfirm.DEFAULT_WIDTH
    if not message:
        message = ""
    btn_count = 0
    # JS template for each button in the multi-button case.
    b_html = """button_%s: {
                btnClass: 'btn-%s',
                text: '%s',
                action: function(){
                    jqc_input = this.$content.find('.jqc_input').val();
                    $jqc_input = this.$content.find('.jqc_input').val();
                    jconfirm.lastInputText = jqc_input;
                    $jqc_status = '%s';
                }
            },"""
    # JS template for the single-button case (doubles as the form-submit
    # button, so it also records the button text).
    b1_html = """formSubmit: {
                btnClass: 'btn-%s',
                text: '%s',
                action: function(){
                    jqc_input = this.$content.find('.jqc_input').val();
                    $jqc_input = this.$content.find('.jqc_input').val();
                    jconfirm.lastInputText = jqc_input;
                    jqc_status = '%s';
                    $jqc_status = jqc_status;
                    jconfirm.lastButtonText = jqc_status;
                }
            },"""
    one_button_trigger = ""
    if len(buttons) == 1:
        # If there's only one button, allow form submit with "Enter/Return"
        one_button_trigger = "jc.$$formSubmit.trigger('click');"
    all_buttons = ""
    for button in buttons:
        text = button[0]
        text = js_utils.escape_quotes_if_needed(text)
        color = button[1]
        if not color:
            color = "blue"
        btn_count += 1
        if len(buttons) == 1:
            new_button = b1_html % (color, text, text)
        else:
            new_button = b_html % (btn_count, color, text, text)
        all_buttons += new_button
    if options:
        for option in options:
            if option[0].lower() == "theme":
                theme = option[1]
            elif option[0].lower() == "color":
                border_color = option[1]
            elif option[0].lower() == "width":
                width = option[1]
            else:
                raise Exception('Unknown option: "%s"' % option[0])
    content = '<div></div><font color="#0066ee">%s</font>' % (message)
    content = js_utils.escape_quotes_if_needed(content)
    # Theme-specific dialog overlay opacity (default 0.32).
    overlay_opacity = "0.32"
    if theme.lower() == "supervan":
        overlay_opacity = "0.56"
    if theme.lower() == "bootstrap":
        overlay_opacity = "0.64"
    if theme.lower() == "modern":
        overlay_opacity = "0.5"
    if theme.lower() == "material":
        overlay_opacity = "0.4"
    jqcd = """jconfirm({
            boxWidth: '%s',
            useBootstrap: false,
            containerFluid: true,
            bgOpacity: %s,
            type: '%s',
            theme: '%s',
            animationBounce: 1,
            typeAnimated: true,
            animation: 'scale',
            draggable: true,
            dragWindowGap: 1,
            container: 'body',
            title: '%s',
            content: '<div></div>' +
                %s,
            buttons: {
                %s
            },
            onContentReady: function () {
                var jc = this;
                this.$content.find('form.jqc_form').on('submit', function (e) {
                    // User submits the form by pressing "Enter" in the field
                    e.preventDefault();
                    %s
                });
            }
        });""" % (
        width,
        overlay_opacity,
        border_color,
        theme,
        content,
        form_code,
        all_buttons,
        one_button_trigger,
    )
    driver.execute_script(jqcd)
| 33.300319
| 75
| 0.515399
|
from seleniumbase.fixtures import constants
from seleniumbase.fixtures import js_utils
form_code = """'<form align="center" action="" class="jqc_form">' +
'<div class="form-group">' +
'<input style="font-size:20px; background-color: #f8fdfd; ' +
' width: 84%%; border: 1px solid blue; ' +
' box-shadow:inset 0 0 2px 2px #f4fafa;"' +
' type="text" class="jqc_input" />' +
'</div>' +
'</form>'"""
def jquery_confirm_button_dialog(driver, message, buttons, options=None):
js_utils.activate_jquery_confirm(driver)
theme = constants.JqueryConfirm.DEFAULT_THEME
border_color = constants.JqueryConfirm.DEFAULT_COLOR
width = constants.JqueryConfirm.DEFAULT_WIDTH
if options:
for option in options:
if option[0].lower() == "theme":
theme = option[1]
elif option[0].lower() == "color":
border_color = option[1]
elif option[0].lower() == "width":
width = option[1]
else:
raise Exception('Unknown option: "%s"' % option[0])
if not message:
message = ""
key_row = ""
if len(buttons) == 1:
key_row = "keys: ['enter', 'y', '1']," # Shortcut: "Enter","Y","1"
b_html = """button_%s: {
btnClass: 'btn-%s',
text: '<b>%s</b>',
%s
action: function(){
jqc_status = '%s';
$jqc_status = jqc_status;
jconfirm.lastButtonText = jqc_status;
}
},"""
all_buttons = ""
btn_count = 0
for button in buttons:
btn_count += 1
text = button[0]
text = js_utils.escape_quotes_if_needed(text)
if len(buttons) > 1 and text.lower() == "yes":
key_row = "keys: ['y'],"
if btn_count < 10:
key_row = "keys: ['y', '%s']," % btn_count
elif len(buttons) > 1 and text.lower() == "no":
key_row = "keys: ['n'],"
if btn_count < 10:
key_row = "keys: ['n', '%s']," % btn_count
elif len(buttons) > 1:
if btn_count < 10:
key_row = "keys: ['%s']," % btn_count
color = button[1]
if not color:
color = "blue"
new_button = b_html % (btn_count, color, text, key_row, text)
all_buttons += new_button
content = '<div></div><font color="#0066ee">%s</font>' % (message)
content = js_utils.escape_quotes_if_needed(content)
overlay_opacity = "0.32"
if theme.lower() == "supervan":
overlay_opacity = "0.56"
if theme.lower() == "bootstrap":
overlay_opacity = "0.64"
if theme.lower() == "modern":
overlay_opacity = "0.5"
if theme.lower() == "material":
overlay_opacity = "0.4"
jqcd = """jconfirm({
boxWidth: '%s',
useBootstrap: false,
containerFluid: true,
bgOpacity: %s,
type: '%s',
theme: '%s',
animationBounce: 1,
typeAnimated: true,
animation: 'scale',
draggable: true,
dragWindowGap: 1,
container: 'body',
title: '%s',
content: '<div></div>',
buttons: {
%s
}
});""" % (
width,
overlay_opacity,
border_color,
theme,
content,
all_buttons,
)
driver.execute_script(jqcd)
def jquery_confirm_text_dialog(driver, message, button=None, options=None):
js_utils.activate_jquery_confirm(driver)
# These defaults will be overwritten later if set
theme = constants.JqueryConfirm.DEFAULT_THEME
border_color = constants.JqueryConfirm.DEFAULT_COLOR
width = constants.JqueryConfirm.DEFAULT_WIDTH
if not message:
message = ""
if button:
if not type(button) is list and not type(button) is tuple:
raise Exception('"button" should be a (text, color) tuple!')
if len(button) != 2:
raise Exception('"button" should be a (text, color) tuple!')
else:
button = ("Submit", "blue")
if options:
for option in options:
if option[0].lower() == "theme":
theme = option[1]
elif option[0].lower() == "color":
border_color = option[1]
elif option[0].lower() == "width":
width = option[1]
else:
raise Exception('Unknown option: "%s"' % option[0])
btn_text = button[0]
btn_color = button[1]
if not btn_color:
btn_color = "blue"
content = '<div></div><font color="#0066ee">%s</font>' % (message)
content = js_utils.escape_quotes_if_needed(content)
overlay_opacity = "0.32"
if theme.lower() == "supervan":
overlay_opacity = "0.56"
if theme.lower() == "bootstrap":
overlay_opacity = "0.64"
if theme.lower() == "modern":
overlay_opacity = "0.5"
if theme.lower() == "material":
overlay_opacity = "0.4"
jqcd = """jconfirm({
boxWidth: '%s',
useBootstrap: false,
containerFluid: true,
bgOpacity: %s,
type: '%s',
theme: '%s',
animationBounce: 1,
typeAnimated: true,
animation: 'scale',
draggable: true,
dragWindowGap: 1,
container: 'body',
title: '%s',
content: '<div></div>' +
%s,
buttons: {
formSubmit: {
btnClass: 'btn-%s',
text: '%s',
action: function () {
jqc_input = this.$content.find('.jqc_input').val();
$jqc_input = this.$content.find('.jqc_input').val();
jconfirm.lastInputText = jqc_input;
$jqc_status = '%s'; // There is only one button
},
},
},
onContentReady: function () {
var jc = this;
this.$content.find('form.jqc_form').on('submit', function (e) {
// User submits the form by pressing "Enter" in the field
e.preventDefault();
jc.$$formSubmit.trigger('click'); // Click the button
});
}
});""" % (
width,
overlay_opacity,
border_color,
theme,
content,
form_code,
btn_color,
btn_text,
btn_text,
)
driver.execute_script(jqcd)
def jquery_confirm_full_dialog(driver, message, buttons, options=None):
    """Show a jquery-confirm dialog with a text-input form and one or
    more custom buttons, by building a JS snippet and executing it in
    the browser via ``driver.execute_script``.

    driver:  WebDriver used to run the generated JavaScript.
    message: text shown in the dialog body (``None`` becomes "").
    buttons: list of (text, color) tuples, one entry per dialog button.
    options: optional iterable of (name, value) pairs; supported names
             are "theme", "color" (border color) and "width" — anything
             else raises ``Exception``.
    """
    js_utils.activate_jquery_confirm(driver)
    # These defaults will be overwritten later if set
    theme = constants.JqueryConfirm.DEFAULT_THEME
    border_color = constants.JqueryConfirm.DEFAULT_COLOR
    width = constants.JqueryConfirm.DEFAULT_WIDTH
    if not message:
        message = ""
    btn_count = 0
    # JS template used for each button in the multi-button case: stores
    # the typed input and the clicked button's text into jconfirm state.
    b_html = """button_%s: {
            btnClass: 'btn-%s',
            text: '%s',
            action: function(){
                jqc_input = this.$content.find('.jqc_input').val();
                $jqc_input = this.$content.find('.jqc_input').val();
                jconfirm.lastInputText = jqc_input;
                $jqc_status = '%s';
            }
        },"""
    # Single-button variant: named formSubmit so "Enter" can trigger it.
    b1_html = """formSubmit: {
            btnClass: 'btn-%s',
            text: '%s',
            action: function(){
                jqc_input = this.$content.find('.jqc_input').val();
                $jqc_input = this.$content.find('.jqc_input').val();
                jconfirm.lastInputText = jqc_input;
                jqc_status = '%s';
                $jqc_status = jqc_status;
                jconfirm.lastButtonText = jqc_status;
            }
        },"""
    one_button_trigger = ""
    if len(buttons) == 1:
        # If there's only one button, allow form submit with "Enter/Return"
        one_button_trigger = "jc.$$formSubmit.trigger('click');"
    all_buttons = ""
    for button in buttons:
        text = button[0]
        text = js_utils.escape_quotes_if_needed(text)
        color = button[1]
        if not color:
            # Fall back to the default button color.
            color = "blue"
        btn_count += 1
        if len(buttons) == 1:
            new_button = b1_html % (color, text, text)
        else:
            new_button = b_html % (btn_count, color, text, text)
        all_buttons += new_button
    if options:
        for option in options:
            if option[0].lower() == "theme":
                theme = option[1]
            elif option[0].lower() == "color":
                border_color = option[1]
            elif option[0].lower() == "width":
                width = option[1]
            else:
                raise Exception('Unknown option: "%s"' % option[0])
    content = '<div></div><font color="#0066ee">%s</font>' % (message)
    content = js_utils.escape_quotes_if_needed(content)
    # Per-theme background overlay opacity tuning.
    overlay_opacity = "0.32"
    if theme.lower() == "supervan":
        overlay_opacity = "0.56"
    if theme.lower() == "bootstrap":
        overlay_opacity = "0.64"
    if theme.lower() == "modern":
        overlay_opacity = "0.5"
    if theme.lower() == "material":
        overlay_opacity = "0.4"
    # NOTE(review): ``form_code`` below is not defined in this function;
    # presumably it is a module-level template holding the <form>/<input>
    # HTML — confirm it exists at module scope, otherwise this raises
    # NameError at runtime.
    jqcd = """jconfirm({
            boxWidth: '%s',
            useBootstrap: false,
            containerFluid: true,
            bgOpacity: %s,
            type: '%s',
            theme: '%s',
            animationBounce: 1,
            typeAnimated: true,
            animation: 'scale',
            draggable: true,
            dragWindowGap: 1,
            container: 'body',
            title: '%s',
            content: '<div></div>' +
            %s,
            buttons: {
                %s
            },
            onContentReady: function () {
                var jc = this;
                this.$content.find('form.jqc_form').on('submit', function (e) {
                // User submits the form by pressing "Enter" in the field
                    e.preventDefault();
                    %s
                });
            }
        });""" % (
        width,
        overlay_opacity,
        border_color,
        theme,
        content,
        form_code,
        all_buttons,
        one_button_trigger,
    )
    driver.execute_script(jqcd)
| true
| true
|
f71779ec19d93ec24d41da7b598913f8c5798de5
| 1,316
|
py
|
Python
|
ShowProcess.py
|
4a5g0030/line_follow
|
570e65fb62803f7f5062402a45654809b01b7aaa
|
[
"MIT"
] | 1
|
2019-06-19T18:32:28.000Z
|
2019-06-19T18:32:28.000Z
|
ShowProcess.py
|
4a5g0030/line_follow
|
570e65fb62803f7f5062402a45654809b01b7aaa
|
[
"MIT"
] | null | null | null |
ShowProcess.py
|
4a5g0030/line_follow
|
570e65fb62803f7f5062402a45654809b01b7aaa
|
[
"MIT"
] | null | null | null |
import time
import sys
class ShowProcess():
    """Single-line console progress bar.

    Each update redraws something like ``[>>>>>>----]42.00%`` in place
    (carriage-return refresh).  Once all steps are done, a completion
    message is printed and the step counter resets to zero.
    """
    # Class-level defaults (instances shadow these in __init__).
    i = 0             # current step count
    max_steps = 0     # total number of steps to process
    max_arrow = 50    # width of the bar, in characters
    infoDone = 'done'

    def __init__(self, max_steps, infoDone = 'Done'):
        """Remember the total step count and the completion message."""
        self.max_steps = max_steps
        self.i = 0
        self.infoDone = infoDone

    def show_process(self, i=None):
        """Advance (or jump to step *i*) and redraw the bar in place."""
        self.i = self.i + 1 if i is None else i
        filled = int(self.i * self.max_arrow / self.max_steps)
        bar = '>' * filled + '-' * (self.max_arrow - filled)
        percent = self.i * 100.0 / self.max_steps
        sys.stdout.write('[%s]%.2f%%\r' % (bar, percent))
        sys.stdout.flush()
        if self.i >= self.max_steps:
            self.close()

    def close(self):
        """Finish: newline, print the done message, reset the counter."""
        print('')
        print(self.infoDone)
        self.i = 0
# ————————————————
# 版权声明:本文为CSDN博主「持久决心」的原创文章,遵循CC 4.0 by-sa版权协议,转载请附上原文出处链接及本声明。
# 原文链接:https://blog.csdn.net/u013832707/article/details/73608504
| 29.909091
| 77
| 0.534195
|
import time
import sys
class ShowProcess():
# 显示处理进度的类
# 调用该类相关函数即可实现处理进度的显示
# """
i = 0
max_steps = 0
max_arrow = 50
infoDone = 'done'
def __init__(self, max_steps, infoDone = 'Done'):
self.max_steps = max_steps
self.i = 0
self.infoDone = infoDone
def show_process(self, i=None):
if i is not None:
self.i = i
else:
self.i += 1
num_arrow = int(self.i * self.max_arrow / self.max_steps)
num_line = self.max_arrow - num_arrow
percent = self.i * 100.0 / self.max_steps
process_bar = '[' + '>' * num_arrow + '-' * num_line + ']'\
+ '%.2f' % percent + '%' + '\r'
sys.stdout.write(process_bar)
sys.stdout.flush()
if self.i >= self.max_steps:
self.close()
def close(self):
print('')
print(self.infoDone)
self.i = 0
| true
| true
|
f7177a2b9fd9f213e95853d3176e200b98b80f37
| 1,793
|
py
|
Python
|
unitest/test_supermesh.py
|
JeremieMelo/ADEPT
|
f79f518197798735cb684b373e11cdcc8a80d872
|
[
"MIT"
] | 5
|
2022-02-26T09:14:47.000Z
|
2022-03-20T22:57:06.000Z
|
unitest/test_supermesh.py
|
JeremieMelo/ADEPT
|
f79f518197798735cb684b373e11cdcc8a80d872
|
[
"MIT"
] | null | null | null |
unitest/test_supermesh.py
|
JeremieMelo/ADEPT
|
f79f518197798735cb684b373e11cdcc8a80d872
|
[
"MIT"
] | null | null | null |
'''
Description:
Author: Jiaqi Gu (jqgu@utexas.edu)
Date: 2021-09-27 23:48:01
LastEditors: Jiaqi Gu (jqgu@utexas.edu)
LastEditTime: 2022-02-26 02:22:52
'''
import torch
from core.models.layers.super_mesh import super_layer_name_dict
def test():
    """Smoke-test a "ps_dc_cr" super-mesh layer.

    Builds the layer, samples an architecture, constructs the U/V
    unitaries and the weight matrix, then backprops a scalar loss and
    prints gradient norms of the phase-shifter parameters.

    NOTE(review): requires a CUDA device and the project's
    ``super_layer_name_dict`` registry; this is a manual test script,
    not a self-checking unit test (it only prints).
    """
    device=torch.device("cuda:0")
    # p x q grid of k-waveguide blocks.
    p, q, k = 2, 2, 4
    # NOTE(review): x is built but never used below — presumably left
    # over from an earlier check.
    x = torch.eye(k, dtype=torch.cfloat, device=device).unsqueeze(0).repeat(q,1,1).permute(1,0,2).contiguous()
    sigma = torch.ones(p,q,k, device=device)
    # x [bs, q, k]
    arch = dict(
        n_waveguides=k,
        n_front_share_waveguides=k,
        n_front_share_ops=k,
        n_blocks=4,
        n_layers_per_block=2,
        n_front_share_blocks=2,
        share_ps="row_col",
        interleave_dc=True,
    )
    # Flattened (op-width, flag) pairs per block, plus the block count.
    sample_arch = [
        k//3,1,
        k//2,1,
        k//2,1,
        k//2,1,
        4
    ]
    layer = super_layer_name_dict["ps_dc_cr"](arch, device=device)
    super_ps_layers = layer.build_ps_layser(grid_dim_x=q, grid_dim_y=p)
    for m in super_ps_layers:
        # m.reset_parameters(alg="identity")
        m.reset_parameters(alg="uniform")
    layer.set_sample_arch(sample_arch)
    print(layer)
    layer.set_identity_cr()
    layer.build_sampling_coefficients()
    layer.set_gumbel_temperature(0.1)
    layer.set_aux_skip_path(0)
    layer.build_arch_mask()
    # U/V should be (block-diagonal) unitaries: print U^H U and V^H V
    # below to eyeball that they are close to identity.
    U,V = layer.get_UV(super_ps_layers, q, p)
    print(U, U.size())
    print(U[0,0].conj().t().matmul(U[0,0]))
    print(V)
    print(V[0,0].conj().t().matmul(V[0,0]))
    weight = layer.get_weight_matrix(super_ps_layers, sigma)
    print(weight)
    # Backprop a scalar to confirm gradients reach all parameter groups.
    weight.sum().backward()
    print(super_ps_layers[0].weight.grad.norm(p=2))
    print(layer.super_layers_all[0].weight.grad.norm(p=2))
    print(layer.super_layers_all[1].weight.grad.norm(p=2))


if __name__ == "__main__":
    test()
| 28.460317
| 110
| 0.644172
|
import torch
from core.models.layers.super_mesh import super_layer_name_dict
def test():
device=torch.device("cuda:0")
p, q, k = 2, 2, 4
x = torch.eye(k, dtype=torch.cfloat, device=device).unsqueeze(0).repeat(q,1,1).permute(1,0,2).contiguous()
sigma = torch.ones(p,q,k, device=device)
arch = dict(
n_waveguides=k,
n_front_share_waveguides=k,
n_front_share_ops=k,
n_blocks=4,
n_layers_per_block=2,
n_front_share_blocks=2,
share_ps="row_col",
interleave_dc=True,
)
sample_arch = [
k//3,1,
k//2,1,
k//2,1,
k//2,1,
4
]
layer = super_layer_name_dict["ps_dc_cr"](arch, device=device)
super_ps_layers = layer.build_ps_layser(grid_dim_x=q, grid_dim_y=p)
for m in super_ps_layers:
m.reset_parameters(alg="uniform")
layer.set_sample_arch(sample_arch)
print(layer)
layer.set_identity_cr()
layer.build_sampling_coefficients()
layer.set_gumbel_temperature(0.1)
layer.set_aux_skip_path(0)
layer.build_arch_mask()
U,V = layer.get_UV(super_ps_layers, q, p)
print(U, U.size())
print(U[0,0].conj().t().matmul(U[0,0]))
print(V)
print(V[0,0].conj().t().matmul(V[0,0]))
weight = layer.get_weight_matrix(super_ps_layers, sigma)
print(weight)
weight.sum().backward()
print(super_ps_layers[0].weight.grad.norm(p=2))
print(layer.super_layers_all[0].weight.grad.norm(p=2))
print(layer.super_layers_all[1].weight.grad.norm(p=2))
if __name__ == "__main__":
test()
| true
| true
|
f7177a7169c09bf58f9be260fccfe1d0276b2e83
| 712
|
py
|
Python
|
parsons/google/utitities.py
|
Tomiiwa/parsons
|
3886327c197e357ba5342603d8409774a541333b
|
[
"Apache-2.0"
] | 3
|
2019-09-05T16:57:15.000Z
|
2019-10-01T19:56:58.000Z
|
parsons/google/utitities.py
|
Tomiiwa/parsons
|
3886327c197e357ba5342603d8409774a541333b
|
[
"Apache-2.0"
] | 22
|
2019-09-03T13:23:37.000Z
|
2019-10-03T20:32:48.000Z
|
parsons/google/utitities.py
|
Tomiiwa/parsons
|
3886327c197e357ba5342603d8409774a541333b
|
[
"Apache-2.0"
] | 2
|
2019-09-01T18:30:10.000Z
|
2019-10-03T20:07:46.000Z
|
from parsons.utilities import files
from parsons.utilities import check_env
import json
import os
def setup_google_application_credentials(app_creds, env_var_name='GOOGLE_APPLICATION_CREDENTIALS'):
    """Point ``env_var_name`` at a Google credentials file.

    ``app_creds`` may be a dict of credentials, a JSON string, or a path
    to an existing credentials file (it may also be picked up from the
    environment variable itself via ``check_env.check``).  Dicts are
    serialized to JSON; JSON content is written to a temporary ``.json``
    file whose path is stored in the environment variable.

    :param app_creds: dict, JSON string, or file path with credentials.
    :param env_var_name: environment variable to set (and to fall back
        on when ``app_creds`` is ``None``).
    """
    credentials = check_env.check(env_var_name, app_creds)
    try:
        # Use isinstance (not ``type(x) is dict``) so dict subclasses work.
        if isinstance(credentials, dict):
            credentials = json.dumps(credentials)
        creds_path = files.string_to_temp_file(credentials, suffix='.json')
    except ValueError:
        # NOTE(review): a ValueError here is taken to mean the value was
        # not writable JSON content and is already a file path; this
        # fallback is preserved from the original behavior.
        creds_path = credentials
    os.environ[env_var_name] = creds_path
| 35.6
| 99
| 0.738764
|
from parsons.utilities import files
from parsons.utilities import check_env
import json
import os
def setup_google_application_credentials(app_creds, env_var_name='GOOGLE_APPLICATION_CREDENTIALS'):
credentials = check_env.check(env_var_name, app_creds)
try:
if (type(credentials) is dict):
credentials = json.dumps(credentials)
creds_path = files.string_to_temp_file(credentials, suffix='.json')
except ValueError:
creds_path = credentials
os.environ[env_var_name] = creds_path
| true
| true
|
f7177a79f201b728ccb90ed68b5736930baa2a1a
| 4,126
|
py
|
Python
|
xsimlab/ipython.py
|
jvail/xarray-simlab
|
3e8cb81775868e3e7c6495489ba351567e0d7e42
|
[
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 48
|
2017-06-19T16:31:37.000Z
|
2021-04-26T04:42:48.000Z
|
xsimlab/ipython.py
|
jvail/xarray-simlab
|
3e8cb81775868e3e7c6495489ba351567e0d7e42
|
[
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 108
|
2017-06-26T12:22:10.000Z
|
2021-03-09T08:57:02.000Z
|
xsimlab/ipython.py
|
jvail/xarray-simlab
|
3e8cb81775868e3e7c6495489ba351567e0d7e42
|
[
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 10
|
2017-08-11T04:56:20.000Z
|
2021-03-01T16:46:55.000Z
|
import textwrap
from IPython.core.magic import Magics, magics_class, line_magic
from IPython.core import magic_arguments
import attr
from .formatting import format_var_dims
from .model import Model
from .utils import variables_dict
setup_template = """
import xsimlab as xs
ds_in = xs.create_setup(
model={model},
clocks={{}},
input_vars={{
{in_vars}
}},
output_vars={{}}
)
"""
def format_var_comment(var, verbose=0):
    """Build a '#'-prefixed comment block describing *var*.

    Verbosity is cumulative: 1 adds the description (or "---" when the
    description is empty), 2 adds dimensions and the static flag, 3 adds
    any extra variable attrs.  Returns "" when *verbose* is falsy.
    """
    parts = []
    if verbose:
        description = var.metadata["description"]
        if description:
            parts.append(
                textwrap.fill(
                    description,
                    width=86,
                    initial_indent="# ",
                    subsequent_indent="# ",
                )
            )
        else:
            parts.append("# ---")
        parts.append("\n")
    if verbose > 1:
        dims = format_var_dims(var)
        if dims:
            parts.append(f"# dimensions: {dims}\n")
        if var.metadata["static"]:
            parts.append("# static: main clock dimension not supported\n")
    if verbose > 2:
        extra_attrs = var.metadata.get("attrs", False)
        if extra_attrs:
            parts.append(
                "".join(f"# {k}: {v}\n" for k, v in extra_attrs.items())
            )
    return "".join(parts)
def format_input_vars(
    model, skip_default=False, default=False, verbose=0, nested=False
):
    """Render the model's input variables as dict-literal entry lines.

    The flat form emits ``'process__var': value,`` entries; *nested*
    groups entries per process instead.  *skip_default* drops variables
    that have defaults, while *default* pre-fills each remaining entry
    with the repr of its default value.  The result is indented to fit
    inside the ``setup_template`` body.
    """
    entries = []
    for proc_name, var_names in model.input_vars_dict.items():
        proc_entries = []
        for var_name in var_names:
            var = variables_dict(type(model[proc_name]))[var_name]
            has_default = var.default is not attr.NOTHING
            if skip_default and has_default:
                continue
            value = f"{var.default!r}" if default and has_default else ""
            prefix = format_var_comment(var, verbose=verbose)
            if nested:
                proc_entries.append(prefix + f"'{var_name}': {value},")
            else:
                entries.append(prefix + f"'{proc_name}__{var_name}': {value},")
        if nested and proc_entries:
            body = textwrap.indent("\n".join(proc_entries), " " * 4)
            entries.append(f"'{proc_name}': {{\n{body}\n}},")
    return textwrap.indent("\n".join(entries), " " * 8)[8:]
@magics_class
class SimulationMagics(Magics):
    """IPython line magics for setting up xarray-simlab simulations."""

    @line_magic
    @magic_arguments.magic_arguments()
    @magic_arguments.argument("model", help="xsimlab.Model object")
    @magic_arguments.argument(
        "-s",
        "--skip-default",
        action="store_true",
        default=False,
        help="Don't add input variables that have default values",
    )
    @magic_arguments.argument(
        "-d",
        "--default",
        action="store_true",
        default=False,
        help="Add input variables default values, if any (ignored if --skip-default)",
    )
    @magic_arguments.argument(
        "-v",
        "--verbose",
        action="count",
        default=0,
        help="Increase verbosity (i.e., add more input variables info as comments)",
    )
    @magic_arguments.argument(
        "-n",
        "--nested",
        action="store_true",
        default=False,
        help="Group input variables by process",
    )
    def create_setup(self, line=""):
        """Pre-fill the current cell with a new simulation setup.

        Looks up the named model in the user namespace, renders an
        ``xs.create_setup(...)`` template listing its input variables,
        and replaces the current cell's content with the result.

        Raises KeyError when the model name is not defined in the user
        namespace, TypeError when it is not a ``xsimlab.Model``.
        """
        args = magic_arguments.parse_argstring(self.create_setup, line)
        model_obj = self.shell.user_ns.get(args.model)
        if model_obj is None:
            raise KeyError(f"Model '{args.model}' not defined or not imported")
        elif not isinstance(model_obj, Model):
            raise TypeError(f"'{args.model}' is not a xsimlab.Model object")
        rendered = setup_template.format(
            model=args.model,
            in_vars=format_input_vars(
                model_obj,
                skip_default=args.skip_default,
                default=args.default,
                verbose=args.verbose,
                nested=args.nested,
            ),
        )
        # Keep the original magic invocation at the top of the generated
        # cell so the command stays visible/reproducible.
        content = f"# %create_setup {line}" + rendered
        self.shell.set_next_input(content, replace=True)
def load_ipython_extension(ipython):
    """Entry point for ``%load_ext``: register the simulation magics."""
    ipython.register_magics(SimulationMagics)
| 27.506667
| 86
| 0.574649
|
import textwrap
from IPython.core.magic import Magics, magics_class, line_magic
from IPython.core import magic_arguments
import attr
from .formatting import format_var_dims
from .model import Model
from .utils import variables_dict
setup_template = """
import xsimlab as xs
ds_in = xs.create_setup(
model={model},
clocks={{}},
input_vars={{
{in_vars}
}},
output_vars={{}}
)
"""
def format_var_comment(var, verbose=0):
comment = ""
if verbose:
var_desc = var.metadata["description"]
if var_desc:
comment += textwrap.fill(
var_desc, width=86, initial_indent="# ", subsequent_indent="# "
)
else:
comment += "# ---"
comment += "\n"
if verbose > 1:
var_dims = format_var_dims(var)
if var_dims:
comment += f"# dimensions: {var_dims}\n"
if var.metadata["static"]:
comment += f"# static: main clock dimension not supported\n"
if verbose > 2:
var_attrs = var.metadata.get("attrs", False)
if var_attrs:
for k, v in var_attrs.items():
comment += f"# {k}: {v}\n"
return comment
def format_input_vars(
model, skip_default=False, default=False, verbose=0, nested=False
):
lines = []
for pn, vnames in model.input_vars_dict.items():
plines = []
for vn in vnames:
var = variables_dict(type(model[pn]))[vn]
if skip_default and var.default is not attr.NOTHING:
continue
if default and var.default is not attr.NOTHING:
default_val = f"{var.default!r}"
else:
default_val = ""
comment = format_var_comment(var, verbose=verbose)
if nested:
plines.append(comment + f"'{vn}': {default_val},")
else:
lines.append(comment + f"'{pn}__{vn}': {default_val},")
if nested and plines:
pfmt = textwrap.indent("\n".join(plines), " " * 4)
lines.append(f"'{pn}': {{\n{pfmt}\n}},")
return textwrap.indent("\n".join(lines), " " * 8)[8:]
@magics_class
class SimulationMagics(Magics):
@line_magic
@magic_arguments.magic_arguments()
@magic_arguments.argument("model", help="xsimlab.Model object")
@magic_arguments.argument(
"-s",
"--skip-default",
action="store_true",
default=False,
help="Don't add input variables that have default values",
)
@magic_arguments.argument(
"-d",
"--default",
action="store_true",
default=False,
help="Add input variables default values, if any (ignored if --skip-default)",
)
@magic_arguments.argument(
"-v",
"--verbose",
action="count",
default=0,
help="Increase verbosity (i.e., add more input variables info as comments)",
)
@magic_arguments.argument(
"-n",
"--nested",
action="store_true",
default=False,
help="Group input variables by process",
)
def create_setup(self, line=""):
args = magic_arguments.parse_argstring(self.create_setup, line)
model_obj = self.shell.user_ns.get(args.model)
if model_obj is None:
raise KeyError(f"Model '{args.model}' not defined or not imported")
elif not isinstance(model_obj, Model):
raise TypeError(f"'{args.model}' is not a xsimlab.Model object")
rendered = setup_template.format(
model=args.model,
in_vars=format_input_vars(
model_obj,
skip_default=args.skip_default,
default=args.default,
verbose=args.verbose,
nested=args.nested,
),
)
content = f"# %create_setup {line}" + rendered
self.shell.set_next_input(content, replace=True)
def load_ipython_extension(ipython):
ipython.register_magics(SimulationMagics)
| true
| true
|
f7177b1e4046a2c4d8c4f139594073d0ad624f46
| 752
|
py
|
Python
|
api/tournaments/migrations/0002_auto_20190804_1830.py
|
individuo7/wololo-tournaments-api
|
5be6284064373e99346d39c78844e454c41c501d
|
[
"MIT"
] | 2
|
2019-12-09T10:19:36.000Z
|
2020-01-11T11:48:41.000Z
|
api/tournaments/migrations/0002_auto_20190804_1830.py
|
individuo7/wololo-tournaments-api
|
5be6284064373e99346d39c78844e454c41c501d
|
[
"MIT"
] | null | null | null |
api/tournaments/migrations/0002_auto_20190804_1830.py
|
individuo7/wololo-tournaments-api
|
5be6284064373e99346d39c78844e454c41c501d
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.3 on 2019-08-04 18:30
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration (2019-08-04).

    Sets ``related_name='games'`` on ``Game.tournament`` and makes
    ``PlayerGame.team`` a required foreign key.
    """

    dependencies = [
        ('tournaments', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='game',
            name='tournament',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='games', to='tournaments.Tournament'),
        ),
        migrations.AlterField(
            model_name='playergame',
            name='team',
            # default=1 only backfills existing rows during the migration
            # (preserve_default=False drops it from the model afterwards).
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='tournaments.Team'),
            preserve_default=False,
        ),
    ]
| 28.923077
| 132
| 0.630319
|
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('tournaments', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='game',
name='tournament',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='games', to='tournaments.Tournament'),
),
migrations.AlterField(
model_name='playergame',
name='team',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='tournaments.Team'),
preserve_default=False,
),
]
| true
| true
|
f7177be77bf1953daf64f669f1516a9413569e6f
| 1,785
|
py
|
Python
|
model/contact.py
|
dorotan/pythontraining
|
13cd9d5d8b0c772951e9caf98166118e7ffa387c
|
[
"Apache-2.0"
] | null | null | null |
model/contact.py
|
dorotan/pythontraining
|
13cd9d5d8b0c772951e9caf98166118e7ffa387c
|
[
"Apache-2.0"
] | null | null | null |
model/contact.py
|
dorotan/pythontraining
|
13cd9d5d8b0c772951e9caf98166118e7ffa387c
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
__author__ = 'dorota'
from sys import maxsize
class Contact:
    """Value object for one address-book entry.

    Two contacts compare equal when their first and last names match and
    their ids either match or at least one id is unset (``None``).
    """

    def __init__(self, first_name=None, middle_name=None, last_name=None, nickname=None, title=None, company=None,
                 address=None, home_number=None, mobile_number=None, work_number=None, fax=None, first_email=None,
                 second_email=None, third_email=None, wwwpage=None, birth_year=None, anniversary_year=None,
                 second_address=None, second_private_number=None, notes=None, id= None, all_phones_from_homepage=None):
        """Store every keyword argument as an attribute of the same name."""
        for field, value in locals().items():
            if field != "self":
                setattr(self, field, value)

    def __repr__(self):
        return f"{self.id}:{self.first_name} {self.last_name}"

    def __eq__(self, other):
        # ids are compatible when either side is unset or both match.
        ids_compatible = self.id is None or other.id is None or self.id == other.id
        names_match = (self.first_name, self.last_name) == (other.first_name, other.last_name)
        return ids_compatible and names_match
def id_or_max(con):
    """Sort key: the contact's id as an int, or sys.maxsize when unset."""
    return int(con.id) if con.id else maxsize
| 38.804348
| 119
| 0.656022
|
__author__ = 'dorota'
from sys import maxsize
class Contact:
def __init__(self, first_name=None, middle_name=None, last_name=None, nickname=None, title=None, company=None,
address=None, home_number=None, mobile_number=None, work_number=None, fax=None, first_email=None,
second_email=None, third_email=None, wwwpage=None, birth_year=None, anniversary_year=None,
second_address=None, second_private_number=None, notes=None, id= None, all_phones_from_homepage=None):
self.first_name = first_name
self.middle_name = middle_name
self.last_name = last_name
self.nickname = nickname
self.title = title
self.company = company
self.address = address
self.home_number = home_number
self.mobile_number = mobile_number
self.work_number = work_number
self.fax = fax
self.first_email = first_email
self.second_email = second_email
self.third_email = third_email
self.wwwpage = wwwpage
self.birth_year = birth_year
self.anniversary_year = anniversary_year
self.second_address = second_address
self.second_private_number = second_private_number
self.notes = notes
self.id = id
self.all_phones_from_homepage=all_phones_from_homepage
def __repr__(self):
return "%s:%s %s" % (self.id, self.first_name, self.last_name)
def __eq__(self, other):
return (self.id is None or other.id is None or self.id == other.id)\
and self.first_name == other.first_name and self.last_name == other.last_name
def id_or_max(con):
if con.id:
return int(con.id)
else:
return maxsize
| true
| true
|
f7177c158a0506efce3af2ecad52f923c731c8ea
| 1,903
|
py
|
Python
|
inference_exploration/cpu/main.py
|
nbortolotti/tflite-tpu-experiences
|
8f613e059335d1d90886282f005261917fd9cfd3
|
[
"Apache-2.0"
] | 1
|
2019-12-06T12:58:33.000Z
|
2019-12-06T12:58:33.000Z
|
inference_exploration/cpu/main.py
|
nbortolotti/tflite-tpu-experiences
|
8f613e059335d1d90886282f005261917fd9cfd3
|
[
"Apache-2.0"
] | 9
|
2020-10-12T13:57:32.000Z
|
2021-09-16T19:38:26.000Z
|
inference_exploration/cpu/main.py
|
nbortolotti/tflite-tpu-experiences
|
8f613e059335d1d90886282f005261917fd9cfd3
|
[
"Apache-2.0"
] | null | null | null |
import os
import numpy as np
import PIL.Image as Image
import matplotlib.pylab as plt
import time
import tensorflow as tf
import tensorflow_hub as hub
from tensorflow.keras import layers
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
def image_analysis(classifier, image_shape, img_array):
    """Classify a single image and return the winning class index.

    The image is expanded to a batch of one before prediction.  The
    ``image_shape`` argument is accepted for interface compatibility but
    is not used here.
    """
    batch = img_array[np.newaxis, ...]
    scores = classifier.predict(batch)
    return np.argmax(scores[0], axis=-1)
def main():
    """Download a MobileNetV2 classifier from TF Hub, classify a sample
    image, fetch the ImageNet labels, then time inference five times.

    NOTE(review): requires network access (TF Hub model, sample image,
    labels file) and the tensorflow/tensorflow_hub packages.
    """
    classifier_url = "https://tfhub.dev/google/imagenet/mobilenet_v2_100_224/classification/4"
    image_shape = (224, 224)
    classifier = tf.keras.Sequential([
        hub.KerasLayer(classifier_url, input_shape=image_shape + (3,))
    ])
    img_file = tf.keras.utils.get_file('image.jpg', 'https://storage.googleapis.com/demostration_images/2.jpg')
    img = Image.open(img_file).resize(image_shape)
    # Scale pixel values to [0, 1] as expected by the TF Hub model.
    img_array = np.array(img) / 255.0
    # img_array.shape
    predicted_class = image_analysis(classifier, image_shape, img_array)
    labels_path = tf.keras.utils.get_file('ImageNetLabels.txt',
                                          'https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt')
    imagenet_labels = np.array(open(labels_path).read().splitlines())
    # Optional visualization (disabled):
    # plt.imshow(img_array)
    # plt.axis('off')
    # predicted_class_name = imagenet_labels[predicted_class]
    # _ = plt.title("Prediction: " + predicted_class_name.title())
    # plt.show()
    # Run the timing probe a few times to see warm-up vs steady state.
    for _ in range(5):
        inferenceTime(img_array, classifier)
# Measure how long a single prediction takes.
def inferenceTime(image, mClassifier):
    """Run one prediction on a batch of one and print the elapsed
    wall-clock time in milliseconds."""
    started = time.time()
    mClassifier.predict(image[np.newaxis, ...])
    elapsed_ms = (time.time() - started) * 1000
    print(elapsed_ms)


if __name__ == '__main__':
    main()
| 30.206349
| 123
| 0.695218
|
import os
import numpy as np
import PIL.Image as Image
import matplotlib.pylab as plt
import time
import tensorflow as tf
import tensorflow_hub as hub
from tensorflow.keras import layers
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
def image_analysis(classifier, image_shape, img_array):
result = classifier.predict(img_array[np.newaxis, ...])
predicted_class = np.argmax(result[0], axis=-1)
return predicted_class
def main():
classifier_url = "https://tfhub.dev/google/imagenet/mobilenet_v2_100_224/classification/4"
image_shape = (224, 224)
classifier = tf.keras.Sequential([
hub.KerasLayer(classifier_url, input_shape=image_shape + (3,))
])
img_file = tf.keras.utils.get_file('image.jpg', 'https://storage.googleapis.com/demostration_images/2.jpg')
img = Image.open(img_file).resize(image_shape)
img_array = np.array(img) / 255.0
predicted_class = image_analysis(classifier, image_shape, img_array)
labels_path = tf.keras.utils.get_file('ImageNetLabels.txt',
'https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt')
imagenet_labels = np.array(open(labels_path).read().splitlines())
for _ in range(5):
inferenceTime(img_array, classifier)
def inferenceTime(image, mClassifier):
start = time.time()
result = mClassifier.predict(image[np.newaxis, ...])
end = time.time()
print((end - start)*1000)
if __name__ == '__main__':
main()
| true
| true
|
f7177d19acc9ab604ff8ca8dd4f7629ce32e4671
| 203
|
py
|
Python
|
EOC/prototype/data/datasets/__init__.py
|
double-fire-0/SystemNoise
|
ab042dd54371482a18117eb13f816a7472e51590
|
[
"Apache-2.0"
] | null | null | null |
EOC/prototype/data/datasets/__init__.py
|
double-fire-0/SystemNoise
|
ab042dd54371482a18117eb13f816a7472e51590
|
[
"Apache-2.0"
] | null | null | null |
EOC/prototype/data/datasets/__init__.py
|
double-fire-0/SystemNoise
|
ab042dd54371482a18117eb13f816a7472e51590
|
[
"Apache-2.0"
] | null | null | null |
from .imagenet_dataset import ImageNetDataset, RankedImageNetDataset, DecoderResizeImageNetDataset # noqa
from .custom_dataset import CustomDataset # noqa
from .imagnetc import ImageNet_C_Dataset
| 33.833333
| 106
| 0.842365
|
from .imagenet_dataset import ImageNetDataset, RankedImageNetDataset, DecoderResizeImageNetDataset
from .custom_dataset import CustomDataset
from .imagnetc import ImageNet_C_Dataset
| true
| true
|
f7177d37c526cd723adac7c722303a77bd48abdf
| 418
|
py
|
Python
|
blog/migrations/0006_auto_20220427_1014.py
|
ali-abbaszade/mysite
|
9ef1b1211bd827c178f279e69ddbf4c229c539fa
|
[
"MIT"
] | null | null | null |
blog/migrations/0006_auto_20220427_1014.py
|
ali-abbaszade/mysite
|
9ef1b1211bd827c178f279e69ddbf4c229c539fa
|
[
"MIT"
] | null | null | null |
blog/migrations/0006_auto_20220427_1014.py
|
ali-abbaszade/mysite
|
9ef1b1211bd827c178f279e69ddbf4c229c539fa
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.12 on 2022-04-27 05:44
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drop the ``Post.category`` field, then
    delete the now-unreferenced ``Category`` model."""

    dependencies = [
        ('blog', '0005_auto_20220427_1002'),
    ]

    operations = [
        # The FK must be removed before the model it points to is deleted.
        migrations.RemoveField(
            model_name='post',
            name='category',
        ),
        migrations.DeleteModel(
            name='Category',
        ),
    ]
| 19.904762
| 49
| 0.538278
|
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('blog', '0005_auto_20220427_1002'),
]
operations = [
migrations.RemoveField(
model_name='post',
name='category',
),
migrations.DeleteModel(
name='Category',
),
]
| true
| true
|
f7177e32b5e25c3c506bb440f649164a5758e294
| 46,139
|
py
|
Python
|
manila/share/drivers/generic.py
|
vponomaryov/manila
|
ffe135a5b35a0964179f0dc148d569037f26a929
|
[
"Apache-2.0"
] | null | null | null |
manila/share/drivers/generic.py
|
vponomaryov/manila
|
ffe135a5b35a0964179f0dc148d569037f26a929
|
[
"Apache-2.0"
] | null | null | null |
manila/share/drivers/generic.py
|
vponomaryov/manila
|
ffe135a5b35a0964179f0dc148d569037f26a929
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2014 NetApp, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Generic Driver for shares."""
import os
import time
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log
from oslo_utils import importutils
from oslo_utils import units
import retrying
import six
from manila.common import constants as const
from manila import compute
from manila import context
from manila import exception
from manila.i18n import _, _LE, _LI, _LW
from manila.share import driver
from manila.share.drivers import service_instance
from manila import utils
from manila import volume
LOG = log.getLogger(__name__)
share_opts = [
cfg.StrOpt('smb_template_config_path',
default='$state_path/smb.conf',
help="Path to smb config."),
cfg.StrOpt('volume_name_template',
default='manila-share-%s',
help="Volume name template."),
cfg.StrOpt('volume_snapshot_name_template',
default='manila-snapshot-%s',
help="Volume snapshot name template."),
cfg.StrOpt('share_mount_path',
default='/shares',
help="Parent path in service instance where shares "
"will be mounted."),
cfg.IntOpt('max_time_to_create_volume',
default=180,
help="Maximum time to wait for creating cinder volume."),
cfg.IntOpt('max_time_to_extend_volume',
default=180,
help="Maximum time to wait for extending cinder volume."),
cfg.IntOpt('max_time_to_attach',
default=120,
help="Maximum time to wait for attaching cinder volume."),
cfg.StrOpt('service_instance_smb_config_path',
default='$share_mount_path/smb.conf',
help="Path to SMB config in service instance."),
cfg.ListOpt('share_helpers',
default=[
'CIFS=manila.share.drivers.helpers.CIFSHelperIPAccess',
'NFS=manila.share.drivers.helpers.NFSHelper',
],
help='Specify list of share export helpers.'),
cfg.StrOpt('share_volume_fstype',
default='ext4',
choices=['ext4', 'ext3'],
help='Filesystem type of the share volume.'),
cfg.StrOpt('cinder_volume_type',
help='Name or id of cinder volume type which will be used '
'for all volumes created by driver.'),
]
CONF = cfg.CONF
CONF.register_opts(share_opts)
# NOTE(u_glide): These constants refer to the column number in the "df" output
BLOCK_DEVICE_SIZE_INDEX = 1
USED_SPACE_INDEX = 2
def ensure_server(f):
    """Decorator for driver methods that require a usable share server.

    Resolves and validates the ``share_server`` kwarg before calling
    ``f``:

    - When the driver does not handle share servers, the common (default)
      service instance is fetched; passing an explicit ``share_server``
      is then an error.
    - When the driver does handle share servers, ``share_server`` is
      required and must carry ``backend_details``.

    In both cases the service instance is checked for availability and
    ``ServiceInstanceUnavailable`` is raised when it is unreachable.
    """
    def wrap(self, context, *args, **kwargs):
        server = kwargs.get('share_server')

        if not self.driver_handles_share_servers:
            if not server:
                # Fall back to the single shared service instance.
                server = self.service_instance_manager.get_common_server()
                kwargs['share_server'] = server
            else:
                raise exception.ManilaException(
                    _("Share server handling is not available. "
                      "But 'share_server' was provided. '%s'. "
                      "Share network should not be used.") % server.get('id'))
        elif not server:
            raise exception.ManilaException(
                _("Share server handling is enabled. But 'share_server' "
                  "is not provided. Make sure you used 'share_network'."))

        if not server.get('backend_details'):
            raise exception.ManilaException(
                _("Share server '%s' does not have backend details.") %
                server['id'])
        if not self.service_instance_manager.ensure_service_instance(
                context, server['backend_details']):
            raise exception.ServiceInstanceUnavailable()

        return f(self, context, *args, **kwargs)

    return wrap
class GenericShareDriver(driver.ExecuteMixin, driver.ShareDriver):
    """Executes commands relating to Shares.

    Backs each share with a cinder volume that is attached to a Nova
    service instance, formatted, mounted there, and exported through a
    protocol helper (NFS/CIFS).
    """
    def __init__(self, *args, **kwargs):
        """Do initialization."""
        # NOTE(review): [False, True] looks like the supported
        # driver_handles_share_servers modes -- confirm against the
        # ShareDriver base class signature.
        super(GenericShareDriver, self).__init__(
            [False, True], *args, **kwargs)
        self.admin_context = context.get_admin_context()
        self.configuration.append_config_values(share_opts)
        # Protocol name -> helper instance; populated by _setup_helpers().
        self._helpers = {}
        self.backend_name = self.configuration.safe_get(
            'share_backend_name') or "Cinder_Volumes"
        # Cached SSH pools/sessions keyed by service instance id.
        self.ssh_connections = {}
        self._setup_service_instance_manager()
        self.private_storage = kwargs.get('private_storage')
    def _setup_service_instance_manager(self):
        # Manages the lifecycle of the Nova service VMs that export shares.
        self.service_instance_manager = (
            service_instance.ServiceInstanceManager(
                driver_config=self.configuration))
def _ssh_exec(self, server, command, check_exit_code=True):
connection = self.ssh_connections.get(server['instance_id'])
ssh_conn_timeout = self.configuration.ssh_conn_timeout
if not connection:
ssh_pool = utils.SSHPool(server['ip'],
22,
ssh_conn_timeout,
server['username'],
server.get('password'),
server.get('pk_path'),
max_size=1)
ssh = ssh_pool.create()
self.ssh_connections[server['instance_id']] = (ssh_pool, ssh)
else:
ssh_pool, ssh = connection
if not ssh.get_transport().is_active():
ssh_pool.remove(ssh)
ssh = ssh_pool.create()
self.ssh_connections[server['instance_id']] = (ssh_pool, ssh)
# (aovchinnikov): ssh_execute does not behave well when passed
# parameters with spaces.
wrap = lambda token: "\"" + token + "\""
command = [wrap(tkn) if tkn.count(' ') else tkn for tkn in command]
return processutils.ssh_execute(ssh, ' '.join(command),
check_exit_code=check_exit_code)
    def check_for_setup_error(self):
        """Returns an error if prerequisites aren't met."""
        # Intentionally a no-op: availability of the common service VM is
        # verified by the retry loop in do_setup() instead.
    def do_setup(self, context):
        """Any initialization the generic driver does while starting."""
        super(GenericShareDriver, self).do_setup(context)
        self.compute_api = compute.API()
        self.volume_api = volume.API()
        self._setup_helpers()
        common_sv_available = False
        share_server = None
        sv_fetch_retry_interval = 5
        # In DHSS=False mode block start-up until the common service VM is
        # reachable; with DHSS=True servers are created on demand, so the
        # loop is skipped entirely.
        while not (common_sv_available or self.driver_handles_share_servers):
            try:
                # Verify availability of common server
                share_server = (
                    self.service_instance_manager.get_common_server())
                common_sv_available = self._is_share_server_active(
                    context, share_server)
            except Exception as ex:
                # Keep retrying: transient failures are expected while the
                # service VM boots.
                LOG.error(ex)
            if not common_sv_available:
                time.sleep(sv_fetch_retry_interval)
                LOG.warning(_LW("Waiting for the common service VM to become "
                                "available. "
                                "Driver is currently uninitialized. "
                                "Share server: %(share_server)s "
                                "Retry interval: %(retry_interval)s"),
                            dict(share_server=share_server,
                                 retry_interval=sv_fetch_retry_interval))
def _setup_helpers(self):
"""Initializes protocol-specific NAS drivers."""
helpers = self.configuration.share_helpers
if helpers:
for helper_str in helpers:
share_proto, __, import_str = helper_str.partition('=')
helper = importutils.import_class(import_str)
self._helpers[share_proto.upper()] = helper(
self._execute,
self._ssh_exec,
self.configuration)
else:
raise exception.ManilaException(
"No protocol helpers selected for Generic Driver. "
"Please specify using config option 'share_helpers'.")
@ensure_server
def create_share(self, context, share, share_server=None):
"""Creates share."""
return self._create_share(
context, share,
snapshot=None,
share_server=share_server,
)
    def _create_share(self, context, share, snapshot, share_server=None):
        # Common path for create_share/create_share_from_snapshot:
        # allocate a cinder volume, attach it to the service VM, format it
        # (new shares only), mount it and export it via the helper.
        helper = self._get_helper(share)
        server_details = share_server['backend_details']
        volume = self._allocate_container(
            self.admin_context, share, snapshot=snapshot)
        volume = self._attach_volume(
            self.admin_context, share, server_details['instance_id'], volume)
        if not snapshot:
            # A volume created from a snapshot already carries a filesystem.
            self._format_device(server_details, volume)
        self._mount_device(share, server_details, volume)
        export_locations = helper.create_exports(
            server_details, share['name'])
        return export_locations
    @utils.retry(exception.ProcessExecutionError, backoff_rate=1)
    def _is_device_file_available(self, server_details, volume):
        """Checks whether the device file is available"""
        # 'test -b' exits non-zero until the block device node appears;
        # the retry decorator keeps polling on that failure.
        command = ['sudo', 'test', '-b', volume['mountpoint']]
        self._ssh_exec(server_details, command)
def _format_device(self, server_details, volume):
"""Formats device attached to the service vm."""
self._is_device_file_available(server_details, volume)
command = ['sudo', 'mkfs.%s' % self.configuration.share_volume_fstype,
volume['mountpoint']]
self._ssh_exec(server_details, command)
    def _is_device_mounted(self, mount_path, server_details, volume=None):
        """Checks whether volume already mounted or not.

        When *volume* is given, both the device path and the mount path
        must match an existing mount; otherwise only the mount path is
        checked.
        """
        log_data = {
            'mount_path': mount_path,
            'server_id': server_details['instance_id'],
        }
        if volume and volume.get('mountpoint', ''):
            log_data['volume_id'] = volume['id']
            log_data['dev_mount_path'] = volume['mountpoint']
            msg = ("Checking whether volume '%(volume_id)s' with mountpoint "
                   "'%(dev_mount_path)s' is mounted on mount path '%(mount_p"
                   "ath)s' on server '%(server_id)s' or not." % log_data)
        else:
            msg = ("Checking whether mount path '%(mount_path)s' exists on "
                   "server '%(server_id)s' or not." % log_data)
        LOG.debug(msg)
        mounts_list_cmd = ['sudo', 'mount']
        output, __ = self._ssh_exec(server_details, mounts_list_cmd)
        mounts = output.split('\n')
        for mount in mounts:
            # 'mount' prints lines like '<device> on <path> type ...', so
            # element [0] is the device and element [2] is the mount path.
            mount_elements = mount.split(' ')
            if (len(mount_elements) > 2 and mount_path == mount_elements[2]):
                if volume:
                    # Mount goes with device path and mount path
                    if (volume.get('mountpoint', '') == mount_elements[0]):
                        return True
                else:
                    # Unmount goes only by mount path
                    return True
        return False
    def _sync_mount_temp_and_perm_files(self, server_details):
        """Sync temporary and permanent files for mounted filesystems."""
        try:
            # Persist the current mount table so mounts survive a reboot.
            self._ssh_exec(
                server_details,
                ['sudo', 'cp', const.MOUNT_FILE_TEMP, const.MOUNT_FILE],
            )
        except exception.ProcessExecutionError as e:
            LOG.error(_LE("Failed to sync mount files on server '%s'."),
                      server_details['instance_id'])
            raise exception.ShareBackendException(msg=six.text_type(e))
        try:
            # Remount it to avoid postponed point of failure
            self._ssh_exec(server_details, ['sudo', 'mount', '-a'])
        except exception.ProcessExecutionError as e:
            LOG.error(_LE("Failed to mount all shares on server '%s'."),
                      server_details['instance_id'])
            raise exception.ShareBackendException(msg=six.text_type(e))
    def _mount_device(self, share, server_details, volume):
        """Mounts block device to the directory on service vm.
        Mounts attached and formatted block device to the directory if not
        mounted yet.
        """
        # Serialize mount table changes per service instance to avoid
        # concurrent edits of the shared mount configuration.
        @utils.synchronized('generic_driver_mounts_'
                            '%s' % server_details['instance_id'])
        def _mount_device_with_lock():
            mount_path = self._get_mount_path(share)
            device_path = volume['mountpoint']
            log_data = {
                'dev': device_path,
                'path': mount_path,
                'server': server_details['instance_id'],
            }
            try:
                if not self._is_device_mounted(mount_path, server_details,
                                               volume):
                    LOG.debug("Mounting '%(dev)s' to path '%(path)s' on "
                              "server '%(server)s'.", log_data)
                    mount_cmd = (
                        'sudo', 'mkdir', '-p', mount_path,
                        '&&', 'sudo', 'mount', device_path, mount_path,
                        '&&', 'sudo', 'chmod', '777', mount_path,
                        '&&', 'sudo', 'umount', mount_path,
                        # NOTE(vponomaryov): 'tune2fs' is required to make
                        # filesystem of share created from snapshot have
                        # unique ID, in case of LVM volumes, by default,
                        # it will have the same UUID as source volume one.
                        # 'tune2fs' command can be executed only when device
                        # is not mounted and also, in current case, it takes
                        # effect only after it was mounted. Closes #1645751
                        '&&', 'sudo', 'tune2fs', '-U', 'random', device_path,
                        '&&', 'sudo', 'mount', device_path, mount_path,
                    )
                    self._ssh_exec(server_details, mount_cmd)
                    # Add mount permanently
                    self._sync_mount_temp_and_perm_files(server_details)
                else:
                    LOG.warning(_LW("Mount point '%(path)s' already exists on "
                                    "server '%(server)s'."), log_data)
            except exception.ProcessExecutionError as e:
                raise exception.ShareBackendException(msg=six.text_type(e))
        return _mount_device_with_lock()
    # Retried because umount can fail transiently (e.g. device busy).
    @utils.retry(exception.ProcessExecutionError)
    def _unmount_device(self, share, server_details):
        """Unmounts block device from directory on service vm."""
        # Same per-instance lock as _mount_device to serialize mount edits.
        @utils.synchronized('generic_driver_mounts_'
                            '%s' % server_details['instance_id'])
        def _unmount_device_with_lock():
            mount_path = self._get_mount_path(share)
            log_data = {
                'path': mount_path,
                'server': server_details['instance_id'],
            }
            if self._is_device_mounted(mount_path, server_details):
                LOG.debug("Unmounting path '%(path)s' on server "
                          "'%(server)s'.", log_data)
                unmount_cmd = ['sudo', 'umount', mount_path, '&&', 'sudo',
                               'rmdir', mount_path]
                self._ssh_exec(server_details, unmount_cmd)
                # Remove mount permanently
                self._sync_mount_temp_and_perm_files(server_details)
            else:
                LOG.warning(_LW("Mount point '%(path)s' does not exist on "
                                "server '%(server)s'."), log_data)
        return _unmount_device_with_lock()
def _get_mount_path(self, share):
"""Returns the path to use for mount device in service vm."""
return os.path.join(self.configuration.share_mount_path, share['name'])
    def _attach_volume(self, context, share, instance_id, volume):
        """Attaches cinder volume to service vm.

        Returns the refreshed volume once cinder reports it 'in-use'.
        """
        # Attach/detach for the same instance are mutually exclusive,
        # also across processes (external=True).
        @utils.synchronized(
            "generic_driver_attach_detach_%s" % instance_id, external=True)
        def do_attach(volume):
            if volume['status'] == 'in-use':
                attached_volumes = [vol.id for vol in
                                    self.compute_api.instance_volumes_list(
                                        self.admin_context, instance_id)]
                if volume['id'] in attached_volumes:
                    # Already attached to the right instance: nothing to do.
                    return volume
                else:
                    raise exception.ManilaException(
                        _('Volume %s is already attached to another instance')
                        % volume['id'])

            @retrying.retry(stop_max_attempt_number=3,
                            wait_fixed=2000,
                            retry_on_exception=lambda exc: True)
            def attach_volume():
                self.compute_api.instance_volume_attach(
                    self.admin_context, instance_id, volume['id'])

            attach_volume()

            # Poll until cinder reports 'in-use'; while/else raises on
            # timeout (the else branch runs when the loop was not left
            # via return/raise).
            t = time.time()
            while time.time() - t < self.configuration.max_time_to_attach:
                volume = self.volume_api.get(context, volume['id'])
                if volume['status'] == 'in-use':
                    return volume
                elif volume['status'] != 'attaching':
                    raise exception.ManilaException(
                        _('Failed to attach volume %s') % volume['id'])
                time.sleep(1)
            else:
                err_msg = {
                    'volume_id': volume['id'],
                    'max_time': self.configuration.max_time_to_attach
                }
                raise exception.ManilaException(
                    _('Volume %(volume_id)s has not been attached in '
                      '%(max_time)ss. Giving up.') % err_msg)
        return do_attach(volume)
def _get_volume_name(self, share_id):
return self.configuration.volume_name_template % share_id
def _get_volume(self, context, share_id):
"""Finds volume, associated to the specific share."""
volume_id = self.private_storage.get(share_id, 'volume_id')
if volume_id is not None:
return self.volume_api.get(context, volume_id)
else: # Fallback to legacy method
return self._get_volume_legacy(context, share_id)
    def _get_volume_legacy(self, context, share_id):
        # NOTE(u_glide): this method is deprecated and will be removed in
        # future versions
        # Looks the volume up by its templated name; raises when the name
        # is ambiguous, returns None when nothing matches.
        volume_name = self._get_volume_name(share_id)
        search_opts = {'name': volume_name}
        if context.is_admin:
            search_opts['all_tenants'] = True
        volumes_list = self.volume_api.get_all(context, search_opts)
        if len(volumes_list) == 1:
            return volumes_list[0]
        elif len(volumes_list) > 1:
            LOG.error(
                _LE("Expected only one volume in volume list with name "
                    "'%(name)s', but got more than one in a result - "
                    "'%(result)s'."), {
                        'name': volume_name, 'result': volumes_list})
            raise exception.ManilaException(
                _("Error. Ambiguous volumes for name '%s'") % volume_name)
        return None
def _get_volume_snapshot(self, context, snapshot_id):
"""Find volume snapshot associated to the specific share snapshot."""
volume_snapshot_id = self.private_storage.get(
snapshot_id, 'volume_snapshot_id')
if volume_snapshot_id is not None:
return self.volume_api.get_snapshot(context, volume_snapshot_id)
else: # Fallback to legacy method
return self._get_volume_snapshot_legacy(context, snapshot_id)
    def _get_volume_snapshot_legacy(self, context, snapshot_id):
        # NOTE(u_glide): this method is deprecated and will be removed in
        # future versions
        # Looks the cinder snapshot up by its templated name; raises when
        # the name is ambiguous, returns None when nothing matches.
        volume_snapshot_name = (
            self.configuration.volume_snapshot_name_template % snapshot_id)
        volume_snapshot_list = self.volume_api.get_all_snapshots(
            context, {'name': volume_snapshot_name})
        volume_snapshot = None
        if len(volume_snapshot_list) == 1:
            volume_snapshot = volume_snapshot_list[0]
        elif len(volume_snapshot_list) > 1:
            LOG.error(
                _LE("Expected only one volume snapshot in list with name "
                    "'%(name)s', but got more than one in a result - "
                    "'%(result)s'."), {
                        'name': volume_snapshot_name,
                        'result': volume_snapshot_list})
            raise exception.ManilaException(
                _('Error. Ambiguous volume snaphots'))
        return volume_snapshot
    def _detach_volume(self, context, share, server_details):
        """Detaches cinder volume from service vm."""
        instance_id = server_details['instance_id']

        # Same lock as _attach_volume so attach/detach never interleave.
        @utils.synchronized(
            "generic_driver_attach_detach_%s" % instance_id, external=True)
        def do_detach():
            attached_volumes = [vol.id for vol in
                                self.compute_api.instance_volumes_list(
                                    self.admin_context, instance_id)]
            try:
                volume = self._get_volume(context, share['id'])
            except exception.VolumeNotFound:
                LOG.warning(_LW("Volume not found for share %s. "
                                "Possibly already deleted."), share['id'])
                volume = None
            if volume and volume['id'] in attached_volumes:
                self.compute_api.instance_volume_detach(
                    self.admin_context,
                    instance_id,
                    volume['id']
                )
                # Poll until the volume leaves the instance; while/else
                # raises on timeout.
                t = time.time()
                while time.time() - t < self.configuration.max_time_to_attach:
                    volume = self.volume_api.get(context, volume['id'])
                    if volume['status'] in (const.STATUS_AVAILABLE,
                                            const.STATUS_ERROR):
                        break
                    time.sleep(1)
                else:
                    err_msg = {
                        'volume_id': volume['id'],
                        'max_time': self.configuration.max_time_to_attach
                    }
                    raise exception.ManilaException(
                        _('Volume %(volume_id)s has not been detached in '
                          '%(max_time)ss. Giving up.') % err_msg)
        do_detach()
    def _allocate_container(self, context, share, snapshot=None):
        """Creates cinder volume, associated to share by name.

        When *snapshot* is given the volume is created from the matching
        cinder snapshot. The share-id -> volume-id mapping is persisted in
        private storage before waiting for the volume to become available.
        """
        volume_snapshot = None
        if snapshot:
            volume_snapshot = self._get_volume_snapshot(context,
                                                        snapshot['id'])
        volume = self.volume_api.create(
            context,
            share['size'],
            self.configuration.volume_name_template % share['id'], '',
            snapshot=volume_snapshot,
            volume_type=self.configuration.cinder_volume_type,
            availability_zone=share['availability_zone'])
        self.private_storage.update(
            share['id'], {'volume_id': volume['id']})
        msg_error = _('Failed to create volume')
        msg_timeout = (
            _('Volume has not been created in %ss. Giving up') %
            self.configuration.max_time_to_create_volume
        )
        return self._wait_for_available_volume(
            volume, self.configuration.max_time_to_create_volume,
            msg_error=msg_error, msg_timeout=msg_timeout
        )
    def _wait_for_available_volume(self, volume, timeout,
                                   msg_error, msg_timeout,
                                   expected_size=None):
        """Poll cinder until *volume* is available (and optionally resized).

        :param volume: volume dict to wait for.
        :param timeout: maximum seconds to wait.
        :param msg_error: message for the exception raised on error status.
        :param msg_timeout: message for the exception raised on timeout.
        :param expected_size: when set, also wait until the volume reports
            this size (used after extend operations).
        :returns: the refreshed volume dict.
        """
        t = time.time()
        while time.time() - t < timeout:
            if volume['status'] == const.STATUS_AVAILABLE:
                if expected_size and volume['size'] != expected_size:
                    LOG.debug("The volume %(vol_id)s is available but the "
                              "volume size does not match the expected size. "
                              "A volume resize operation may be pending. "
                              "Expected size: %(expected_size)s, "
                              "Actual size: %(volume_size)s.",
                              dict(vol_id=volume['id'],
                                   expected_size=expected_size,
                                   volume_size=volume['size']))
                else:
                    break
            elif 'error' in volume['status'].lower():
                raise exception.ManilaException(msg_error)
            time.sleep(1)
            volume = self.volume_api.get(self.admin_context, volume['id'])
        else:
            # while/else: reached only when the deadline expired without a
            # 'break'.
            raise exception.ManilaException(msg_timeout)
        return volume
    def _deallocate_container(self, context, share):
        """Deletes cinder volume."""
        try:
            volume = self._get_volume(context, share['id'])
        except exception.VolumeNotFound:
            LOG.info(_LI("Volume not found. Already deleted?"))
            volume = None
        if volume:
            if volume['status'] == 'in-use':
                # Refuse to delete while still attached somewhere.
                raise exception.ManilaException(
                    _('Volume is still in use and '
                      'cannot be deleted now.'))
            self.volume_api.delete(context, volume['id'])
            # Poll until cinder no longer knows the volume; while/else
            # raises on timeout.
            t = time.time()
            while (time.time() - t <
                   self.configuration.max_time_to_create_volume):
                try:
                    volume = self.volume_api.get(context, volume['id'])
                except exception.VolumeNotFound:
                    LOG.debug('Volume was deleted successfully')
                    break
                time.sleep(1)
            else:
                raise exception.ManilaException(
                    _('Volume have not been '
                      'deleted in %ss. Giving up')
                    % self.configuration.max_time_to_create_volume)
def _update_share_stats(self):
"""Retrieve stats info from share volume group."""
data = dict(
share_backend_name=self.backend_name,
storage_protocol='NFS_CIFS',
reserved_percentage=self.configuration.reserved_share_percentage,
)
super(GenericShareDriver, self)._update_share_stats(data)
@ensure_server
def create_share_from_snapshot(self, context, share, snapshot,
share_server=None):
"""Is called to create share from snapshot."""
return self._create_share(
context, share,
snapshot=snapshot,
share_server=share_server,
)
    @ensure_server
    def extend_share(self, share, new_size, share_server=None):
        """Grow a share to *new_size* GiB.

        Access is suspended, the share unmounted, the backing volume
        detached/extended/re-attached when it actually needs to grow, the
        filesystem resized, and the share re-mounted with access restored.
        """
        server_details = share_server['backend_details']
        helper = self._get_helper(share)
        helper.disable_access_for_maintenance(server_details, share['name'])
        self._unmount_device(share, server_details)
        volume = self._get_volume(self.admin_context, share['id'])
        if int(new_size) > volume['size']:
            # Only detach/extend when the volume is actually smaller.
            self._detach_volume(self.admin_context, share, server_details)
            volume = self._extend_volume(self.admin_context, volume, new_size)
            volume = self._attach_volume(
                self.admin_context,
                share,
                server_details['instance_id'],
                volume)
        self._resize_filesystem(server_details, volume)
        self._mount_device(share, server_details, volume)
        helper.restore_access_after_maintenance(server_details,
                                                share['name'])
def _extend_volume(self, context, volume, new_size):
self.volume_api.extend(context, volume['id'], new_size)
msg_error = _('Failed to extend volume %s') % volume['id']
msg_timeout = (
_('Volume has not been extended in %ss. Giving up') %
self.configuration.max_time_to_extend_volume
)
return self._wait_for_available_volume(
volume, self.configuration.max_time_to_extend_volume,
msg_error=msg_error, msg_timeout=msg_timeout,
expected_size=new_size
)
    @ensure_server
    def shrink_share(self, share, new_size, share_server=None):
        """Shrink a share to *new_size* GiB.

        Refuses (ShareShrinkingPossibleDataLoss) when the consumed space
        already exceeds the requested size. Only the filesystem is shrunk;
        the cinder volume keeps its size.
        """
        server_details = share_server['backend_details']
        helper = self._get_helper(share)
        export_location = share['export_locations'][0]['path']
        mount_path = helper.get_share_path_by_export_location(
            server_details, export_location)
        consumed_space = self._get_consumed_space(mount_path, server_details)
        LOG.debug("Consumed space on share: %s", consumed_space)
        if consumed_space >= new_size:
            raise exception.ShareShrinkingPossibleDataLoss(
                share_id=share['id'])
        volume = self._get_volume(self.admin_context, share['id'])
        helper.disable_access_for_maintenance(server_details, share['name'])
        self._unmount_device(share, server_details)
        try:
            self._resize_filesystem(server_details, volume, new_size=new_size)
        except exception.Invalid:
            raise exception.ShareShrinkingPossibleDataLoss(
                share_id=share['id'])
        except Exception as e:
            msg = _("Cannot shrink share: %s") % six.text_type(e)
            raise exception.Invalid(msg)
        finally:
            # Always re-mount and restore access, even on failure.
            self._mount_device(share, server_details, volume)
            helper.restore_access_after_maintenance(server_details,
                                                    share['name'])
def _resize_filesystem(self, server_details, volume, new_size=None):
"""Resize filesystem of provided volume."""
check_command = ['sudo', 'fsck', '-pf', volume['mountpoint']]
self._ssh_exec(server_details, check_command)
command = ['sudo', 'resize2fs', volume['mountpoint']]
if new_size:
command.append("%sG" % six.text_type(new_size))
try:
self._ssh_exec(server_details, command)
except processutils.ProcessExecutionError as e:
if e.stderr.find('New size smaller than minimum') != -1:
msg = (_("Invalid 'new_size' provided: %s")
% six.text_type(new_size))
raise exception.Invalid(msg)
else:
msg = _("Cannot resize file-system: %s") % six.text_type(e)
raise exception.ManilaException(msg)
def _is_share_server_active(self, context, share_server):
"""Check if the share server is active."""
has_active_share_server = (
share_server and share_server.get('backend_details') and
self.service_instance_manager.ensure_service_instance(
context, share_server['backend_details']))
return has_active_share_server
    def delete_share(self, context, share, share_server=None):
        """Deletes share."""
        helper = self._get_helper(share)
        if not self.driver_handles_share_servers:
            share_server = self.service_instance_manager.get_common_server()
        if self._is_share_server_active(context, share_server):
            # Only clean up exports/mount/attachment when the service VM
            # is reachable.
            helper.remove_exports(
                share_server['backend_details'], share['name'])
            self._unmount_device(share, share_server['backend_details'])
            self._detach_volume(self.admin_context, share,
                                share_server['backend_details'])
        # Note(jun): It is an intended breakage to deal with the cases
        # with any reason that caused absence of Nova instances.
        self._deallocate_container(self.admin_context, share)
        self.private_storage.delete(share['id'])
    def create_snapshot(self, context, snapshot, share_server=None):
        """Creates a snapshot.

        Forces a cinder snapshot of the share's backing volume and waits
        until it is available; while/else raises on timeout.
        """
        model_update = {}
        volume = self._get_volume(self.admin_context, snapshot['share_id'])
        volume_snapshot_name = (self.configuration.
                                volume_snapshot_name_template % snapshot['id'])
        volume_snapshot = self.volume_api.create_snapshot_force(
            self.admin_context, volume['id'], volume_snapshot_name, '')
        t = time.time()
        while time.time() - t < self.configuration.max_time_to_create_volume:
            if volume_snapshot['status'] == const.STATUS_AVAILABLE:
                break
            if volume_snapshot['status'] == const.STATUS_ERROR:
                raise exception.ManilaException(_('Failed to create volume '
                                                 'snapshot'))
            time.sleep(1)
            volume_snapshot = self.volume_api.get_snapshot(
                self.admin_context,
                volume_snapshot['id'])
            # NOTE(xyang): We should look at whether we still need to save
            # volume_snapshot_id in private_storage later, now that is saved
            # in provider_location.
            self.private_storage.update(
                snapshot['id'], {'volume_snapshot_id': volume_snapshot['id']})
            # NOTE(xyang): Need to update provider_location in the db so
            # that it can be used in manage/unmanage snapshot tempest tests.
            model_update['provider_location'] = volume_snapshot['id']
        else:
            raise exception.ManilaException(
                _('Volume snapshot have not been '
                  'created in %ss. Giving up') %
                self.configuration.max_time_to_create_volume)
        return model_update
def delete_snapshot(self, context, snapshot, share_server=None):
"""Deletes a snapshot."""
volume_snapshot = self._get_volume_snapshot(self.admin_context,
snapshot['id'])
if volume_snapshot is None:
return
self.volume_api.delete_snapshot(self.admin_context,
volume_snapshot['id'])
t = time.time()
while time.time() - t < self.configuration.max_time_to_create_volume:
try:
snapshot = self.volume_api.get_snapshot(self.admin_context,
volume_snapshot['id'])
except exception.VolumeSnapshotNotFound:
LOG.debug('Volume snapshot was deleted successfully')
self.private_storage.delete(snapshot['id'])
break
time.sleep(1)
else:
raise exception.ManilaException(
_('Volume snapshot have not been '
'deleted in %ss. Giving up') %
self.configuration.max_time_to_create_volume)
    @ensure_server
    def ensure_share(self, context, share, share_server=None):
        """Ensure that storage are mounted and exported."""
        helper = self._get_helper(share)
        volume = self._get_volume(context, share['id'])
        # NOTE(vponomaryov): volume can be None for managed shares
        if volume:
            volume = self._attach_volume(
                context,
                share,
                share_server['backend_details']['instance_id'],
                volume)
            self._mount_device(share, share_server['backend_details'], volume)
            # recreate=True rewrites exports even if they already exist.
            helper.create_exports(
                share_server['backend_details'], share['name'], recreate=True)
@ensure_server
def update_access(self, context, share, access_rules, add_rules,
delete_rules, share_server=None):
"""Update access rules for given share.
This driver has two different behaviors according to parameters:
1. Recovery after error - 'access_rules' contains all access_rules,
'add_rules' and 'delete_rules' shall be empty. Previously existing
access rules are cleared and then added back according
to 'access_rules'.
2. Adding/Deleting of several access rules - 'access_rules' contains
all access_rules, 'add_rules' and 'delete_rules' contain rules which
should be added/deleted. Rules in 'access_rules' are ignored and
only rules from 'add_rules' and 'delete_rules' are applied.
:param context: Current context
:param share: Share model with share data.
:param access_rules: All access rules for given share
:param add_rules: Empty List or List of access rules which should be
added. access_rules already contains these rules.
:param delete_rules: Empty List or List of access rules which should be
removed. access_rules doesn't contain these rules.
:param share_server: None or Share server model
"""
self._get_helper(share).update_access(share_server['backend_details'],
share['name'], access_rules,
add_rules=add_rules,
delete_rules=delete_rules)
def _get_helper(self, share):
helper = self._helpers.get(share['share_proto'])
if helper:
return helper
else:
raise exception.InvalidShare(
reason="Wrong, unsupported or disabled protocol")
def get_network_allocations_number(self):
"""Get number of network interfaces to be created."""
# NOTE(vponomaryov): Generic driver does not need allocations, because
# Nova will handle it. It is valid for all multitenant drivers, that
# use service instance provided by Nova.
return 0
def _setup_server(self, network_info, metadata=None):
msg = "Creating share server '%s'."
LOG.debug(msg % network_info['server_id'])
server = self.service_instance_manager.set_up_service_instance(
self.admin_context, network_info)
for helper in self._helpers.values():
helper.init_helper(server)
return server
def _teardown_server(self, server_details, security_services=None):
instance_id = server_details.get("instance_id")
LOG.debug("Removing share infrastructure for service instance '%s'.",
instance_id)
self.service_instance_manager.delete_service_instance(
self.admin_context, server_details)
def manage_existing(self, share, driver_options):
"""Manage existing share to manila.
Generic driver accepts only one driver_option 'volume_id'.
If an administrator provides this option, then appropriate Cinder
volume will be managed by Manila as well.
:param share: share data
:param driver_options: Empty dict or dict with 'volume_id' option.
:return: dict with share size, example: {'size': 1}
"""
helper = self._get_helper(share)
share_server = self.service_instance_manager.get_common_server()
server_details = share_server['backend_details']
old_export_location = share['export_locations'][0]['path']
mount_path = helper.get_share_path_by_export_location(
share_server['backend_details'], old_export_location)
LOG.debug("Manage: mount path = %s", mount_path)
mounted = self._is_device_mounted(mount_path, server_details)
LOG.debug("Manage: is share mounted = %s", mounted)
if not mounted:
msg = _("Provided share %s is not mounted.") % share['id']
raise exception.ManageInvalidShare(reason=msg)
def get_volume():
if 'volume_id' in driver_options:
try:
return self.volume_api.get(
self.admin_context, driver_options['volume_id'])
except exception.VolumeNotFound as e:
raise exception.ManageInvalidShare(reason=six.text_type(e))
# NOTE(vponomaryov): Manila can only combine volume name by itself,
# nowhere to get volume ID from. Return None since Cinder volume
# names are not unique or fixed, hence, they can not be used for
# sure.
return None
share_volume = get_volume()
if share_volume:
instance_volumes = self.compute_api.instance_volumes_list(
self.admin_context, server_details['instance_id'])
attached_volumes = [vol.id for vol in instance_volumes]
LOG.debug('Manage: attached volumes = %s',
six.text_type(attached_volumes))
if share_volume['id'] not in attached_volumes:
msg = _("Provided volume %s is not attached "
"to service instance.") % share_volume['id']
raise exception.ManageInvalidShare(reason=msg)
linked_volume_name = self._get_volume_name(share['id'])
if share_volume['name'] != linked_volume_name:
LOG.debug('Manage: volume_id = %s' % share_volume['id'])
self.volume_api.update(self.admin_context, share_volume['id'],
{'name': linked_volume_name})
self.private_storage.update(
share['id'], {'volume_id': share_volume['id']})
share_size = share_volume['size']
else:
share_size = self._get_mounted_share_size(
mount_path, share_server['backend_details'])
export_locations = helper.get_exports_for_share(
server_details, old_export_location)
return {'size': share_size, 'export_locations': export_locations}
    def manage_existing_snapshot(self, snapshot, driver_options):
        """Manage existing share snapshot with manila.

        :param snapshot: Snapshot data
        :param driver_options: Not used by the Generic driver currently
        :return: dict with share snapshot size, example: {'size': 1}
        """
        model_update = {}
        volume_snapshot = None
        snapshot_size = snapshot.get('share_size', 0)
        # provider_location is expected to hold the cinder snapshot id.
        provider_location = snapshot.get('provider_location')
        try:
            volume_snapshot = self.volume_api.get_snapshot(
                self.admin_context,
                provider_location)
        except exception.VolumeSnapshotNotFound as e:
            raise exception.ManageInvalidShareSnapshot(
                reason=six.text_type(e))
        if volume_snapshot:
            snapshot_size = volume_snapshot['size']
            # NOTE(xyang): volume_snapshot_id is saved in private_storage
            # in create_snapshot, so saving it here too for consistency.
            # We should look at whether we still need to save it in
            # private_storage later.
            self.private_storage.update(
                snapshot['id'], {'volume_snapshot_id': volume_snapshot['id']})
            # NOTE(xyang): provider_location is used to map a Manila snapshot
            # to its name on the storage backend and prevent managing of the
            # same snapshot twice.
            model_update['provider_location'] = volume_snapshot['id']
        model_update['size'] = snapshot_size
        return model_update
    def unmanage_snapshot(self, snapshot):
        """Unmanage share snapshot with manila."""
        # Only the manila-side mapping is dropped; the cinder snapshot
        # itself is left untouched.
        self.private_storage.delete(snapshot['id'])
def _get_mount_stats_by_index(self, mount_path, server_details, index,
block_size='G'):
"""Get mount stats using df shell command.
:param mount_path: Share path on share server
:param server_details: Share server connection details
:param index: Data index in df command output:
BLOCK_DEVICE_SIZE_INDEX - Size of block device
USED_SPACE_INDEX - Used space
:param block_size: size of block (example: G, M, Mib, etc)
:returns: value of provided index
"""
share_size_cmd = ['df', '-PB%s' % block_size, mount_path]
output, __ = self._ssh_exec(server_details, share_size_cmd)
lines = output.split('\n')
return int(lines[1].split()[index][:-1])
def _get_mounted_share_size(self, mount_path, server_details):
try:
size = self._get_mount_stats_by_index(
mount_path, server_details, BLOCK_DEVICE_SIZE_INDEX)
except Exception as e:
msg = _("Cannot calculate size of share %(path)s : %(error)s") % {
'path': mount_path,
'error': six.text_type(e)
}
raise exception.ManageInvalidShare(reason=msg)
return size
def _get_consumed_space(self, mount_path, server_details):
try:
size = self._get_mount_stats_by_index(
mount_path, server_details, USED_SPACE_INDEX, block_size='M')
size /= float(units.Ki)
except Exception as e:
msg = _("Cannot calculate consumed space on share "
"%(path)s : %(error)s") % {
'path': mount_path,
'error': six.text_type(e)
}
raise exception.InvalidShare(reason=msg)
return size
| 44.067813
| 79
| 0.585882
|
import os
import time
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log
from oslo_utils import importutils
from oslo_utils import units
import retrying
import six
from manila.common import constants as const
from manila import compute
from manila import context
from manila import exception
from manila.i18n import _, _LE, _LI, _LW
from manila.share import driver
from manila.share.drivers import service_instance
from manila import utils
from manila import volume
LOG = log.getLogger(__name__)
share_opts = [
cfg.StrOpt('smb_template_config_path',
default='$state_path/smb.conf',
help="Path to smb config."),
cfg.StrOpt('volume_name_template',
default='manila-share-%s',
help="Volume name template."),
cfg.StrOpt('volume_snapshot_name_template',
default='manila-snapshot-%s',
help="Volume snapshot name template."),
cfg.StrOpt('share_mount_path',
default='/shares',
help="Parent path in service instance where shares "
"will be mounted."),
cfg.IntOpt('max_time_to_create_volume',
default=180,
help="Maximum time to wait for creating cinder volume."),
cfg.IntOpt('max_time_to_extend_volume',
default=180,
help="Maximum time to wait for extending cinder volume."),
cfg.IntOpt('max_time_to_attach',
default=120,
help="Maximum time to wait for attaching cinder volume."),
cfg.StrOpt('service_instance_smb_config_path',
default='$share_mount_path/smb.conf',
help="Path to SMB config in service instance."),
cfg.ListOpt('share_helpers',
default=[
'CIFS=manila.share.drivers.helpers.CIFSHelperIPAccess',
'NFS=manila.share.drivers.helpers.NFSHelper',
],
help='Specify list of share export helpers.'),
cfg.StrOpt('share_volume_fstype',
default='ext4',
choices=['ext4', 'ext3'],
help='Filesystem type of the share volume.'),
cfg.StrOpt('cinder_volume_type',
help='Name or id of cinder volume type which will be used '
'for all volumes created by driver.'),
]
CONF = cfg.CONF
CONF.register_opts(share_opts)
BLOCK_DEVICE_SIZE_INDEX = 1
USED_SPACE_INDEX = 2
def ensure_server(f):
def wrap(self, context, *args, **kwargs):
server = kwargs.get('share_server')
if not self.driver_handles_share_servers:
if not server:
server = self.service_instance_manager.get_common_server()
kwargs['share_server'] = server
else:
raise exception.ManilaException(
_("Share server handling is not available. "
"But 'share_server' was provided. '%s'. "
"Share network should not be used.") % server.get('id'))
elif not server:
raise exception.ManilaException(
_("Share server handling is enabled. But 'share_server' "
"is not provided. Make sure you used 'share_network'."))
if not server.get('backend_details'):
raise exception.ManilaException(
_("Share server '%s' does not have backend details.") %
server['id'])
if not self.service_instance_manager.ensure_service_instance(
context, server['backend_details']):
raise exception.ServiceInstanceUnavailable()
return f(self, context, *args, **kwargs)
return wrap
class GenericShareDriver(driver.ExecuteMixin, driver.ShareDriver):
def __init__(self, *args, **kwargs):
super(GenericShareDriver, self).__init__(
[False, True], *args, **kwargs)
self.admin_context = context.get_admin_context()
self.configuration.append_config_values(share_opts)
self._helpers = {}
self.backend_name = self.configuration.safe_get(
'share_backend_name') or "Cinder_Volumes"
self.ssh_connections = {}
self._setup_service_instance_manager()
self.private_storage = kwargs.get('private_storage')
def _setup_service_instance_manager(self):
self.service_instance_manager = (
service_instance.ServiceInstanceManager(
driver_config=self.configuration))
def _ssh_exec(self, server, command, check_exit_code=True):
connection = self.ssh_connections.get(server['instance_id'])
ssh_conn_timeout = self.configuration.ssh_conn_timeout
if not connection:
ssh_pool = utils.SSHPool(server['ip'],
22,
ssh_conn_timeout,
server['username'],
server.get('password'),
server.get('pk_path'),
max_size=1)
ssh = ssh_pool.create()
self.ssh_connections[server['instance_id']] = (ssh_pool, ssh)
else:
ssh_pool, ssh = connection
if not ssh.get_transport().is_active():
ssh_pool.remove(ssh)
ssh = ssh_pool.create()
self.ssh_connections[server['instance_id']] = (ssh_pool, ssh)
wrap = lambda token: "\"" + token + "\""
command = [wrap(tkn) if tkn.count(' ') else tkn for tkn in command]
return processutils.ssh_execute(ssh, ' '.join(command),
check_exit_code=check_exit_code)
def check_for_setup_error(self):
def do_setup(self, context):
super(GenericShareDriver, self).do_setup(context)
self.compute_api = compute.API()
self.volume_api = volume.API()
self._setup_helpers()
common_sv_available = False
share_server = None
sv_fetch_retry_interval = 5
while not (common_sv_available or self.driver_handles_share_servers):
try:
share_server = (
self.service_instance_manager.get_common_server())
common_sv_available = self._is_share_server_active(
context, share_server)
except Exception as ex:
LOG.error(ex)
if not common_sv_available:
time.sleep(sv_fetch_retry_interval)
LOG.warning(_LW("Waiting for the common service VM to become "
"available. "
"Driver is currently uninitialized. "
"Share server: %(share_server)s "
"Retry interval: %(retry_interval)s"),
dict(share_server=share_server,
retry_interval=sv_fetch_retry_interval))
def _setup_helpers(self):
helpers = self.configuration.share_helpers
if helpers:
for helper_str in helpers:
share_proto, __, import_str = helper_str.partition('=')
helper = importutils.import_class(import_str)
self._helpers[share_proto.upper()] = helper(
self._execute,
self._ssh_exec,
self.configuration)
else:
raise exception.ManilaException(
"No protocol helpers selected for Generic Driver. "
"Please specify using config option 'share_helpers'.")
@ensure_server
def create_share(self, context, share, share_server=None):
return self._create_share(
context, share,
snapshot=None,
share_server=share_server,
)
def _create_share(self, context, share, snapshot, share_server=None):
helper = self._get_helper(share)
server_details = share_server['backend_details']
volume = self._allocate_container(
self.admin_context, share, snapshot=snapshot)
volume = self._attach_volume(
self.admin_context, share, server_details['instance_id'], volume)
if not snapshot:
self._format_device(server_details, volume)
self._mount_device(share, server_details, volume)
export_locations = helper.create_exports(
server_details, share['name'])
return export_locations
@utils.retry(exception.ProcessExecutionError, backoff_rate=1)
def _is_device_file_available(self, server_details, volume):
command = ['sudo', 'test', '-b', volume['mountpoint']]
self._ssh_exec(server_details, command)
def _format_device(self, server_details, volume):
self._is_device_file_available(server_details, volume)
command = ['sudo', 'mkfs.%s' % self.configuration.share_volume_fstype,
volume['mountpoint']]
self._ssh_exec(server_details, command)
def _is_device_mounted(self, mount_path, server_details, volume=None):
log_data = {
'mount_path': mount_path,
'server_id': server_details['instance_id'],
}
if volume and volume.get('mountpoint', ''):
log_data['volume_id'] = volume['id']
log_data['dev_mount_path'] = volume['mountpoint']
msg = ("Checking whether volume '%(volume_id)s' with mountpoint "
"'%(dev_mount_path)s' is mounted on mount path '%(mount_p"
"ath)s' on server '%(server_id)s' or not." % log_data)
else:
msg = ("Checking whether mount path '%(mount_path)s' exists on "
"server '%(server_id)s' or not." % log_data)
LOG.debug(msg)
mounts_list_cmd = ['sudo', 'mount']
output, __ = self._ssh_exec(server_details, mounts_list_cmd)
mounts = output.split('\n')
for mount in mounts:
mount_elements = mount.split(' ')
if (len(mount_elements) > 2 and mount_path == mount_elements[2]):
if volume:
if (volume.get('mountpoint', '') == mount_elements[0]):
return True
else:
return True
return False
def _sync_mount_temp_and_perm_files(self, server_details):
try:
self._ssh_exec(
server_details,
['sudo', 'cp', const.MOUNT_FILE_TEMP, const.MOUNT_FILE],
)
except exception.ProcessExecutionError as e:
LOG.error(_LE("Failed to sync mount files on server '%s'."),
server_details['instance_id'])
raise exception.ShareBackendException(msg=six.text_type(e))
try:
self._ssh_exec(server_details, ['sudo', 'mount', '-a'])
except exception.ProcessExecutionError as e:
LOG.error(_LE("Failed to mount all shares on server '%s'."),
server_details['instance_id'])
raise exception.ShareBackendException(msg=six.text_type(e))
def _mount_device(self, share, server_details, volume):
@utils.synchronized('generic_driver_mounts_'
'%s' % server_details['instance_id'])
def _mount_device_with_lock():
mount_path = self._get_mount_path(share)
device_path = volume['mountpoint']
log_data = {
'dev': device_path,
'path': mount_path,
'server': server_details['instance_id'],
}
try:
if not self._is_device_mounted(mount_path, server_details,
volume):
LOG.debug("Mounting '%(dev)s' to path '%(path)s' on "
"server '%(server)s'.", log_data)
mount_cmd = (
'sudo', 'mkdir', '-p', mount_path,
'&&', 'sudo', 'mount', device_path, mount_path,
'&&', 'sudo', 'chmod', '777', mount_path,
'&&', 'sudo', 'umount', mount_path,
'&&', 'sudo', 'tune2fs', '-U', 'random', device_path,
'&&', 'sudo', 'mount', device_path, mount_path,
)
self._ssh_exec(server_details, mount_cmd)
self._sync_mount_temp_and_perm_files(server_details)
else:
LOG.warning(_LW("Mount point '%(path)s' already exists on "
"server '%(server)s'."), log_data)
except exception.ProcessExecutionError as e:
raise exception.ShareBackendException(msg=six.text_type(e))
return _mount_device_with_lock()
@utils.retry(exception.ProcessExecutionError)
def _unmount_device(self, share, server_details):
@utils.synchronized('generic_driver_mounts_'
'%s' % server_details['instance_id'])
def _unmount_device_with_lock():
mount_path = self._get_mount_path(share)
log_data = {
'path': mount_path,
'server': server_details['instance_id'],
}
if self._is_device_mounted(mount_path, server_details):
LOG.debug("Unmounting path '%(path)s' on server "
"'%(server)s'.", log_data)
unmount_cmd = ['sudo', 'umount', mount_path, '&&', 'sudo',
'rmdir', mount_path]
self._ssh_exec(server_details, unmount_cmd)
self._sync_mount_temp_and_perm_files(server_details)
else:
LOG.warning(_LW("Mount point '%(path)s' does not exist on "
"server '%(server)s'."), log_data)
return _unmount_device_with_lock()
def _get_mount_path(self, share):
return os.path.join(self.configuration.share_mount_path, share['name'])
def _attach_volume(self, context, share, instance_id, volume):
@utils.synchronized(
"generic_driver_attach_detach_%s" % instance_id, external=True)
def do_attach(volume):
if volume['status'] == 'in-use':
attached_volumes = [vol.id for vol in
self.compute_api.instance_volumes_list(
self.admin_context, instance_id)]
if volume['id'] in attached_volumes:
return volume
else:
raise exception.ManilaException(
_('Volume %s is already attached to another instance')
% volume['id'])
@retrying.retry(stop_max_attempt_number=3,
wait_fixed=2000,
retry_on_exception=lambda exc: True)
def attach_volume():
self.compute_api.instance_volume_attach(
self.admin_context, instance_id, volume['id'])
attach_volume()
t = time.time()
while time.time() - t < self.configuration.max_time_to_attach:
volume = self.volume_api.get(context, volume['id'])
if volume['status'] == 'in-use':
return volume
elif volume['status'] != 'attaching':
raise exception.ManilaException(
_('Failed to attach volume %s') % volume['id'])
time.sleep(1)
else:
err_msg = {
'volume_id': volume['id'],
'max_time': self.configuration.max_time_to_attach
}
raise exception.ManilaException(
_('Volume %(volume_id)s has not been attached in '
'%(max_time)ss. Giving up.') % err_msg)
return do_attach(volume)
def _get_volume_name(self, share_id):
return self.configuration.volume_name_template % share_id
def _get_volume(self, context, share_id):
volume_id = self.private_storage.get(share_id, 'volume_id')
if volume_id is not None:
return self.volume_api.get(context, volume_id)
else:
return self._get_volume_legacy(context, share_id)
def _get_volume_legacy(self, context, share_id):
volume_name = self._get_volume_name(share_id)
search_opts = {'name': volume_name}
if context.is_admin:
search_opts['all_tenants'] = True
volumes_list = self.volume_api.get_all(context, search_opts)
if len(volumes_list) == 1:
return volumes_list[0]
elif len(volumes_list) > 1:
LOG.error(
_LE("Expected only one volume in volume list with name "
"'%(name)s', but got more than one in a result - "
"'%(result)s'."), {
'name': volume_name, 'result': volumes_list})
raise exception.ManilaException(
_("Error. Ambiguous volumes for name '%s'") % volume_name)
return None
def _get_volume_snapshot(self, context, snapshot_id):
volume_snapshot_id = self.private_storage.get(
snapshot_id, 'volume_snapshot_id')
if volume_snapshot_id is not None:
return self.volume_api.get_snapshot(context, volume_snapshot_id)
else:
return self._get_volume_snapshot_legacy(context, snapshot_id)
def _get_volume_snapshot_legacy(self, context, snapshot_id):
volume_snapshot_name = (
self.configuration.volume_snapshot_name_template % snapshot_id)
volume_snapshot_list = self.volume_api.get_all_snapshots(
context, {'name': volume_snapshot_name})
volume_snapshot = None
if len(volume_snapshot_list) == 1:
volume_snapshot = volume_snapshot_list[0]
elif len(volume_snapshot_list) > 1:
LOG.error(
_LE("Expected only one volume snapshot in list with name "
"'%(name)s', but got more than one in a result - "
"'%(result)s'."), {
'name': volume_snapshot_name,
'result': volume_snapshot_list})
raise exception.ManilaException(
_('Error. Ambiguous volume snaphots'))
return volume_snapshot
def _detach_volume(self, context, share, server_details):
instance_id = server_details['instance_id']
@utils.synchronized(
"generic_driver_attach_detach_%s" % instance_id, external=True)
def do_detach():
attached_volumes = [vol.id for vol in
self.compute_api.instance_volumes_list(
self.admin_context, instance_id)]
try:
volume = self._get_volume(context, share['id'])
except exception.VolumeNotFound:
LOG.warning(_LW("Volume not found for share %s. "
"Possibly already deleted."), share['id'])
volume = None
if volume and volume['id'] in attached_volumes:
self.compute_api.instance_volume_detach(
self.admin_context,
instance_id,
volume['id']
)
t = time.time()
while time.time() - t < self.configuration.max_time_to_attach:
volume = self.volume_api.get(context, volume['id'])
if volume['status'] in (const.STATUS_AVAILABLE,
const.STATUS_ERROR):
break
time.sleep(1)
else:
err_msg = {
'volume_id': volume['id'],
'max_time': self.configuration.max_time_to_attach
}
raise exception.ManilaException(
_('Volume %(volume_id)s has not been detached in '
'%(max_time)ss. Giving up.') % err_msg)
do_detach()
def _allocate_container(self, context, share, snapshot=None):
volume_snapshot = None
if snapshot:
volume_snapshot = self._get_volume_snapshot(context,
snapshot['id'])
volume = self.volume_api.create(
context,
share['size'],
self.configuration.volume_name_template % share['id'], '',
snapshot=volume_snapshot,
volume_type=self.configuration.cinder_volume_type,
availability_zone=share['availability_zone'])
self.private_storage.update(
share['id'], {'volume_id': volume['id']})
msg_error = _('Failed to create volume')
msg_timeout = (
_('Volume has not been created in %ss. Giving up') %
self.configuration.max_time_to_create_volume
)
return self._wait_for_available_volume(
volume, self.configuration.max_time_to_create_volume,
msg_error=msg_error, msg_timeout=msg_timeout
)
def _wait_for_available_volume(self, volume, timeout,
msg_error, msg_timeout,
expected_size=None):
t = time.time()
while time.time() - t < timeout:
if volume['status'] == const.STATUS_AVAILABLE:
if expected_size and volume['size'] != expected_size:
LOG.debug("The volume %(vol_id)s is available but the "
"volume size does not match the expected size. "
"A volume resize operation may be pending. "
"Expected size: %(expected_size)s, "
"Actual size: %(volume_size)s.",
dict(vol_id=volume['id'],
expected_size=expected_size,
volume_size=volume['size']))
else:
break
elif 'error' in volume['status'].lower():
raise exception.ManilaException(msg_error)
time.sleep(1)
volume = self.volume_api.get(self.admin_context, volume['id'])
else:
raise exception.ManilaException(msg_timeout)
return volume
def _deallocate_container(self, context, share):
try:
volume = self._get_volume(context, share['id'])
except exception.VolumeNotFound:
LOG.info(_LI("Volume not found. Already deleted?"))
volume = None
if volume:
if volume['status'] == 'in-use':
raise exception.ManilaException(
_('Volume is still in use and '
'cannot be deleted now.'))
self.volume_api.delete(context, volume['id'])
t = time.time()
while (time.time() - t <
self.configuration.max_time_to_create_volume):
try:
volume = self.volume_api.get(context, volume['id'])
except exception.VolumeNotFound:
LOG.debug('Volume was deleted successfully')
break
time.sleep(1)
else:
raise exception.ManilaException(
_('Volume have not been '
'deleted in %ss. Giving up')
% self.configuration.max_time_to_create_volume)
def _update_share_stats(self):
data = dict(
share_backend_name=self.backend_name,
storage_protocol='NFS_CIFS',
reserved_percentage=self.configuration.reserved_share_percentage,
)
super(GenericShareDriver, self)._update_share_stats(data)
@ensure_server
def create_share_from_snapshot(self, context, share, snapshot,
share_server=None):
return self._create_share(
context, share,
snapshot=snapshot,
share_server=share_server,
)
@ensure_server
def extend_share(self, share, new_size, share_server=None):
server_details = share_server['backend_details']
helper = self._get_helper(share)
helper.disable_access_for_maintenance(server_details, share['name'])
self._unmount_device(share, server_details)
volume = self._get_volume(self.admin_context, share['id'])
if int(new_size) > volume['size']:
self._detach_volume(self.admin_context, share, server_details)
volume = self._extend_volume(self.admin_context, volume, new_size)
volume = self._attach_volume(
self.admin_context,
share,
server_details['instance_id'],
volume)
self._resize_filesystem(server_details, volume)
self._mount_device(share, server_details, volume)
helper.restore_access_after_maintenance(server_details,
share['name'])
def _extend_volume(self, context, volume, new_size):
self.volume_api.extend(context, volume['id'], new_size)
msg_error = _('Failed to extend volume %s') % volume['id']
msg_timeout = (
_('Volume has not been extended in %ss. Giving up') %
self.configuration.max_time_to_extend_volume
)
return self._wait_for_available_volume(
volume, self.configuration.max_time_to_extend_volume,
msg_error=msg_error, msg_timeout=msg_timeout,
expected_size=new_size
)
@ensure_server
def shrink_share(self, share, new_size, share_server=None):
server_details = share_server['backend_details']
helper = self._get_helper(share)
export_location = share['export_locations'][0]['path']
mount_path = helper.get_share_path_by_export_location(
server_details, export_location)
consumed_space = self._get_consumed_space(mount_path, server_details)
LOG.debug("Consumed space on share: %s", consumed_space)
if consumed_space >= new_size:
raise exception.ShareShrinkingPossibleDataLoss(
share_id=share['id'])
volume = self._get_volume(self.admin_context, share['id'])
helper.disable_access_for_maintenance(server_details, share['name'])
self._unmount_device(share, server_details)
try:
self._resize_filesystem(server_details, volume, new_size=new_size)
except exception.Invalid:
raise exception.ShareShrinkingPossibleDataLoss(
share_id=share['id'])
except Exception as e:
msg = _("Cannot shrink share: %s") % six.text_type(e)
raise exception.Invalid(msg)
finally:
self._mount_device(share, server_details, volume)
helper.restore_access_after_maintenance(server_details,
share['name'])
def _resize_filesystem(self, server_details, volume, new_size=None):
check_command = ['sudo', 'fsck', '-pf', volume['mountpoint']]
self._ssh_exec(server_details, check_command)
command = ['sudo', 'resize2fs', volume['mountpoint']]
if new_size:
command.append("%sG" % six.text_type(new_size))
try:
self._ssh_exec(server_details, command)
except processutils.ProcessExecutionError as e:
if e.stderr.find('New size smaller than minimum') != -1:
msg = (_("Invalid 'new_size' provided: %s")
% six.text_type(new_size))
raise exception.Invalid(msg)
else:
msg = _("Cannot resize file-system: %s") % six.text_type(e)
raise exception.ManilaException(msg)
def _is_share_server_active(self, context, share_server):
has_active_share_server = (
share_server and share_server.get('backend_details') and
self.service_instance_manager.ensure_service_instance(
context, share_server['backend_details']))
return has_active_share_server
def delete_share(self, context, share, share_server=None):
helper = self._get_helper(share)
if not self.driver_handles_share_servers:
share_server = self.service_instance_manager.get_common_server()
if self._is_share_server_active(context, share_server):
helper.remove_exports(
share_server['backend_details'], share['name'])
self._unmount_device(share, share_server['backend_details'])
self._detach_volume(self.admin_context, share,
share_server['backend_details'])
self._deallocate_container(self.admin_context, share)
self.private_storage.delete(share['id'])
def create_snapshot(self, context, snapshot, share_server=None):
model_update = {}
volume = self._get_volume(self.admin_context, snapshot['share_id'])
volume_snapshot_name = (self.configuration.
volume_snapshot_name_template % snapshot['id'])
volume_snapshot = self.volume_api.create_snapshot_force(
self.admin_context, volume['id'], volume_snapshot_name, '')
t = time.time()
while time.time() - t < self.configuration.max_time_to_create_volume:
if volume_snapshot['status'] == const.STATUS_AVAILABLE:
break
if volume_snapshot['status'] == const.STATUS_ERROR:
raise exception.ManilaException(_('Failed to create volume '
'snapshot'))
time.sleep(1)
volume_snapshot = self.volume_api.get_snapshot(
self.admin_context,
volume_snapshot['id'])
self.private_storage.update(
snapshot['id'], {'volume_snapshot_id': volume_snapshot['id']})
model_update['provider_location'] = volume_snapshot['id']
else:
raise exception.ManilaException(
_('Volume snapshot have not been '
'created in %ss. Giving up') %
self.configuration.max_time_to_create_volume)
return model_update
def delete_snapshot(self, context, snapshot, share_server=None):
volume_snapshot = self._get_volume_snapshot(self.admin_context,
snapshot['id'])
if volume_snapshot is None:
return
self.volume_api.delete_snapshot(self.admin_context,
volume_snapshot['id'])
t = time.time()
while time.time() - t < self.configuration.max_time_to_create_volume:
try:
snapshot = self.volume_api.get_snapshot(self.admin_context,
volume_snapshot['id'])
except exception.VolumeSnapshotNotFound:
LOG.debug('Volume snapshot was deleted successfully')
self.private_storage.delete(snapshot['id'])
break
time.sleep(1)
else:
raise exception.ManilaException(
_('Volume snapshot have not been '
'deleted in %ss. Giving up') %
self.configuration.max_time_to_create_volume)
@ensure_server
def ensure_share(self, context, share, share_server=None):
helper = self._get_helper(share)
volume = self._get_volume(context, share['id'])
if volume:
volume = self._attach_volume(
context,
share,
share_server['backend_details']['instance_id'],
volume)
self._mount_device(share, share_server['backend_details'], volume)
helper.create_exports(
share_server['backend_details'], share['name'], recreate=True)
@ensure_server
def update_access(self, context, share, access_rules, add_rules,
delete_rules, share_server=None):
self._get_helper(share).update_access(share_server['backend_details'],
share['name'], access_rules,
add_rules=add_rules,
delete_rules=delete_rules)
def _get_helper(self, share):
helper = self._helpers.get(share['share_proto'])
if helper:
return helper
else:
raise exception.InvalidShare(
reason="Wrong, unsupported or disabled protocol")
def get_network_allocations_number(self):
return 0
def _setup_server(self, network_info, metadata=None):
msg = "Creating share server '%s'."
LOG.debug(msg % network_info['server_id'])
server = self.service_instance_manager.set_up_service_instance(
self.admin_context, network_info)
for helper in self._helpers.values():
helper.init_helper(server)
return server
def _teardown_server(self, server_details, security_services=None):
instance_id = server_details.get("instance_id")
LOG.debug("Removing share infrastructure for service instance '%s'.",
instance_id)
self.service_instance_manager.delete_service_instance(
self.admin_context, server_details)
def manage_existing(self, share, driver_options):
helper = self._get_helper(share)
share_server = self.service_instance_manager.get_common_server()
server_details = share_server['backend_details']
old_export_location = share['export_locations'][0]['path']
mount_path = helper.get_share_path_by_export_location(
share_server['backend_details'], old_export_location)
LOG.debug("Manage: mount path = %s", mount_path)
mounted = self._is_device_mounted(mount_path, server_details)
LOG.debug("Manage: is share mounted = %s", mounted)
if not mounted:
msg = _("Provided share %s is not mounted.") % share['id']
raise exception.ManageInvalidShare(reason=msg)
def get_volume():
if 'volume_id' in driver_options:
try:
return self.volume_api.get(
self.admin_context, driver_options['volume_id'])
except exception.VolumeNotFound as e:
raise exception.ManageInvalidShare(reason=six.text_type(e))
return None
share_volume = get_volume()
if share_volume:
instance_volumes = self.compute_api.instance_volumes_list(
self.admin_context, server_details['instance_id'])
attached_volumes = [vol.id for vol in instance_volumes]
LOG.debug('Manage: attached volumes = %s',
six.text_type(attached_volumes))
if share_volume['id'] not in attached_volumes:
msg = _("Provided volume %s is not attached "
"to service instance.") % share_volume['id']
raise exception.ManageInvalidShare(reason=msg)
linked_volume_name = self._get_volume_name(share['id'])
if share_volume['name'] != linked_volume_name:
LOG.debug('Manage: volume_id = %s' % share_volume['id'])
self.volume_api.update(self.admin_context, share_volume['id'],
{'name': linked_volume_name})
self.private_storage.update(
share['id'], {'volume_id': share_volume['id']})
share_size = share_volume['size']
else:
share_size = self._get_mounted_share_size(
mount_path, share_server['backend_details'])
export_locations = helper.get_exports_for_share(
server_details, old_export_location)
return {'size': share_size, 'export_locations': export_locations}
def manage_existing_snapshot(self, snapshot, driver_options):
model_update = {}
volume_snapshot = None
snapshot_size = snapshot.get('share_size', 0)
provider_location = snapshot.get('provider_location')
try:
volume_snapshot = self.volume_api.get_snapshot(
self.admin_context,
provider_location)
except exception.VolumeSnapshotNotFound as e:
raise exception.ManageInvalidShareSnapshot(
reason=six.text_type(e))
if volume_snapshot:
snapshot_size = volume_snapshot['size']
self.private_storage.update(
snapshot['id'], {'volume_snapshot_id': volume_snapshot['id']})
model_update['provider_location'] = volume_snapshot['id']
model_update['size'] = snapshot_size
return model_update
def unmanage_snapshot(self, snapshot):
self.private_storage.delete(snapshot['id'])
def _get_mount_stats_by_index(self, mount_path, server_details, index,
block_size='G'):
share_size_cmd = ['df', '-PB%s' % block_size, mount_path]
output, __ = self._ssh_exec(server_details, share_size_cmd)
lines = output.split('\n')
return int(lines[1].split()[index][:-1])
def _get_mounted_share_size(self, mount_path, server_details):
try:
size = self._get_mount_stats_by_index(
mount_path, server_details, BLOCK_DEVICE_SIZE_INDEX)
except Exception as e:
msg = _("Cannot calculate size of share %(path)s : %(error)s") % {
'path': mount_path,
'error': six.text_type(e)
}
raise exception.ManageInvalidShare(reason=msg)
return size
def _get_consumed_space(self, mount_path, server_details):
try:
size = self._get_mount_stats_by_index(
mount_path, server_details, USED_SPACE_INDEX, block_size='M')
size /= float(units.Ki)
except Exception as e:
msg = _("Cannot calculate consumed space on share "
"%(path)s : %(error)s") % {
'path': mount_path,
'error': six.text_type(e)
}
raise exception.InvalidShare(reason=msg)
return size
| true
| true
|
f7177eeda341fbfbb4601d6a8a82e1d73d1f95ba
| 17,754
|
py
|
Python
|
acg/custom_widgets/selection_widgets.py
|
david-fischer/Anki_CardGen
|
909d088ed4e98b97f65a2c896dc607941b00e4da
|
[
"MIT"
] | 2
|
2021-01-11T08:59:57.000Z
|
2021-02-01T12:15:30.000Z
|
acg/custom_widgets/selection_widgets.py
|
david-fischer/Anki_CardGen
|
909d088ed4e98b97f65a2c896dc607941b00e4da
|
[
"MIT"
] | null | null | null |
acg/custom_widgets/selection_widgets.py
|
david-fischer/Anki_CardGen
|
909d088ed4e98b97f65a2c896dc607941b00e4da
|
[
"MIT"
] | null | null | null |
"""Implements various elements to get user selection."""
from functools import partial
from kivy.animation import Animation
from kivy.factory import Factory
from kivy.lang import Builder
from kivy.properties import (
BooleanProperty,
ListProperty,
NumericProperty,
ObjectProperty,
OptionProperty,
StringProperty,
)
from kivy.uix.behaviors import ButtonBehavior
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.image import AsyncImage
from kivy.uix.modalview import ModalView
from kivy.uix.stacklayout import StackLayout
from kivymd.app import MDApp
from kivymd.theming import ThemableBehavior
from kivymd.uix.behaviors import CircularRippleBehavior, RectangularRippleBehavior
from kivymd.uix.card import MDCard
from kivymd.uix.imagelist import SmartTile
from .behaviors import (
CheckBehavior,
ChildrenFromDataBehavior,
LongPressBehavior,
ThemableColorChangeBehavior,
TranslationOnCheckBehavior,
)
class SeparatorWithHeading(FloatLayout):
r"""Two :class:`MDSeparator`\ s with a heading in between."""
heading = StringProperty("")
""":class:`~kivy.properties.StringProperty` with string used as heading."""
class CheckContainer(ChildrenFromDataBehavior):
"""Container for widgets with :class:`~custom_widgets.behaviors.CheckBehavior`."""
check_one = BooleanProperty(False)
""":class:`~kivy.properties.BooleanProperty` defaults to ``False``. If ``True`` only one child can be selected."""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.child_bindings["current_state"] = self.conditional_uncheck
def conditional_uncheck(self, instance, value):
"""Uncheck other widgets if :attr:`check_one` is ``True``."""
if self.check_one:
for check_element in [
others for others in self.children if others != instance and value
]:
check_element.current_state = False
def get_checked(self, attribute_name=None):
"""
Return current selection.
Args:
attribute_name: Name of attribute to return. Defaults to ``None``.
Returns:
:* If ``attribute_name`` is None: List of selected children
* Else: List of attribute values
"""
checked_elements = [
element for element in self.children[::-1] if element.current_state
]
if attribute_name is None:
return checked_elements
return [
getattr(element, attribute_name) for element in checked_elements if element
]
class CheckCard(ThemableColorChangeBehavior, MDCard):
"""Selectable :~kivymd.uix.card.MDCard`. Select by click. Changes color on selection."""
text = StringProperty("test " * 15)
""":class:`~kivy.properties.StringProperty`."""
def on_press(self):
"""Change boolean value of :attr:`self.current_state`."""
self.current_state = ( # pylint: disable=attribute-defined-outside-init
not self.current_state
)
class CheckChip(
CircularRippleBehavior,
ButtonBehavior,
ThemableColorChangeBehavior,
BoxLayout,
):
"""Selectable Chip. Select by click. Change color on selection."""
icon = StringProperty("")
""":class:`~kivy.properties.StringProperty` defaults to ""."""
text = StringProperty("")
""":class:`~kivy.properties.StringProperty` defaults to ""."""
def on_press(self):
"""Change boolean value of :attr:`current_state`."""
self.current_state = ( # pylint: disable=attribute-defined-outside-init
not self.current_state
)
class TransChip(TranslationOnCheckBehavior, CheckChip):
"""Selectable Chip. Select by click. Change color and text on selection."""
class CheckChipContainer(CheckContainer, ThemableBehavior, StackLayout):
r"""Container for :class:`CheckChip`\ s. Use :attr:`child_dict` to populate."""
child_class_name = "CheckChip"
draw_box = BooleanProperty(False)
class CheckImageTile(CheckBehavior, SmartTile):
"""
Selectable :class:`~kivymd.uix.imagelist.SmartTile`.
Select by click. Changes :attr:`opacity` and :attr:`boarder_width` on selection.
"""
border_width = NumericProperty(0.01)
""":class:`~kivy.properties.NumericProperty` describing boarder-width of image tile."""
def __init__(self, **kwargs):
self.state_dicts = {
True: {"opacity": 1, "border_width": 3},
False: {"opacity": 0.8, "border_width": 0.01},
}
super().__init__(**kwargs)
def on_press(self):
"""Change boolean value of current state on press."""
self.current_state = ( # pylint: disable=attribute-defined-outside-init
not self.current_state
)
class TransCard(LongPressBehavior, MDCard, RectangularRippleBehavior):
"""Displays :attr:`text_orig` and :attr:`text_trans`, separated by a line."""
text_orig = StringProperty("")
""":class:`~kivy.properties.StringProperty` first text."""
text_trans = StringProperty("")
""":class:`~kivy.properties.StringProperty` second text."""
orientation = OptionProperty("vertical", options=["vertical", "horizontal"])
""":class:`~kivy.properties.OptionProperty` possible values ["vertical", "horizontal"] defaults to "vertical"."""
class LongPressImage(LongPressBehavior, AsyncImage):
""":class:`~kivy.uix.image.AsyncImage` with additional "on_press" and "on_long_press" event."""
Factory.register("LongPressImage", LongPressImage)
Factory.register("TransCard", TransCard)
class MyCarousel(FloatLayout, ChildrenFromDataBehavior):
"""
Carousel that constructs contents from :attr:`data`.
On click, opens a modal with list of content.
"""
carousel = ObjectProperty()
""":class:`~kivy.properties.ObjectProperty`"""
modal_layout_name = StringProperty()
""":class:`~kivy.properties.StringProperty`"""
modal_data_cls_name = StringProperty()
""":class:`~kivy.properties.StringProperty`"""
modal = ModalView()
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.child_bindings = {
"height": self.update_height,
"on_press": self.open_menu,
}
self.on_data()
def update_num_children(self):
"""Add/remove children until correct number is reached."""
diff = len(self.data) - len(self.root_for_children.children) + 1
for _ in range(abs(diff)):
if diff > 0:
self.add_child()
else:
self.remove_child()
def on_data(self, *_):
"""Override :meth:`behaviors.ChildrenFromDataBehavior.on_data` with correct list of children.
The children are in ``carousel.slides`` as opposed to ``carousel.children``.
"""
if self.child_class_name:
self.update_num_children()
self.carousel.index = 1
for i, child_dict in enumerate(self.data, start=1):
for key, val in child_dict.items():
setattr(self.carousel.slides[i], key, val)
def remove_child(self):
"""Override :meth:`behaviors.ChildrenFromDataBehavior.remove_child` with correct list of children.
The children are in ``carousel.slides`` as opposed to ``carousel.children``.
"""
last_slide = self.carousel.slides[-1]
self.carousel.remove_widget(last_slide)
def before_add_child(self, child):
"""Bind :meth:`set_child_width` to change of :attr:`width`."""
self.bind(width=lambda *_: self.set_child_width(child))
def after_add_child(self, child):
"""Call :meth:`set_child_width` after adding child."""
self.set_child_width(child)
def set_child_width(self, child, *_):
"""Set width of child to :attr:`width` - width of left and right-icon."""
width = self.width - self.ids.left_icon.width - self.ids.right_icon.width
setattr(child, "width", width)
def update_height(self, *_):
"""Implement in sub class. Placeholder."""
def get_modal_content(self, size_hint=(1, None)):
"""Return root widget to display on the modal."""
def set_carousel_index(i, *_):
self.carousel.index = i
self.modal.dismiss()
data_dicts = [
{"size_hint": size_hint, "on_press": partial(set_carousel_index, 0)}
] + [
{**dict, "size_hint": size_hint, "on_press": partial(set_carousel_index, i)}
for i, dict in enumerate(self.data, start=1)
]
recycle_view_cls = Factory.get(self.modal_layout_name)
recycle_view = recycle_view_cls()
recycle_view.child_class_name = self.modal_data_cls_name
recycle_view.data = data_dicts
return recycle_view
def get_checked(self, attribute_name=None):
"""If ``attribute_name`` is ``None``, return currently selected widget, else return a property thereof."""
checked_elements = [self.carousel.current_slide]
if attribute_name is None:
return checked_elements
return [
getattr(element, attribute_name) for element in checked_elements if element
]
def open_menu(self, *_):
"""Open :class:`kivy.uix.modalview.ModalView` with content given by :meth:`get_modal_content`."""
self.modal = ModalView()
modal_content = self.get_modal_content()
self.modal.add_widget(modal_content)
self.modal.open()
class ImageCarousel(MyCarousel):
"""Carousel of images."""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.child_bindings["on_error"] = lambda *_: self.dispatch("on_error", *_)
self.register_event_type("on_error")
self.on_data()
def get_modal_content(self, size_hint=(1, 1)):
"""Call :meth:`MyCarousel.get_modal_content` with ``size_hint=(1,1)``."""
return super().get_modal_content(size_hint=size_hint)
def on_error(self, *_):
"""Placeholder-function."""
class CardCarousel(MyCarousel):
"""
Carousel of :class:`TransCard`.
To use it with different objects, change :attr:`viewclass` and :attr:`modal_data_cls_name`.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
del self.child_bindings["on_press"]
def update_height(self, *_):
"""Update height via animation, so that Widget has height of currently displayed card."""
if self.carousel.current_slide:
new_height = self.carousel.current_slide.height + 24
if self.height != new_height:
anim = Animation(height=new_height, duration=0.2)
anim.start(self)
class RecycleCarousel(FloatLayout):
"""
Wrapper class for a :class:`~kivy.uix.carousel.Carousel` that uses only 3 slides to update content dynamically.
The :attr:`index` is updated according to the change of the carousel index and each time one of the slides is
updated with data from :attr:`data`. The content of the slides is constructed as instances of :attr:`viewclass`.
"""
carousel = ObjectProperty()
""":class:`kivy.properties.ObjectProperty` defaults to ``None``."""
viewclass = StringProperty("TransCard")
""":class:`kivy.properties.StringProperty` defaults to ``"TransCard"``. Class name of the widgets that are added
to the carousel."""
data = ListProperty()
""":class:`kivy.properties.ListProperty` defaults to ``None``. List of dictionaries from which the content is
generated."""
slide_width = NumericProperty()
""":class:`kivy.properties.NumericProperty` defaults to ``None``. Width that the content of the slides should
have."""
dynamic_height = BooleanProperty(False)
""":class:`kivy.properties.BooleanProperty` defaults to ``False``. If ``True`` updates the height of the root
widget to the height of the object on the current slide + 24. Only possible if size_hint_y of the widget on the
slide is not set."""
index = NumericProperty(0)
""":class:`kivy.properties.NumericProperty` defaults to ``0``. Current (virtual) index."""
last_carousel_index = NumericProperty(0)
""":class:`kivy.properties.NumericProperty` defaults to ``0``. Last index that the :attr:`carousel` had. Used to
determine whether the user did slide right or left."""
current_slide = ObjectProperty()
""":class:`kivy.properties.ObjectProperty`. Reference to :attr:`carousel`.current_slide."""
modal_layout_name = StringProperty()
""":class:`kivy.properties.StringProperty` defaults to ``None``. Class name for root widget of :attr:`modal`."""
modal_data_cls_name = StringProperty()
""":class:`kivy.properties.StringProperty` defaults to ``None``. Class name for children of :attr:`modal`."""
modal = ObjectProperty(ModalView())
""":class:`kivy.properties.ObjectProperty` defaults to ``ModalView()``."""
default_modal_size_hint = ListProperty([1, None])
def update_height(self, *_):
"""Update height via animation, so that Widget has height of currently displayed card."""
if self.dynamic_height:
new_height = self.carousel.current_slide.height + 24
if self.height != new_height:
anim = Animation(height=new_height, duration=0.3)
anim.start(self)
def setup_modal(self):
"""Return root widget to display on the modal."""
self.modal = ModalView()
modal_root_cls = Factory.get(self.modal_layout_name)
modal_root = modal_root_cls()
self.modal.add_widget(modal_root)
def _modal_child_callback(self, i, *_):
self.set_index(i)
self.modal.dismiss()
def update_modal_content(self):
"""Update content of modal."""
data_dicts = [
{
**dict,
"size_hint": self.default_modal_size_hint,
"on_press": partial(self._modal_child_callback, i),
}
for i, dict in enumerate(self.data)
]
self.modal.children[0].child_class_name = self.modal_data_cls_name
self.modal.children[0].data = data_dicts
def get_checked(self, attribute_name=None):
"""If ``attribute_name`` is ``None``, return currently selected widget, else return a property thereof."""
checked_elements = [self.carousel.current_slide]
if attribute_name is None:
return checked_elements
return [
getattr(element, attribute_name) for element in checked_elements if element
]
def open_menu(self, *_):
"""Open :class:`kivy.uix.modalview.ModalView` with content given by :meth:`setup_modal`."""
if not self.modal.children:
self.setup_modal()
self.update_modal_content()
self.modal.open()
def on_data(self, *_):
"""Set up :attr:`carousel` by initializing 3 widgets, adding them and binding some Properties."""
self.carousel.clear_widgets()
if len(self.data) >= 3:
for i in [0, 1, -1]:
widget = Factory.get(self.viewclass)(**self.data[i])
self.carousel.add_widget(widget)
self.bind(slide_width=widget.setter("width"))
widget.bind(on_press=self.open_menu)
widget.width = self.slide_width
self.carousel.register_event_type("on_index")
self.carousel.bind(index=self.update_index)
self.carousel.bind(current_slide=self.update_height)
self.carousel.current_slide.bind(height=self.update_height)
print("RecylceCarousel needs at least 3 elements to be displayed correctly.")
def update_index(self, _, carousel_index):
"""Change :attr:`index` according to change in ``carousel_index`` and update one of the three slides."""
diff = carousel_index - self.last_carousel_index
diff = -1 if diff == 2 else 1 if diff == -2 else diff
self.last_carousel_index = carousel_index
self.index = (self.index + diff) % len(self.data)
self.update_slide(carousel_index + diff, self.index + diff)
def update_slide(self, carousel_index, index):
"""
Update slide with index ``carousel_index`` by content from :attr:`data` [index].
Modulo function applied to indices guarantees values to be in the correct range.
"""
carousel_index %= 3
index %= len(self.data)
for name, val in self.data[index].items():
setattr(self.carousel.slides[carousel_index], name, val)
def set_index(self, index):
"""Set :attr:`index` to ``index`` and updates carousel accordingly."""
self.index = index
self.update_height()
for i in [0, 1, -1]:
self.update_slide((self.last_carousel_index + i) % 3, self.index + i)
# pylint: disable = W,C,R,I
if __name__ == "__main__":
CARD_CAROUSEL_STRING = (
"CardCarousel:\n"
' data: [{"text_orig":str(i)*50*i,"text_trans":"Trans"} for i in range(10)]'
)
RECYCLE_CAROUSEL_STRING = (
"RecycleCardCarousel:\n" # some comment
' data: [{"text_orig":str(i)*50*i,"text_trans":"Trans"} for i in range(10)]'
)
IMAGE_CAROUSEL_STRING = (
"ImageCarousel:\n"
' data: [{"source":"../assets/AnkiCardGen.png"} for _ in range(5)]'
)
class _TestApp(MDApp):
def build(self):
self.theme_cls.primary_palette = "Red" # "Purple", "Red"
self.theme_cls.theme_style = "Light" # "Purple", "Red"
return Builder.load_string(RECYCLE_CAROUSEL_STRING)
_TestApp().run()
| 37.694268
| 118
| 0.652529
|
from functools import partial
from kivy.animation import Animation
from kivy.factory import Factory
from kivy.lang import Builder
from kivy.properties import (
BooleanProperty,
ListProperty,
NumericProperty,
ObjectProperty,
OptionProperty,
StringProperty,
)
from kivy.uix.behaviors import ButtonBehavior
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.image import AsyncImage
from kivy.uix.modalview import ModalView
from kivy.uix.stacklayout import StackLayout
from kivymd.app import MDApp
from kivymd.theming import ThemableBehavior
from kivymd.uix.behaviors import CircularRippleBehavior, RectangularRippleBehavior
from kivymd.uix.card import MDCard
from kivymd.uix.imagelist import SmartTile
from .behaviors import (
CheckBehavior,
ChildrenFromDataBehavior,
LongPressBehavior,
ThemableColorChangeBehavior,
TranslationOnCheckBehavior,
)
class SeparatorWithHeading(FloatLayout):
heading = StringProperty("")
class CheckContainer(ChildrenFromDataBehavior):
check_one = BooleanProperty(False)
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.child_bindings["current_state"] = self.conditional_uncheck
def conditional_uncheck(self, instance, value):
if self.check_one:
for check_element in [
others for others in self.children if others != instance and value
]:
check_element.current_state = False
def get_checked(self, attribute_name=None):
checked_elements = [
element for element in self.children[::-1] if element.current_state
]
if attribute_name is None:
return checked_elements
return [
getattr(element, attribute_name) for element in checked_elements if element
]
class CheckCard(ThemableColorChangeBehavior, MDCard):
text = StringProperty("test " * 15)
def on_press(self):
self.current_state = (
not self.current_state
)
class CheckChip(
CircularRippleBehavior,
ButtonBehavior,
ThemableColorChangeBehavior,
BoxLayout,
):
icon = StringProperty("")
text = StringProperty("")
def on_press(self):
self.current_state = (
not self.current_state
)
class TransChip(TranslationOnCheckBehavior, CheckChip):
class CheckChipContainer(CheckContainer, ThemableBehavior, StackLayout):
child_class_name = "CheckChip"
draw_box = BooleanProperty(False)
class CheckImageTile(CheckBehavior, SmartTile):
border_width = NumericProperty(0.01)
def __init__(self, **kwargs):
self.state_dicts = {
True: {"opacity": 1, "border_width": 3},
False: {"opacity": 0.8, "border_width": 0.01},
}
super().__init__(**kwargs)
def on_press(self):
self.current_state = (
not self.current_state
)
class TransCard(LongPressBehavior, MDCard, RectangularRippleBehavior):
text_orig = StringProperty("")
text_trans = StringProperty("")
orientation = OptionProperty("vertical", options=["vertical", "horizontal"])
class LongPressImage(LongPressBehavior, AsyncImage):
Factory.register("LongPressImage", LongPressImage)
Factory.register("TransCard", TransCard)
class MyCarousel(FloatLayout, ChildrenFromDataBehavior):
carousel = ObjectProperty()
modal_layout_name = StringProperty()
modal_data_cls_name = StringProperty()
modal = ModalView()
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.child_bindings = {
"height": self.update_height,
"on_press": self.open_menu,
}
self.on_data()
def update_num_children(self):
diff = len(self.data) - len(self.root_for_children.children) + 1
for _ in range(abs(diff)):
if diff > 0:
self.add_child()
else:
self.remove_child()
def on_data(self, *_):
if self.child_class_name:
self.update_num_children()
self.carousel.index = 1
for i, child_dict in enumerate(self.data, start=1):
for key, val in child_dict.items():
setattr(self.carousel.slides[i], key, val)
def remove_child(self):
last_slide = self.carousel.slides[-1]
self.carousel.remove_widget(last_slide)
def before_add_child(self, child):
self.bind(width=lambda *_: self.set_child_width(child))
def after_add_child(self, child):
self.set_child_width(child)
def set_child_width(self, child, *_):
width = self.width - self.ids.left_icon.width - self.ids.right_icon.width
setattr(child, "width", width)
def update_height(self, *_):
def get_modal_content(self, size_hint=(1, None)):
def set_carousel_index(i, *_):
self.carousel.index = i
self.modal.dismiss()
data_dicts = [
{"size_hint": size_hint, "on_press": partial(set_carousel_index, 0)}
] + [
{**dict, "size_hint": size_hint, "on_press": partial(set_carousel_index, i)}
for i, dict in enumerate(self.data, start=1)
]
recycle_view_cls = Factory.get(self.modal_layout_name)
recycle_view = recycle_view_cls()
recycle_view.child_class_name = self.modal_data_cls_name
recycle_view.data = data_dicts
return recycle_view
def get_checked(self, attribute_name=None):
checked_elements = [self.carousel.current_slide]
if attribute_name is None:
return checked_elements
return [
getattr(element, attribute_name) for element in checked_elements if element
]
def open_menu(self, *_):
self.modal = ModalView()
modal_content = self.get_modal_content()
self.modal.add_widget(modal_content)
self.modal.open()
class ImageCarousel(MyCarousel):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.child_bindings["on_error"] = lambda *_: self.dispatch("on_error", *_)
self.register_event_type("on_error")
self.on_data()
def get_modal_content(self, size_hint=(1, 1)):
return super().get_modal_content(size_hint=size_hint)
def on_error(self, *_):
class CardCarousel(MyCarousel):
def __init__(self, **kwargs):
super().__init__(**kwargs)
del self.child_bindings["on_press"]
def update_height(self, *_):
if self.carousel.current_slide:
new_height = self.carousel.current_slide.height + 24
if self.height != new_height:
anim = Animation(height=new_height, duration=0.2)
anim.start(self)
class RecycleCarousel(FloatLayout):
carousel = ObjectProperty()
viewclass = StringProperty("TransCard")
data = ListProperty()
slide_width = NumericProperty()
dynamic_height = BooleanProperty(False)
index = NumericProperty(0)
last_carousel_index = NumericProperty(0)
current_slide = ObjectProperty()
modal_layout_name = StringProperty()
modal_data_cls_name = StringProperty()
modal = ObjectProperty(ModalView())
default_modal_size_hint = ListProperty([1, None])
def update_height(self, *_):
if self.dynamic_height:
new_height = self.carousel.current_slide.height + 24
if self.height != new_height:
anim = Animation(height=new_height, duration=0.3)
anim.start(self)
def setup_modal(self):
self.modal = ModalView()
modal_root_cls = Factory.get(self.modal_layout_name)
modal_root = modal_root_cls()
self.modal.add_widget(modal_root)
def _modal_child_callback(self, i, *_):
self.set_index(i)
self.modal.dismiss()
def update_modal_content(self):
data_dicts = [
{
**dict,
"size_hint": self.default_modal_size_hint,
"on_press": partial(self._modal_child_callback, i),
}
for i, dict in enumerate(self.data)
]
self.modal.children[0].child_class_name = self.modal_data_cls_name
self.modal.children[0].data = data_dicts
def get_checked(self, attribute_name=None):
checked_elements = [self.carousel.current_slide]
if attribute_name is None:
return checked_elements
return [
getattr(element, attribute_name) for element in checked_elements if element
]
def open_menu(self, *_):
if not self.modal.children:
self.setup_modal()
self.update_modal_content()
self.modal.open()
def on_data(self, *_):
self.carousel.clear_widgets()
if len(self.data) >= 3:
for i in [0, 1, -1]:
widget = Factory.get(self.viewclass)(**self.data[i])
self.carousel.add_widget(widget)
self.bind(slide_width=widget.setter("width"))
widget.bind(on_press=self.open_menu)
widget.width = self.slide_width
self.carousel.register_event_type("on_index")
self.carousel.bind(index=self.update_index)
self.carousel.bind(current_slide=self.update_height)
self.carousel.current_slide.bind(height=self.update_height)
print("RecylceCarousel needs at least 3 elements to be displayed correctly.")
def update_index(self, _, carousel_index):
diff = carousel_index - self.last_carousel_index
diff = -1 if diff == 2 else 1 if diff == -2 else diff
self.last_carousel_index = carousel_index
self.index = (self.index + diff) % len(self.data)
self.update_slide(carousel_index + diff, self.index + diff)
def update_slide(self, carousel_index, index):
carousel_index %= 3
index %= len(self.data)
for name, val in self.data[index].items():
setattr(self.carousel.slides[carousel_index], name, val)
def set_index(self, index):
self.index = index
self.update_height()
for i in [0, 1, -1]:
self.update_slide((self.last_carousel_index + i) % 3, self.index + i)
if __name__ == "__main__":
CARD_CAROUSEL_STRING = (
"CardCarousel:\n"
' data: [{"text_orig":str(i)*50*i,"text_trans":"Trans"} for i in range(10)]'
)
RECYCLE_CAROUSEL_STRING = (
"RecycleCardCarousel:\n"
' data: [{"text_orig":str(i)*50*i,"text_trans":"Trans"} for i in range(10)]'
)
IMAGE_CAROUSEL_STRING = (
"ImageCarousel:\n"
' data: [{"source":"../assets/AnkiCardGen.png"} for _ in range(5)]'
)
class _TestApp(MDApp):
def build(self):
self.theme_cls.primary_palette = "Red"
self.theme_cls.theme_style = "Light"
return Builder.load_string(RECYCLE_CAROUSEL_STRING)
_TestApp().run()
| true
| true
|
f7177f17985f47452311b910b6f0dc8fb2631393
| 276
|
py
|
Python
|
packaging/setup/plugins/ovirt-engine-setup/ovirt_imageio/__init__.py
|
jihwahn1018/ovirt-engine
|
5c8a3d9a9637eefb28e4accc3cbd2b7f530d5ec9
|
[
"Apache-2.0"
] | 347
|
2015-01-20T14:13:21.000Z
|
2022-03-31T17:53:11.000Z
|
packaging/setup/plugins/ovirt-engine-setup/ovirt_imageio/__init__.py
|
jihwahn1018/ovirt-engine
|
5c8a3d9a9637eefb28e4accc3cbd2b7f530d5ec9
|
[
"Apache-2.0"
] | 128
|
2015-05-22T19:14:32.000Z
|
2022-03-31T08:11:18.000Z
|
packaging/setup/plugins/ovirt-engine-setup/ovirt_imageio/__init__.py
|
jihwahn1018/ovirt-engine
|
5c8a3d9a9637eefb28e4accc3cbd2b7f530d5ec9
|
[
"Apache-2.0"
] | 202
|
2015-01-04T06:20:49.000Z
|
2022-03-08T15:30:08.000Z
|
#
# ovirt-engine-setup -- ovirt engine setup
#
# Copyright oVirt Authors
# SPDX-License-Identifier: Apache-2.0
#
#
"""ovirt-imageio setup plugin."""
from otopi import util
from . import config
@util.export
def createPlugins(context):
config.Plugin(context=context)
| 13.142857
| 42
| 0.721014
|
from otopi import util
from . import config
@util.export
def createPlugins(context):
config.Plugin(context=context)
| true
| true
|
f71780030f07f5124673cea42eacb5d831327dea
| 543
|
py
|
Python
|
manage.py
|
arpitmisraw/Effervescence18-Website
|
3c510f066986af80aaced566b32c5040310d3107
|
[
"MIT"
] | 2
|
2018-06-27T20:46:16.000Z
|
2018-08-02T11:02:26.000Z
|
manage.py
|
arpitmisraw/Effervescence18-Website
|
3c510f066986af80aaced566b32c5040310d3107
|
[
"MIT"
] | 3
|
2020-06-05T18:15:37.000Z
|
2021-06-10T20:20:29.000Z
|
manage.py
|
arpitmisraw/Effervescence18-Website
|
3c510f066986af80aaced566b32c5040310d3107
|
[
"MIT"
] | 1
|
2019-08-01T12:06:35.000Z
|
2019-08-01T12:06:35.000Z
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "effe_portal.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| 33.9375
| 75
| 0.688766
|
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "effe_portal.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| true
| true
|
f71780c4b08f54dd66b5a1991c6159621e4cec1f
| 18,071
|
py
|
Python
|
test/cpython/test_audioop.py
|
aisk/pyston
|
ac69cfef0621dbc8901175e84fa2b5cb5781a646
|
[
"BSD-2-Clause",
"Apache-2.0"
] | 1
|
2020-02-06T14:28:45.000Z
|
2020-02-06T14:28:45.000Z
|
test/cpython/test_audioop.py
|
aisk/pyston
|
ac69cfef0621dbc8901175e84fa2b5cb5781a646
|
[
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null |
test/cpython/test_audioop.py
|
aisk/pyston
|
ac69cfef0621dbc8901175e84fa2b5cb5781a646
|
[
"BSD-2-Clause",
"Apache-2.0"
] | 1
|
2020-02-06T14:29:00.000Z
|
2020-02-06T14:29:00.000Z
|
# expected: fail
import audioop
import sys
import unittest
import struct
from test.test_support import run_unittest
formats = {
1: 'b',
2: 'h',
4: 'i',
}
def pack(width, data):
return struct.pack('=%d%s' % (len(data), formats[width]), *data)
packs = {
1: lambda *data: pack(1, data),
2: lambda *data: pack(2, data),
4: lambda *data: pack(4, data),
}
maxvalues = {w: (1 << (8 * w - 1)) - 1 for w in (1, 2, 4)}
minvalues = {w: -1 << (8 * w - 1) for w in (1, 2, 4)}
datas = {
1: b'\x00\x12\x45\xbb\x7f\x80\xff',
2: packs[2](0, 0x1234, 0x4567, -0x4567, 0x7fff, -0x8000, -1),
4: packs[4](0, 0x12345678, 0x456789ab, -0x456789ab,
0x7fffffff, -0x80000000, -1),
}
INVALID_DATA = [
(b'abc', 0),
(b'abc', 2),
(b'abc', 4),
]
class TestAudioop(unittest.TestCase):
def test_max(self):
for w in 1, 2, 4:
self.assertEqual(audioop.max(b'', w), 0)
p = packs[w]
self.assertEqual(audioop.max(p(5), w), 5)
self.assertEqual(audioop.max(p(5, -8, -1), w), 8)
self.assertEqual(audioop.max(p(maxvalues[w]), w), maxvalues[w])
self.assertEqual(audioop.max(p(minvalues[w]), w), -minvalues[w])
self.assertEqual(audioop.max(datas[w], w), -minvalues[w])
def test_minmax(self):
for w in 1, 2, 4:
self.assertEqual(audioop.minmax(b'', w),
(0x7fffffff, -0x80000000))
p = packs[w]
self.assertEqual(audioop.minmax(p(5), w), (5, 5))
self.assertEqual(audioop.minmax(p(5, -8, -1), w), (-8, 5))
self.assertEqual(audioop.minmax(p(maxvalues[w]), w),
(maxvalues[w], maxvalues[w]))
self.assertEqual(audioop.minmax(p(minvalues[w]), w),
(minvalues[w], minvalues[w]))
self.assertEqual(audioop.minmax(datas[w], w),
(minvalues[w], maxvalues[w]))
def test_maxpp(self):
for w in 1, 2, 4:
self.assertEqual(audioop.maxpp(b'', w), 0)
self.assertEqual(audioop.maxpp(packs[w](*range(100)), w), 0)
self.assertEqual(audioop.maxpp(packs[w](9, 10, 5, 5, 0, 1), w), 10)
self.assertEqual(audioop.maxpp(datas[w], w),
maxvalues[w] - minvalues[w])
def test_avg(self):
for w in 1, 2, 4:
self.assertEqual(audioop.avg(b'', w), 0)
p = packs[w]
self.assertEqual(audioop.avg(p(5), w), 5)
self .assertEqual(audioop.avg(p(5, 8), w), 6)
self.assertEqual(audioop.avg(p(5, -8), w), -2)
self.assertEqual(audioop.avg(p(maxvalues[w], maxvalues[w]), w),
maxvalues[w])
self.assertEqual(audioop.avg(p(minvalues[w], minvalues[w]), w),
minvalues[w])
self.assertEqual(audioop.avg(packs[4](0x50000000, 0x70000000), 4),
0x60000000)
self.assertEqual(audioop.avg(packs[4](-0x50000000, -0x70000000), 4),
-0x60000000)
def test_avgpp(self):
for w in 1, 2, 4:
self.assertEqual(audioop.avgpp(b'', w), 0)
self.assertEqual(audioop.avgpp(packs[w](*range(100)), w), 0)
self.assertEqual(audioop.avgpp(packs[w](9, 10, 5, 5, 0, 1), w), 10)
self.assertEqual(audioop.avgpp(datas[1], 1), 196)
self.assertEqual(audioop.avgpp(datas[2], 2), 50534)
self.assertEqual(audioop.avgpp(datas[4], 4), 3311897002)
def test_rms(self):
for w in 1, 2, 4:
self.assertEqual(audioop.rms(b'', w), 0)
p = packs[w]
self.assertEqual(audioop.rms(p(*range(100)), w), 57)
self.assertAlmostEqual(audioop.rms(p(maxvalues[w]) * 5, w),
maxvalues[w], delta=1)
self.assertAlmostEqual(audioop.rms(p(minvalues[w]) * 5, w),
-minvalues[w], delta=1)
self.assertEqual(audioop.rms(datas[1], 1), 77)
self.assertEqual(audioop.rms(datas[2], 2), 20001)
self.assertEqual(audioop.rms(datas[4], 4), 1310854152)
def test_cross(self):
for w in 1, 2, 4:
self.assertEqual(audioop.cross(b'', w), -1)
p = packs[w]
self.assertEqual(audioop.cross(p(0, 1, 2), w), 0)
self.assertEqual(audioop.cross(p(1, 2, -3, -4), w), 1)
self.assertEqual(audioop.cross(p(-1, -2, 3, 4), w), 1)
self.assertEqual(audioop.cross(p(0, minvalues[w]), w), 1)
self.assertEqual(audioop.cross(p(minvalues[w], maxvalues[w]), w), 1)
def test_add(self):
for w in 1, 2, 4:
self.assertEqual(audioop.add(b'', b'', w), b'')
self.assertEqual(audioop.add(datas[w], b'\0' * len(datas[w]), w),
datas[w])
self.assertEqual(audioop.add(datas[1], datas[1], 1),
b'\x00\x24\x7f\x80\x7f\x80\xfe')
self.assertEqual(audioop.add(datas[2], datas[2], 2),
packs[2](0, 0x2468, 0x7fff, -0x8000, 0x7fff, -0x8000, -2))
self.assertEqual(audioop.add(datas[4], datas[4], 4),
packs[4](0, 0x2468acf0, 0x7fffffff, -0x80000000,
0x7fffffff, -0x80000000, -2))
def test_bias(self):
for w in 1, 2, 4:
for bias in 0, 1, -1, 127, -128, 0x7fffffff, -0x80000000:
self.assertEqual(audioop.bias(b'', w, bias), b'')
self.assertEqual(audioop.bias(datas[1], 1, 1),
b'\x01\x13\x46\xbc\x80\x81\x00')
self.assertEqual(audioop.bias(datas[1], 1, -1),
b'\xff\x11\x44\xba\x7e\x7f\xfe')
self.assertEqual(audioop.bias(datas[1], 1, 0x7fffffff),
b'\xff\x11\x44\xba\x7e\x7f\xfe')
self.assertEqual(audioop.bias(datas[1], 1, -0x80000000),
datas[1])
self.assertEqual(audioop.bias(datas[2], 2, 1),
packs[2](1, 0x1235, 0x4568, -0x4566, -0x8000, -0x7fff, 0))
self.assertEqual(audioop.bias(datas[2], 2, -1),
packs[2](-1, 0x1233, 0x4566, -0x4568, 0x7ffe, 0x7fff, -2))
self.assertEqual(audioop.bias(datas[2], 2, 0x7fffffff),
packs[2](-1, 0x1233, 0x4566, -0x4568, 0x7ffe, 0x7fff, -2))
self.assertEqual(audioop.bias(datas[2], 2, -0x80000000),
datas[2])
self.assertEqual(audioop.bias(datas[4], 4, 1),
packs[4](1, 0x12345679, 0x456789ac, -0x456789aa,
-0x80000000, -0x7fffffff, 0))
self.assertEqual(audioop.bias(datas[4], 4, -1),
packs[4](-1, 0x12345677, 0x456789aa, -0x456789ac,
0x7ffffffe, 0x7fffffff, -2))
self.assertEqual(audioop.bias(datas[4], 4, 0x7fffffff),
packs[4](0x7fffffff, -0x6dcba989, -0x3a987656, 0x3a987654,
-2, -1, 0x7ffffffe))
self.assertEqual(audioop.bias(datas[4], 4, -0x80000000),
packs[4](-0x80000000, -0x6dcba988, -0x3a987655, 0x3a987655,
-1, 0, 0x7fffffff))
def test_lin2lin(self):
for w in 1, 2, 4:
self.assertEqual(audioop.lin2lin(datas[w], w, w), datas[w])
self.assertEqual(audioop.lin2lin(datas[1], 1, 2),
packs[2](0, 0x1200, 0x4500, -0x4500, 0x7f00, -0x8000, -0x100))
self.assertEqual(audioop.lin2lin(datas[1], 1, 4),
packs[4](0, 0x12000000, 0x45000000, -0x45000000,
0x7f000000, -0x80000000, -0x1000000))
self.assertEqual(audioop.lin2lin(datas[2], 2, 1),
b'\x00\x12\x45\xba\x7f\x80\xff')
self.assertEqual(audioop.lin2lin(datas[2], 2, 4),
packs[4](0, 0x12340000, 0x45670000, -0x45670000,
0x7fff0000, -0x80000000, -0x10000))
self.assertEqual(audioop.lin2lin(datas[4], 4, 1),
b'\x00\x12\x45\xba\x7f\x80\xff')
self.assertEqual(audioop.lin2lin(datas[4], 4, 2),
packs[2](0, 0x1234, 0x4567, -0x4568, 0x7fff, -0x8000, -1))
def test_adpcm2lin(self):
self.assertEqual(audioop.adpcm2lin(b'\x07\x7f\x7f', 1, None),
(b'\x00\x00\x00\xff\x00\xff', (-179, 40)))
self.assertEqual(audioop.adpcm2lin(b'\x07\x7f\x7f', 2, None),
(packs[2](0, 0xb, 0x29, -0x16, 0x72, -0xb3), (-179, 40)))
self.assertEqual(audioop.adpcm2lin(b'\x07\x7f\x7f', 4, None),
(packs[4](0, 0xb0000, 0x290000, -0x160000, 0x720000,
-0xb30000), (-179, 40)))
# Very cursory test
for w in 1, 2, 4:
self.assertEqual(audioop.adpcm2lin(b'\0' * 5, w, None),
(b'\0' * w * 10, (0, 0)))
def test_lin2adpcm(self):
self.assertEqual(audioop.lin2adpcm(datas[1], 1, None),
(b'\x07\x7f\x7f', (-221, 39)))
self.assertEqual(audioop.lin2adpcm(datas[2], 2, None),
(b'\x07\x7f\x7f', (31, 39)))
self.assertEqual(audioop.lin2adpcm(datas[4], 4, None),
(b'\x07\x7f\x7f', (31, 39)))
# Very cursory test
for w in 1, 2, 4:
self.assertEqual(audioop.lin2adpcm(b'\0' * w * 10, w, None),
(b'\0' * 5, (0, 0)))
def test_lin2alaw(self):
self.assertEqual(audioop.lin2alaw(datas[1], 1),
b'\xd5\x87\xa4\x24\xaa\x2a\x5a')
self.assertEqual(audioop.lin2alaw(datas[2], 2),
b'\xd5\x87\xa4\x24\xaa\x2a\x55')
self.assertEqual(audioop.lin2alaw(datas[4], 4),
b'\xd5\x87\xa4\x24\xaa\x2a\x55')
def test_alaw2lin(self):
encoded = b'\x00\x03\x24\x2a\x51\x54\x55\x58\x6b\x71\x7f'\
b'\x80\x83\xa4\xaa\xd1\xd4\xd5\xd8\xeb\xf1\xff'
src = [-688, -720, -2240, -4032, -9, -3, -1, -27, -244, -82, -106,
688, 720, 2240, 4032, 9, 3, 1, 27, 244, 82, 106]
for w in 1, 2, 4:
self.assertEqual(audioop.alaw2lin(encoded, w),
packs[w](*(x << (w * 8) >> 13 for x in src)))
encoded = ''.join(chr(x) for x in xrange(256))
for w in 2, 4:
decoded = audioop.alaw2lin(encoded, w)
self.assertEqual(audioop.lin2alaw(decoded, w), encoded)
def test_lin2ulaw(self):
self.assertEqual(audioop.lin2ulaw(datas[1], 1),
b'\xff\xad\x8e\x0e\x80\x00\x67')
self.assertEqual(audioop.lin2ulaw(datas[2], 2),
b'\xff\xad\x8e\x0e\x80\x00\x7e')
self.assertEqual(audioop.lin2ulaw(datas[4], 4),
b'\xff\xad\x8e\x0e\x80\x00\x7e')
def test_ulaw2lin(self):
encoded = b'\x00\x0e\x28\x3f\x57\x6a\x76\x7c\x7e\x7f'\
b'\x80\x8e\xa8\xbf\xd7\xea\xf6\xfc\xfe\xff'
src = [-8031, -4447, -1471, -495, -163, -53, -18, -6, -2, 0,
8031, 4447, 1471, 495, 163, 53, 18, 6, 2, 0]
for w in 1, 2, 4:
self.assertEqual(audioop.ulaw2lin(encoded, w),
packs[w](*(x << (w * 8) >> 14 for x in src)))
# Current u-law implementation has two codes fo 0: 0x7f and 0xff.
encoded = ''.join(chr(x) for x in range(127) + range(128, 256))
for w in 2, 4:
decoded = audioop.ulaw2lin(encoded, w)
self.assertEqual(audioop.lin2ulaw(decoded, w), encoded)
def test_mul(self):
for w in 1, 2, 4:
self.assertEqual(audioop.mul(b'', w, 2), b'')
self.assertEqual(audioop.mul(datas[w], w, 0),
b'\0' * len(datas[w]))
self.assertEqual(audioop.mul(datas[w], w, 1),
datas[w])
self.assertEqual(audioop.mul(datas[1], 1, 2),
b'\x00\x24\x7f\x80\x7f\x80\xfe')
self.assertEqual(audioop.mul(datas[2], 2, 2),
packs[2](0, 0x2468, 0x7fff, -0x8000, 0x7fff, -0x8000, -2))
self.assertEqual(audioop.mul(datas[4], 4, 2),
packs[4](0, 0x2468acf0, 0x7fffffff, -0x80000000,
0x7fffffff, -0x80000000, -2))
def test_ratecv(self):
for w in 1, 2, 4:
self.assertEqual(audioop.ratecv(b'', w, 1, 8000, 8000, None),
(b'', (-1, ((0, 0),))))
self.assertEqual(audioop.ratecv(b'', w, 5, 8000, 8000, None),
(b'', (-1, ((0, 0),) * 5)))
self.assertEqual(audioop.ratecv(b'', w, 1, 8000, 16000, None),
(b'', (-2, ((0, 0),))))
self.assertEqual(audioop.ratecv(datas[w], w, 1, 8000, 8000, None)[0],
datas[w])
state = None
d1, state = audioop.ratecv(b'\x00\x01\x02', 1, 1, 8000, 16000, state)
d2, state = audioop.ratecv(b'\x00\x01\x02', 1, 1, 8000, 16000, state)
self.assertEqual(d1 + d2, b'\000\000\001\001\002\001\000\000\001\001\002')
for w in 1, 2, 4:
d0, state0 = audioop.ratecv(datas[w], w, 1, 8000, 16000, None)
d, state = b'', None
for i in range(0, len(datas[w]), w):
d1, state = audioop.ratecv(datas[w][i:i + w], w, 1,
8000, 16000, state)
d += d1
self.assertEqual(d, d0)
self.assertEqual(state, state0)
def test_reverse(self):
for w in 1, 2, 4:
self.assertEqual(audioop.reverse(b'', w), b'')
self.assertEqual(audioop.reverse(packs[w](0, 1, 2), w),
packs[w](2, 1, 0))
def test_tomono(self):
for w in 1, 2, 4:
data1 = datas[w]
data2 = bytearray(2 * len(data1))
for k in range(w):
data2[k::2*w] = data1[k::w]
self.assertEqual(audioop.tomono(str(data2), w, 1, 0), data1)
self.assertEqual(audioop.tomono(str(data2), w, 0, 1), b'\0' * len(data1))
for k in range(w):
data2[k+w::2*w] = data1[k::w]
self.assertEqual(audioop.tomono(str(data2), w, 0.5, 0.5), data1)
def test_tostereo(self):
for w in 1, 2, 4:
data1 = datas[w]
data2 = bytearray(2 * len(data1))
for k in range(w):
data2[k::2*w] = data1[k::w]
self.assertEqual(audioop.tostereo(data1, w, 1, 0), data2)
self.assertEqual(audioop.tostereo(data1, w, 0, 0), b'\0' * len(data2))
for k in range(w):
data2[k+w::2*w] = data1[k::w]
self.assertEqual(audioop.tostereo(data1, w, 1, 1), data2)
def test_findfactor(self):
self.assertEqual(audioop.findfactor(datas[2], datas[2]), 1.0)
self.assertEqual(audioop.findfactor(b'\0' * len(datas[2]), datas[2]),
0.0)
def test_findfit(self):
self.assertEqual(audioop.findfit(datas[2], datas[2]), (0, 1.0))
self.assertEqual(audioop.findfit(datas[2], packs[2](1, 2, 0)),
(1, 8038.8))
self.assertEqual(audioop.findfit(datas[2][:-2] * 5 + datas[2], datas[2]),
(30, 1.0))
def test_findmax(self):
self.assertEqual(audioop.findmax(datas[2], 1), 5)
def test_getsample(self):
for w in 1, 2, 4:
data = packs[w](0, 1, -1, maxvalues[w], minvalues[w])
self.assertEqual(audioop.getsample(data, w, 0), 0)
self.assertEqual(audioop.getsample(data, w, 1), 1)
self.assertEqual(audioop.getsample(data, w, 2), -1)
self.assertEqual(audioop.getsample(data, w, 3), maxvalues[w])
self.assertEqual(audioop.getsample(data, w, 4), minvalues[w])
def test_negativelen(self):
# from issue 3306, previously it segfaulted
self.assertRaises(audioop.error,
audioop.findmax, ''.join( chr(x) for x in xrange(256)), -2392392)
def test_issue7673(self):
state = None
for data, size in INVALID_DATA:
size2 = size
self.assertRaises(audioop.error, audioop.getsample, data, size, 0)
self.assertRaises(audioop.error, audioop.max, data, size)
self.assertRaises(audioop.error, audioop.minmax, data, size)
self.assertRaises(audioop.error, audioop.avg, data, size)
self.assertRaises(audioop.error, audioop.rms, data, size)
self.assertRaises(audioop.error, audioop.avgpp, data, size)
self.assertRaises(audioop.error, audioop.maxpp, data, size)
self.assertRaises(audioop.error, audioop.cross, data, size)
self.assertRaises(audioop.error, audioop.mul, data, size, 1.0)
self.assertRaises(audioop.error, audioop.tomono, data, size, 0.5, 0.5)
self.assertRaises(audioop.error, audioop.tostereo, data, size, 0.5, 0.5)
self.assertRaises(audioop.error, audioop.add, data, data, size)
self.assertRaises(audioop.error, audioop.bias, data, size, 0)
self.assertRaises(audioop.error, audioop.reverse, data, size)
self.assertRaises(audioop.error, audioop.lin2lin, data, size, size2)
self.assertRaises(audioop.error, audioop.ratecv, data, size, 1, 1, 1, state)
self.assertRaises(audioop.error, audioop.lin2ulaw, data, size)
self.assertRaises(audioop.error, audioop.lin2alaw, data, size)
self.assertRaises(audioop.error, audioop.lin2adpcm, data, size, state)
def test_wrongsize(self):
data = b'abcdefgh'
state = None
for size in (-1, 0, 3, 5, 1024):
self.assertRaises(audioop.error, audioop.ulaw2lin, data, size)
self.assertRaises(audioop.error, audioop.alaw2lin, data, size)
self.assertRaises(audioop.error, audioop.adpcm2lin, data, size, state)
def test_main():
run_unittest(TestAudioop)
if __name__ == '__main__':
test_main()
| 45.749367
| 88
| 0.531791
|
import audioop
import sys
import unittest
import struct
from test.test_support import run_unittest
formats = {
1: 'b',
2: 'h',
4: 'i',
}
def pack(width, data):
return struct.pack('=%d%s' % (len(data), formats[width]), *data)
packs = {
1: lambda *data: pack(1, data),
2: lambda *data: pack(2, data),
4: lambda *data: pack(4, data),
}
maxvalues = {w: (1 << (8 * w - 1)) - 1 for w in (1, 2, 4)}
minvalues = {w: -1 << (8 * w - 1) for w in (1, 2, 4)}
datas = {
1: b'\x00\x12\x45\xbb\x7f\x80\xff',
2: packs[2](0, 0x1234, 0x4567, -0x4567, 0x7fff, -0x8000, -1),
4: packs[4](0, 0x12345678, 0x456789ab, -0x456789ab,
0x7fffffff, -0x80000000, -1),
}
INVALID_DATA = [
(b'abc', 0),
(b'abc', 2),
(b'abc', 4),
]
class TestAudioop(unittest.TestCase):
def test_max(self):
for w in 1, 2, 4:
self.assertEqual(audioop.max(b'', w), 0)
p = packs[w]
self.assertEqual(audioop.max(p(5), w), 5)
self.assertEqual(audioop.max(p(5, -8, -1), w), 8)
self.assertEqual(audioop.max(p(maxvalues[w]), w), maxvalues[w])
self.assertEqual(audioop.max(p(minvalues[w]), w), -minvalues[w])
self.assertEqual(audioop.max(datas[w], w), -minvalues[w])
def test_minmax(self):
for w in 1, 2, 4:
self.assertEqual(audioop.minmax(b'', w),
(0x7fffffff, -0x80000000))
p = packs[w]
self.assertEqual(audioop.minmax(p(5), w), (5, 5))
self.assertEqual(audioop.minmax(p(5, -8, -1), w), (-8, 5))
self.assertEqual(audioop.minmax(p(maxvalues[w]), w),
(maxvalues[w], maxvalues[w]))
self.assertEqual(audioop.minmax(p(minvalues[w]), w),
(minvalues[w], minvalues[w]))
self.assertEqual(audioop.minmax(datas[w], w),
(minvalues[w], maxvalues[w]))
def test_maxpp(self):
for w in 1, 2, 4:
self.assertEqual(audioop.maxpp(b'', w), 0)
self.assertEqual(audioop.maxpp(packs[w](*range(100)), w), 0)
self.assertEqual(audioop.maxpp(packs[w](9, 10, 5, 5, 0, 1), w), 10)
self.assertEqual(audioop.maxpp(datas[w], w),
maxvalues[w] - minvalues[w])
def test_avg(self):
for w in 1, 2, 4:
self.assertEqual(audioop.avg(b'', w), 0)
p = packs[w]
self.assertEqual(audioop.avg(p(5), w), 5)
self .assertEqual(audioop.avg(p(5, 8), w), 6)
self.assertEqual(audioop.avg(p(5, -8), w), -2)
self.assertEqual(audioop.avg(p(maxvalues[w], maxvalues[w]), w),
maxvalues[w])
self.assertEqual(audioop.avg(p(minvalues[w], minvalues[w]), w),
minvalues[w])
self.assertEqual(audioop.avg(packs[4](0x50000000, 0x70000000), 4),
0x60000000)
self.assertEqual(audioop.avg(packs[4](-0x50000000, -0x70000000), 4),
-0x60000000)
def test_avgpp(self):
for w in 1, 2, 4:
self.assertEqual(audioop.avgpp(b'', w), 0)
self.assertEqual(audioop.avgpp(packs[w](*range(100)), w), 0)
self.assertEqual(audioop.avgpp(packs[w](9, 10, 5, 5, 0, 1), w), 10)
self.assertEqual(audioop.avgpp(datas[1], 1), 196)
self.assertEqual(audioop.avgpp(datas[2], 2), 50534)
self.assertEqual(audioop.avgpp(datas[4], 4), 3311897002)
def test_rms(self):
for w in 1, 2, 4:
self.assertEqual(audioop.rms(b'', w), 0)
p = packs[w]
self.assertEqual(audioop.rms(p(*range(100)), w), 57)
self.assertAlmostEqual(audioop.rms(p(maxvalues[w]) * 5, w),
maxvalues[w], delta=1)
self.assertAlmostEqual(audioop.rms(p(minvalues[w]) * 5, w),
-minvalues[w], delta=1)
self.assertEqual(audioop.rms(datas[1], 1), 77)
self.assertEqual(audioop.rms(datas[2], 2), 20001)
self.assertEqual(audioop.rms(datas[4], 4), 1310854152)
def test_cross(self):
for w in 1, 2, 4:
self.assertEqual(audioop.cross(b'', w), -1)
p = packs[w]
self.assertEqual(audioop.cross(p(0, 1, 2), w), 0)
self.assertEqual(audioop.cross(p(1, 2, -3, -4), w), 1)
self.assertEqual(audioop.cross(p(-1, -2, 3, 4), w), 1)
self.assertEqual(audioop.cross(p(0, minvalues[w]), w), 1)
self.assertEqual(audioop.cross(p(minvalues[w], maxvalues[w]), w), 1)
def test_add(self):
for w in 1, 2, 4:
self.assertEqual(audioop.add(b'', b'', w), b'')
self.assertEqual(audioop.add(datas[w], b'\0' * len(datas[w]), w),
datas[w])
self.assertEqual(audioop.add(datas[1], datas[1], 1),
b'\x00\x24\x7f\x80\x7f\x80\xfe')
self.assertEqual(audioop.add(datas[2], datas[2], 2),
packs[2](0, 0x2468, 0x7fff, -0x8000, 0x7fff, -0x8000, -2))
self.assertEqual(audioop.add(datas[4], datas[4], 4),
packs[4](0, 0x2468acf0, 0x7fffffff, -0x80000000,
0x7fffffff, -0x80000000, -2))
def test_bias(self):
for w in 1, 2, 4:
for bias in 0, 1, -1, 127, -128, 0x7fffffff, -0x80000000:
self.assertEqual(audioop.bias(b'', w, bias), b'')
self.assertEqual(audioop.bias(datas[1], 1, 1),
b'\x01\x13\x46\xbc\x80\x81\x00')
self.assertEqual(audioop.bias(datas[1], 1, -1),
b'\xff\x11\x44\xba\x7e\x7f\xfe')
self.assertEqual(audioop.bias(datas[1], 1, 0x7fffffff),
b'\xff\x11\x44\xba\x7e\x7f\xfe')
self.assertEqual(audioop.bias(datas[1], 1, -0x80000000),
datas[1])
self.assertEqual(audioop.bias(datas[2], 2, 1),
packs[2](1, 0x1235, 0x4568, -0x4566, -0x8000, -0x7fff, 0))
self.assertEqual(audioop.bias(datas[2], 2, -1),
packs[2](-1, 0x1233, 0x4566, -0x4568, 0x7ffe, 0x7fff, -2))
self.assertEqual(audioop.bias(datas[2], 2, 0x7fffffff),
packs[2](-1, 0x1233, 0x4566, -0x4568, 0x7ffe, 0x7fff, -2))
self.assertEqual(audioop.bias(datas[2], 2, -0x80000000),
datas[2])
self.assertEqual(audioop.bias(datas[4], 4, 1),
packs[4](1, 0x12345679, 0x456789ac, -0x456789aa,
-0x80000000, -0x7fffffff, 0))
self.assertEqual(audioop.bias(datas[4], 4, -1),
packs[4](-1, 0x12345677, 0x456789aa, -0x456789ac,
0x7ffffffe, 0x7fffffff, -2))
self.assertEqual(audioop.bias(datas[4], 4, 0x7fffffff),
packs[4](0x7fffffff, -0x6dcba989, -0x3a987656, 0x3a987654,
-2, -1, 0x7ffffffe))
self.assertEqual(audioop.bias(datas[4], 4, -0x80000000),
packs[4](-0x80000000, -0x6dcba988, -0x3a987655, 0x3a987655,
-1, 0, 0x7fffffff))
def test_lin2lin(self):
for w in 1, 2, 4:
self.assertEqual(audioop.lin2lin(datas[w], w, w), datas[w])
self.assertEqual(audioop.lin2lin(datas[1], 1, 2),
packs[2](0, 0x1200, 0x4500, -0x4500, 0x7f00, -0x8000, -0x100))
self.assertEqual(audioop.lin2lin(datas[1], 1, 4),
packs[4](0, 0x12000000, 0x45000000, -0x45000000,
0x7f000000, -0x80000000, -0x1000000))
self.assertEqual(audioop.lin2lin(datas[2], 2, 1),
b'\x00\x12\x45\xba\x7f\x80\xff')
self.assertEqual(audioop.lin2lin(datas[2], 2, 4),
packs[4](0, 0x12340000, 0x45670000, -0x45670000,
0x7fff0000, -0x80000000, -0x10000))
self.assertEqual(audioop.lin2lin(datas[4], 4, 1),
b'\x00\x12\x45\xba\x7f\x80\xff')
self.assertEqual(audioop.lin2lin(datas[4], 4, 2),
packs[2](0, 0x1234, 0x4567, -0x4568, 0x7fff, -0x8000, -1))
def test_adpcm2lin(self):
self.assertEqual(audioop.adpcm2lin(b'\x07\x7f\x7f', 1, None),
(b'\x00\x00\x00\xff\x00\xff', (-179, 40)))
self.assertEqual(audioop.adpcm2lin(b'\x07\x7f\x7f', 2, None),
(packs[2](0, 0xb, 0x29, -0x16, 0x72, -0xb3), (-179, 40)))
self.assertEqual(audioop.adpcm2lin(b'\x07\x7f\x7f', 4, None),
(packs[4](0, 0xb0000, 0x290000, -0x160000, 0x720000,
-0xb30000), (-179, 40)))
for w in 1, 2, 4:
self.assertEqual(audioop.adpcm2lin(b'\0' * 5, w, None),
(b'\0' * w * 10, (0, 0)))
def test_lin2adpcm(self):
self.assertEqual(audioop.lin2adpcm(datas[1], 1, None),
(b'\x07\x7f\x7f', (-221, 39)))
self.assertEqual(audioop.lin2adpcm(datas[2], 2, None),
(b'\x07\x7f\x7f', (31, 39)))
self.assertEqual(audioop.lin2adpcm(datas[4], 4, None),
(b'\x07\x7f\x7f', (31, 39)))
for w in 1, 2, 4:
self.assertEqual(audioop.lin2adpcm(b'\0' * w * 10, w, None),
(b'\0' * 5, (0, 0)))
def test_lin2alaw(self):
self.assertEqual(audioop.lin2alaw(datas[1], 1),
b'\xd5\x87\xa4\x24\xaa\x2a\x5a')
self.assertEqual(audioop.lin2alaw(datas[2], 2),
b'\xd5\x87\xa4\x24\xaa\x2a\x55')
self.assertEqual(audioop.lin2alaw(datas[4], 4),
b'\xd5\x87\xa4\x24\xaa\x2a\x55')
def test_alaw2lin(self):
encoded = b'\x00\x03\x24\x2a\x51\x54\x55\x58\x6b\x71\x7f'\
b'\x80\x83\xa4\xaa\xd1\xd4\xd5\xd8\xeb\xf1\xff'
src = [-688, -720, -2240, -4032, -9, -3, -1, -27, -244, -82, -106,
688, 720, 2240, 4032, 9, 3, 1, 27, 244, 82, 106]
for w in 1, 2, 4:
self.assertEqual(audioop.alaw2lin(encoded, w),
packs[w](*(x << (w * 8) >> 13 for x in src)))
encoded = ''.join(chr(x) for x in xrange(256))
for w in 2, 4:
decoded = audioop.alaw2lin(encoded, w)
self.assertEqual(audioop.lin2alaw(decoded, w), encoded)
def test_lin2ulaw(self):
self.assertEqual(audioop.lin2ulaw(datas[1], 1),
b'\xff\xad\x8e\x0e\x80\x00\x67')
self.assertEqual(audioop.lin2ulaw(datas[2], 2),
b'\xff\xad\x8e\x0e\x80\x00\x7e')
self.assertEqual(audioop.lin2ulaw(datas[4], 4),
b'\xff\xad\x8e\x0e\x80\x00\x7e')
def test_ulaw2lin(self):
encoded = b'\x00\x0e\x28\x3f\x57\x6a\x76\x7c\x7e\x7f'\
b'\x80\x8e\xa8\xbf\xd7\xea\xf6\xfc\xfe\xff'
src = [-8031, -4447, -1471, -495, -163, -53, -18, -6, -2, 0,
8031, 4447, 1471, 495, 163, 53, 18, 6, 2, 0]
for w in 1, 2, 4:
self.assertEqual(audioop.ulaw2lin(encoded, w),
packs[w](*(x << (w * 8) >> 14 for x in src)))
encoded = ''.join(chr(x) for x in range(127) + range(128, 256))
for w in 2, 4:
decoded = audioop.ulaw2lin(encoded, w)
self.assertEqual(audioop.lin2ulaw(decoded, w), encoded)
def test_mul(self):
for w in 1, 2, 4:
self.assertEqual(audioop.mul(b'', w, 2), b'')
self.assertEqual(audioop.mul(datas[w], w, 0),
b'\0' * len(datas[w]))
self.assertEqual(audioop.mul(datas[w], w, 1),
datas[w])
self.assertEqual(audioop.mul(datas[1], 1, 2),
b'\x00\x24\x7f\x80\x7f\x80\xfe')
self.assertEqual(audioop.mul(datas[2], 2, 2),
packs[2](0, 0x2468, 0x7fff, -0x8000, 0x7fff, -0x8000, -2))
self.assertEqual(audioop.mul(datas[4], 4, 2),
packs[4](0, 0x2468acf0, 0x7fffffff, -0x80000000,
0x7fffffff, -0x80000000, -2))
def test_ratecv(self):
for w in 1, 2, 4:
self.assertEqual(audioop.ratecv(b'', w, 1, 8000, 8000, None),
(b'', (-1, ((0, 0),))))
self.assertEqual(audioop.ratecv(b'', w, 5, 8000, 8000, None),
(b'', (-1, ((0, 0),) * 5)))
self.assertEqual(audioop.ratecv(b'', w, 1, 8000, 16000, None),
(b'', (-2, ((0, 0),))))
self.assertEqual(audioop.ratecv(datas[w], w, 1, 8000, 8000, None)[0],
datas[w])
state = None
d1, state = audioop.ratecv(b'\x00\x01\x02', 1, 1, 8000, 16000, state)
d2, state = audioop.ratecv(b'\x00\x01\x02', 1, 1, 8000, 16000, state)
self.assertEqual(d1 + d2, b'\000\000\001\001\002\001\000\000\001\001\002')
for w in 1, 2, 4:
d0, state0 = audioop.ratecv(datas[w], w, 1, 8000, 16000, None)
d, state = b'', None
for i in range(0, len(datas[w]), w):
d1, state = audioop.ratecv(datas[w][i:i + w], w, 1,
8000, 16000, state)
d += d1
self.assertEqual(d, d0)
self.assertEqual(state, state0)
def test_reverse(self):
for w in 1, 2, 4:
self.assertEqual(audioop.reverse(b'', w), b'')
self.assertEqual(audioop.reverse(packs[w](0, 1, 2), w),
packs[w](2, 1, 0))
def test_tomono(self):
for w in 1, 2, 4:
data1 = datas[w]
data2 = bytearray(2 * len(data1))
for k in range(w):
data2[k::2*w] = data1[k::w]
self.assertEqual(audioop.tomono(str(data2), w, 1, 0), data1)
self.assertEqual(audioop.tomono(str(data2), w, 0, 1), b'\0' * len(data1))
for k in range(w):
data2[k+w::2*w] = data1[k::w]
self.assertEqual(audioop.tomono(str(data2), w, 0.5, 0.5), data1)
def test_tostereo(self):
for w in 1, 2, 4:
data1 = datas[w]
data2 = bytearray(2 * len(data1))
for k in range(w):
data2[k::2*w] = data1[k::w]
self.assertEqual(audioop.tostereo(data1, w, 1, 0), data2)
self.assertEqual(audioop.tostereo(data1, w, 0, 0), b'\0' * len(data2))
for k in range(w):
data2[k+w::2*w] = data1[k::w]
self.assertEqual(audioop.tostereo(data1, w, 1, 1), data2)
def test_findfactor(self):
self.assertEqual(audioop.findfactor(datas[2], datas[2]), 1.0)
self.assertEqual(audioop.findfactor(b'\0' * len(datas[2]), datas[2]),
0.0)
def test_findfit(self):
self.assertEqual(audioop.findfit(datas[2], datas[2]), (0, 1.0))
self.assertEqual(audioop.findfit(datas[2], packs[2](1, 2, 0)),
(1, 8038.8))
self.assertEqual(audioop.findfit(datas[2][:-2] * 5 + datas[2], datas[2]),
(30, 1.0))
def test_findmax(self):
self.assertEqual(audioop.findmax(datas[2], 1), 5)
def test_getsample(self):
for w in 1, 2, 4:
data = packs[w](0, 1, -1, maxvalues[w], minvalues[w])
self.assertEqual(audioop.getsample(data, w, 0), 0)
self.assertEqual(audioop.getsample(data, w, 1), 1)
self.assertEqual(audioop.getsample(data, w, 2), -1)
self.assertEqual(audioop.getsample(data, w, 3), maxvalues[w])
self.assertEqual(audioop.getsample(data, w, 4), minvalues[w])
def test_negativelen(self):
self.assertRaises(audioop.error,
audioop.findmax, ''.join( chr(x) for x in xrange(256)), -2392392)
def test_issue7673(self):
state = None
for data, size in INVALID_DATA:
size2 = size
self.assertRaises(audioop.error, audioop.getsample, data, size, 0)
self.assertRaises(audioop.error, audioop.max, data, size)
self.assertRaises(audioop.error, audioop.minmax, data, size)
self.assertRaises(audioop.error, audioop.avg, data, size)
self.assertRaises(audioop.error, audioop.rms, data, size)
self.assertRaises(audioop.error, audioop.avgpp, data, size)
self.assertRaises(audioop.error, audioop.maxpp, data, size)
self.assertRaises(audioop.error, audioop.cross, data, size)
self.assertRaises(audioop.error, audioop.mul, data, size, 1.0)
self.assertRaises(audioop.error, audioop.tomono, data, size, 0.5, 0.5)
self.assertRaises(audioop.error, audioop.tostereo, data, size, 0.5, 0.5)
self.assertRaises(audioop.error, audioop.add, data, data, size)
self.assertRaises(audioop.error, audioop.bias, data, size, 0)
self.assertRaises(audioop.error, audioop.reverse, data, size)
self.assertRaises(audioop.error, audioop.lin2lin, data, size, size2)
self.assertRaises(audioop.error, audioop.ratecv, data, size, 1, 1, 1, state)
self.assertRaises(audioop.error, audioop.lin2ulaw, data, size)
self.assertRaises(audioop.error, audioop.lin2alaw, data, size)
self.assertRaises(audioop.error, audioop.lin2adpcm, data, size, state)
def test_wrongsize(self):
data = b'abcdefgh'
state = None
for size in (-1, 0, 3, 5, 1024):
self.assertRaises(audioop.error, audioop.ulaw2lin, data, size)
self.assertRaises(audioop.error, audioop.alaw2lin, data, size)
self.assertRaises(audioop.error, audioop.adpcm2lin, data, size, state)
def test_main():
run_unittest(TestAudioop)
if __name__ == '__main__':
test_main()
| true
| true
|
f7178158b29d68a175b0ebeab0012d4e31f2f3e6
| 2,610
|
py
|
Python
|
boofuzz/requests/http_post.py
|
youngcraft/boofuzz-modbus
|
bfeb48345b56797b48079e0620e7b06b27085789
|
[
"Apache-2.0"
] | 23
|
2018-08-11T12:12:33.000Z
|
2022-01-28T10:22:49.000Z
|
boofuzz/requests/http_post.py
|
ctf-fuzzer/boofuzz-modbus
|
bfeb48345b56797b48079e0620e7b06b27085789
|
[
"Apache-2.0"
] | 2
|
2018-07-24T15:15:40.000Z
|
2020-07-12T13:06:56.000Z
|
boofuzz/requests/http_post.py
|
ctf-fuzzer/boofuzz-modbus
|
bfeb48345b56797b48079e0620e7b06b27085789
|
[
"Apache-2.0"
] | 10
|
2018-04-02T13:21:36.000Z
|
2022-01-17T09:20:27.000Z
|
from boofuzz import *
# All POST mimetypes that I could think of/find
# List of all blocks defined here (for easy copy/paste)
"""
sess.connect(s_get("HTTP VERBS POST"))
sess.connect(s_get("HTTP VERBS POST ALL"))
sess.connect(s_get("HTTP VERBS POST REQ"))
"""
# Fuzz POST requests with most MIMETypes known
s_initialize("HTTP VERBS POST ALL")
s_static("POST / HTTP/1.1\r\n")
s_static("Content-Type: ")
s_group("mimetypes", values=[
"audio/basic",
"audio/x-mpeg",
"drawing/x-dwf",
"graphics/x-inventor",
"image/x-portable-bitmap",
"message/external-body",
"message/http",
"message/news",
"message/partial",
"message/rfc822",
"multipart/alternative",
"multipart/appledouble",
"multipart/digest",
"multipart/form-data",
"multipart/header-set",
"multipart/mixed",
"multipart/parallel",
"multipart/related",
"multipart/report",
"multipart/voice-message",
"multipart/x-mixed-replace",
"text/css",
"text/enriched",
"text/html",
"text/javascript",
"text/plain",
"text/richtext",
"text/sgml",
"text/tab-separated-values",
"text/vbscript",
"video/x-msvideo",
"video/x-sgi-movie",
"workbook/formulaone",
"x-conference/x-cooltalk",
"x-form/x-openscape",
"x-music/x-midi",
"x-script/x-wfxclient",
"x-world/x-3dmf"
])
if s_block_start("mime", group="mimetypes"):
s_static("\r\n")
s_static("Content-Length: ")
s_size("post blob", output_format="ascii", signed=True, fuzzable=True)
s_static("\r\n\r\n")
s_block_end()
if s_block_start("post blob"):
s_string("A" * 100 + "=" + "B" * 100)
s_block_end()
s_static("\r\n\r\n")
# Basic fuzz of post payloads
s_initialize("HTTP VERBS POST")
s_static("POST / HTTP/1.1\r\n")
s_static("Content-Type: ")
s_string("application/x-www-form-urlencoded")
s_static("\r\n")
s_static("Content-Length: ")
s_size("post blob", output_format="ascii", signed=True, fuzzable=True)
s_static("\r\n")
if s_block_start("post blob"):
s_string("A" * 100 + "=" + "B" * 100)
s_block_end()
s_static("\r\n\r\n")
# Fuzz POST request MIMETypes
s_initialize("HTTP VERBS POST REQ")
s_static("POST / HTTP/1.1\r\n")
s_static("Content-Type: ")
s_string("application")
s_delim("/")
s_string("x")
s_delim("-")
s_string("www")
s_delim("-")
s_string("form")
s_delim("-")
s_string("urlencoded")
s_static("\r\n")
s_static("Content-Length: ")
s_size("post blob", output_format="ascii", signed=True, fuzzable=True)
s_static("\r\n")
if s_block_start("post blob"):
s_string("A" * 100 + "=" + "B" * 100)
s_block_end()
s_static("\r\n\r\n")
| 23.944954
| 74
| 0.650958
|
from boofuzz import *
s_initialize("HTTP VERBS POST ALL")
s_static("POST / HTTP/1.1\r\n")
s_static("Content-Type: ")
s_group("mimetypes", values=[
"audio/basic",
"audio/x-mpeg",
"drawing/x-dwf",
"graphics/x-inventor",
"image/x-portable-bitmap",
"message/external-body",
"message/http",
"message/news",
"message/partial",
"message/rfc822",
"multipart/alternative",
"multipart/appledouble",
"multipart/digest",
"multipart/form-data",
"multipart/header-set",
"multipart/mixed",
"multipart/parallel",
"multipart/related",
"multipart/report",
"multipart/voice-message",
"multipart/x-mixed-replace",
"text/css",
"text/enriched",
"text/html",
"text/javascript",
"text/plain",
"text/richtext",
"text/sgml",
"text/tab-separated-values",
"text/vbscript",
"video/x-msvideo",
"video/x-sgi-movie",
"workbook/formulaone",
"x-conference/x-cooltalk",
"x-form/x-openscape",
"x-music/x-midi",
"x-script/x-wfxclient",
"x-world/x-3dmf"
])
if s_block_start("mime", group="mimetypes"):
s_static("\r\n")
s_static("Content-Length: ")
s_size("post blob", output_format="ascii", signed=True, fuzzable=True)
s_static("\r\n\r\n")
s_block_end()
if s_block_start("post blob"):
s_string("A" * 100 + "=" + "B" * 100)
s_block_end()
s_static("\r\n\r\n")
s_initialize("HTTP VERBS POST")
s_static("POST / HTTP/1.1\r\n")
s_static("Content-Type: ")
s_string("application/x-www-form-urlencoded")
s_static("\r\n")
s_static("Content-Length: ")
s_size("post blob", output_format="ascii", signed=True, fuzzable=True)
s_static("\r\n")
if s_block_start("post blob"):
s_string("A" * 100 + "=" + "B" * 100)
s_block_end()
s_static("\r\n\r\n")
s_initialize("HTTP VERBS POST REQ")
s_static("POST / HTTP/1.1\r\n")
s_static("Content-Type: ")
s_string("application")
s_delim("/")
s_string("x")
s_delim("-")
s_string("www")
s_delim("-")
s_string("form")
s_delim("-")
s_string("urlencoded")
s_static("\r\n")
s_static("Content-Length: ")
s_size("post blob", output_format="ascii", signed=True, fuzzable=True)
s_static("\r\n")
if s_block_start("post blob"):
s_string("A" * 100 + "=" + "B" * 100)
s_block_end()
s_static("\r\n\r\n")
| true
| true
|
f71781cb54b95efe9a515ff1e8acbb559ba8adb6
| 70,600
|
py
|
Python
|
pyspeckit/spectrum/readers/read_class.py
|
FaceThePirate/pyspeckit
|
734b9f81d440ca3a6db9bf68e9409dbddb52d08b
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
pyspeckit/spectrum/readers/read_class.py
|
FaceThePirate/pyspeckit
|
734b9f81d440ca3a6db9bf68e9409dbddb52d08b
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
pyspeckit/spectrum/readers/read_class.py
|
FaceThePirate/pyspeckit
|
734b9f81d440ca3a6db9bf68e9409dbddb52d08b
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
"""
------------------------
GILDAS CLASS file reader
------------------------
Read a CLASS file into an :class:`pyspeckit.spectrum.ObsBlock`
"""
from __future__ import print_function
from six.moves import xrange
from six import iteritems
import six
import astropy.io.fits as pyfits
import numpy
import numpy as np
from numpy import pi
from astropy import log
# from astropy.time import Time
from astropy import units as u
import pyspeckit
import sys
import re
try:
from astropy.utils.console import ProgressBar
except ImportError:
ProgressBar = lambda x: None
ProgressBar.update = lambda x: None
import struct
import time
# 'range' is needed as a keyword
irange = range
def print_timing(func):
"""
Prints execution time of decorated function.
Included here because CLASS files can take a little while to read;
this should probably be replaced with a progressbar
"""
def wrapper(*arg,**kwargs):
t1 = time.time()
res = func(*arg,**kwargs)
t2 = time.time()
log.info('%s took %0.5g s' % (func.__name__, (t2-t1)))
return res
wrapper.__doc__ = func.__doc__
return wrapper
def ensure_bytes(string):
"""
Ensure a given string is in byte form
"""
if six.PY3:
return bytes(string, 'utf-8')
else:
return str(string)
""" Specification: http://iram.fr/IRAMFR/GILDAS/doc/html/class-html/node58.html """
filetype_dict = {'1A ':'Multiple_IEEE',
'1 ':'Multiple_Vax',
'1B ':'Multiple_EEEI',
'2A ':'v2',
'2 ':'v2',
'2B ':'v2',
'9A ':'Single_IEEE',
'9 ':'Single_Vax',
'9B ':'Single_EEEI'}
for key in list(filetype_dict.keys()):
filetype_dict[ensure_bytes(key)] = filetype_dict[key]
fileversion_dict = {'1A ':'v1',
'2A ':'v2',
'9A ':'v1', # untested
}
for key in list(fileversion_dict.keys()):
fileversion_dict[ensure_bytes(key)] = fileversion_dict[key]
record_lengths = {'1A': 512,
'2A': 1024*4}
header_id_numbers = {0: 'USER CODE',
-1: 'COMMENT',
-2: 'GENERAL',
-3: 'POSITION',
-4: 'SPECTRO',
-5: 'BASELINE',
-6: 'HISTORY',
-7: 'UNKNOWN-APEX',
# -8: 'SWITCH',
-9: 'GAUSSFIT', # "private"; see class-interfaces-private.f90
-10: 'DRIFT',
-11: 'BEAMSWITCH', # "private"; see class-interfaces-private.f90
-12: 'SHELLFIT', # "private"; see class-interfaces-private.f90
-13: 'NH3FIT', # "private"; see class-interfaces-private.f90
-14: 'CALIBRATION',
-18: 'ABSFIT', # "private"; see class-interfaces-private.f90
}
header_id_lengths = {-2: 9, # may really be 10?
-3: 17,
-4: 17,
-5: None, # variable length
-6: 3, # variable length
-14: 25,
}
# from packages/classic/lib/classic_mod.f90
filedescv2_nw1=14
"""
GENERAL
integer(kind=obsnum_length) :: num ! [ ] Observation number
integer(kind=4) :: ver ! [ ] Version number
integer(kind=4) :: teles(3) ! [ ] Telescope name
integer(kind=4) :: dobs ! [MJD-60549] Date of observation
integer(kind=4) :: dred ! [MJD-60549] Date of reduction
integer(kind=4) :: typec ! [ code] Type of coordinates
integer(kind=4) :: kind ! [ code] Type of data
integer(kind=4) :: qual ! [ code] Quality of data
integer(kind=4) :: subscan ! [ ] Subscan number
integer(kind=obsnum_length) :: scan ! [ ] Scan number
! Written in the entry
real(kind=8) :: ut ! 1-2 [ rad] UT of observation
real(kind=8) :: st ! 3-4 [ rad] LST of observation
real(kind=4) :: az ! 5 [ rad] Azimuth
real(kind=4) :: el ! 6 [ rad] Elevation
real(kind=4) :: tau ! 7 [neper] Opacity
real(kind=4) :: tsys ! 8 [ K] System temperature
real(kind=4) :: time ! 9 [ s] Integration time
! Not in this section in file
integer(kind=4) :: xunit ! [ code] X unit (if X coordinates section is present)
! NOT in data ---
character(len=12) :: cdobs ! [string] Duplicate of dobs
character(len=12) :: cdred ! [string] Duplicate of dred
"""
keys_lengths = {
'unknown': [
#('NUM' ,1,'int32'), # Observation number
('VER' ,1,'int32'), # Version number
('TELES' ,3,'|S12') , # Telescope name
('DOBS' ,1,'int32'), # Date of observation
('DRED' ,1,'int32'), # Date of reduction
('TYPEC' ,1,'int32'), # Type of coordinates
('KIND' ,1,'int32'), # Type of data
('QUAL' ,1,'int32'), # Quality of data
('SCAN' ,1,'int32'), # Scan number
('SUBSCAN' ,1,'int32'), # Subscan number
],
'COMMENT': [ # -1
('LTEXT',1,'int32'), # integer(kind=4) :: ltext ! Length of comment
('CTEXT',1024//4,'|S1024'), # character ctext*1024 ! Comment string
],
'GENERAL': [ # -2
('UT' ,2,'float64'), # rad UT of observation
('ST' ,2,'float64'), # rad LST of observation
('AZ' ,1,'float32'), # rad Azimuth
('EL' ,1,'float32'), # rad Elevation
('TAU' ,1,'float32'), # neper Opacity
('TSYS' ,1,'float32'), # K System temperature
('TIME' ,1,'float32'), # s Integration time
# XUNIT should not be there?
#( 'XUNIT' ,1,'int32'), # code X unit (if xcoord_sec is present)
] ,
'POSITION': [ # -3
('SOURC',3,'|S12') , # [ ] Source name
('EPOCH',1,'float32'), # [ ] Epoch of coordinates
('LAM' ,2,'float64'), #[rad] Lambda
('BET' ,2,'float64'), #[rad] Beta
('LAMOF',1,'float32'), # [rad] Offset in Lambda
('BETOF',1,'float32'), # [rad] Offset in Beta
('PROJ' ,1,'int32') , # [rad] Projection system
('SL0P' ,1,'float64'), # lambda of descriptive system # MAY NOT EXIST IN OLD CLASS
('SB0P' ,1,'float64'), # beta of descriptive system # MAY NOT EXIST IN OLD CLASS
('SK0P' ,1,'float64'), # angle of descriptive system # MAY NOT EXIST IN OLD CLASS
],
'SPECTRO': [ # -4
#('align' ,1,'int32'), # [ ] Alignment padding
('LINE' ,3,'|S12'), # [ ] Line name
('RESTF' ,2,'float64'), # [ MHz] Rest frequency
('NCHAN' ,1,'int32'), # [ ] Number of channels
('RCHAN' ,1,'float32'), # [ ] Reference channels
('FRES' ,1,'float32'), # [ MHz] Frequency resolution
('FOFF' ,1,'float32'), # [ MHz] Frequency offset
('VRES' ,1,'float32'), # [km/s] Velocity resolution
('VOFF' ,1,'float32'), # [km/s] Velocity at reference channel
('BAD' ,1,'float32'), # [ ] Blanking value
#('ALIGN_1',1,'int32'), # [ ] Alignment padding
('IMAGE' ,2,'float64'), # [ MHz] Image frequency
#('ALIGN_2',1,'int32'), # [ ] Alignment padding
('VTYPE' ,1,'int32'), # [code] Type of velocity
('DOPPLER',2,'float64'), # [ ] Doppler factor = -V/c (CLASS convention)
],
'CALIBRATION': [ # -14
('ALIGN',1,'int32'), # BUFFER (it's a zero - it is not declared in the docs!!!!)
('BEEFF',1,'float32'), # [ ] Beam efficiency
('FOEFF',1,'float32'), # [ ] Forward efficiency
('GAINI',1,'float32'), # [ ] Image/Signal gain ratio
('H2OMM',1,'float32'), # [ mm] Water vapor content
('PAMB',1,'float32'), # [ hPa] Ambient pressure
('TAMB',1,'float32'), # [ K] Ambient temperature
('TATMS',1,'float32'), # [ K] Atmosphere temp. in signal band
('TCHOP',1,'float32'), # [ K] Chopper temperature
('TCOLD',1,'float32'), # [ K] Cold load temperature
('TAUS',1,'float32'), # [neper] Opacity in signal band
('TAUI',1,'float32'), # [neper] Opacity in image band
('TATMI',1,'float32'), # [ K] Atmosphere temp. in image band
('TREC',1,'float32'), # [ K] Receiver temperature
('CMODE',1,'int32'), # [ code] Calibration mode
('ATFAC',1,'float32'), # [ ] Applied calibration factor
('ALTI',1,'float32'), # [ m] Site elevation
('COUNT',3,'3float32'), # [count] Power of Atm., Chopp., Cold
('LCALOF',1,'float32'), # [ rad] Longitude offset for sky measurement
('BCALOF',1,'float32'), # [ rad] Latitude offset for sky measurement
('GEOLONG',1,'float64'), # [ rad] Geographic longitude of observatory # MAY NOT EXIST IN OLD CLASS
('GEOLAT',1,'float64'), # [ rad] Geographic latitude of observatory # MAY NOT EXIST IN OLD CLASS
],
'BASELINE':[
('DEG',1,'int32'), #! [ ] Degree of last baseline
('SIGFI',1,'float32'), #! [Int. unit] Sigma
('AIRE',1,'float32'), #! [Int. unit] Area under windows
('NWIND',1,'int32'), #! [ ] Number of line windows
# WARNING: These should probably have 'n', the second digit, = NWIND
# The docs are really unclear about this, they say "W1(MWIND)"
('W1MWIND',1,'float32'), #! [km/s] Lower limits of windows
('W2MWIND',1,'float32'), #! [km/s] Upper limits of windows
('SINUS',3,'float32'), #![] Sinus baseline results
],
'DRIFT':[ # 16?
('FREQ',1,'float64') , #! [ MHz] Rest frequency real(kind=8) ::
('WIDTH',1,'float32'), #! [ MHz] Bandwidth real(kind=4) ::
('NPOIN',1,'int32') , #! [ ] Number of data points integer(kind=4) ::
('RPOIN',1,'float32'), #! [ ] Reference point real(kind=4) ::
('TREF',1,'float32') , #! [ ?] Time at reference real(kind=4) ::
('AREF',1,'float32') , #! [ rad] Angular offset at ref. real(kind=4) ::
('APOS',1,'float32') , #! [ rad] Position angle of drift real(kind=4) ::
('TRES',1,'float32') , #! [ ?] Time resolution real(kind=4) ::
('ARES',1,'float32') , #! [ rad] Angular resolution real(kind=4) ::
('BAD',1,'float32') , #! [ ] Blanking value real(kind=4) ::
('CTYPE',1,'int32') , #! [code] Type of offsets integer(kind=4) ::
('CIMAG',1,'float64'), #! [ MHz] Image frequency real(kind=8) ::
('COLLA',1,'float32'), #! [ ?] Collimation error Az real(kind=4) ::
('COLLE',1,'float32'), #! [ ?] Collimation error El real(kind=4) ::
],
}
def _read_bytes(f, n):
'''Read the next `n` bytes (from idlsave)'''
return f.read(n)
"""
Warning: UNCLEAR what endianness should be!
Numpy seemed to get it right, and I think numpy assumes NATIVE endianness
"""
def _read_byte(f):
'''Read a single byte (from idlsave)'''
return numpy.uint8(struct.unpack('=B', f.read(4)[:1])[0])
def _read_int16(f):
'''Read a signed 16-bit integer (from idlsave)'''
return numpy.int16(struct.unpack('=h', f.read(4)[2:4])[0])
def _read_int32(f):
'''Read a signed 32-bit integer (from idlsave)'''
return numpy.int32(struct.unpack('=i', f.read(4))[0])
def _read_int64(f):
'''Read a signed 64-bit integer '''
return numpy.int64(struct.unpack('=q', f.read(8))[0])
def _read_float32(f):
'''Read a 32-bit float (from idlsave)'''
return numpy.float32(struct.unpack('=f', f.read(4))[0])
def _align_32(f):
'''Align to the next 32-bit position in a file (from idlsave)'''
pos = f.tell()
if pos % 4 != 0:
f.seek(pos + 4 - pos % 4)
return
def _read_word(f, length):
    """Read *length* bytes, then realign the file to a 4-byte boundary.

    Returns None when length <= 0 (matching the historical behavior for
    zero-length words).
    """
    if length <= 0:
        return None
    chars = _read_bytes(f, length)
    _align_32(f)
    return chars
def _read_int(f):
return struct.unpack('i',f.read(4))
def is_ascii(s):
    """Check if there are non-ascii characters in Unicode string

    Parameters
    ----------
    s : str
        The string to be checked

    Returns
    -------
    is_ascii : bool
        Returns True if all characters in the string are ascii. False
        otherwise.
    """
    try:
        return len(s) == len(s.decode('ascii').encode('utf-8'))
    except UnicodeDecodeError:
        # BUGFIX: bytes containing non-ascii values used to raise
        # UnicodeDecodeError out of this function instead of returning
        # False as documented (callers use the False result to detect
        # misaligned index reads).
        return False
def is_all_null(s):
    """Return True when every element of *s* is a NUL character.

    Works for both str and bytes: iterating a Python-3 bytes object yields
    ints, so 0 must be accepted as well.  (The previous implementation
    compared bytes elements against '\\x00'/b'\\x00' and therefore always
    returned False for non-empty bytes input under Python 3.)
    """
    return all(x in ('\x00', b'\x00', 0) for x in s)
"""
from clic_file.f90: v1, v2
integer(kind=4) :: bloc ! 1 : observation address [records] integer(kind=8) :: bloc ! 1- 2: observation address [records] integer(kind=4) :: bloc ! 1 : block read from index
integer(kind=4) :: num ! 2 : observation number integer(kind=4) :: word ! 3 : address offset [4-bytes] integer(kind=4) :: num ! 2 : number read
integer(kind=4) :: ver ! 3 : observation version integer(kind=4) :: ver ! 4 : observation version integer(kind=4) :: ver ! 3 : version read from index
integer(kind=4) :: sourc(3) ! 4- 6: source name integer(kind=8) :: num ! 5- 6: observation number character(len=12) :: csour ! 4- 6: source read from index
integer(kind=4) :: line(3) ! 7- 9: line name integer(kind=4) :: sourc(3) ! 7- 9: source name character(len=12) :: cline ! 7- 9: line read from index
integer(kind=4) :: teles(3) ! 10-12: telescope name integer(kind=4) :: line(3) ! 10-12: line name character(len=12) :: ctele ! 10-12: telescope read from index
integer(kind=4) :: dobs ! 13 : observation date [class_date] integer(kind=4) :: teles(3) ! 13-15: telescope name integer(kind=4) :: dobs ! 13 : date obs. read from index
integer(kind=4) :: dred ! 14 : reduction date [class_date] integer(kind=4) :: dobs ! 16 : observation date [class_date] integer(kind=4) :: dred ! 14 : date red. read from index
real(kind=4) :: off1 ! 15 : lambda offset [radian] integer(kind=4) :: dred ! 17 : reduction date [class_date] real(kind=4) :: off1 ! 15 : read offset 1
real(kind=4) :: off2 ! 16 : beta offset [radian] real(kind=4) :: off1 ! 18 : lambda offset [radian] real(kind=4) :: off2 ! 16 : read offset 2
integer(kind=4) :: typec ! 17 : coordinates types real(kind=4) :: off2 ! 19 : beta offset [radian] integer(kind=4) :: type ! 17 : type of read offsets
integer(kind=4) :: kind ! 18 : data kind integer(kind=4) :: typec ! 20 : coordinates types integer(kind=4) :: kind ! 18 : type of observation
integer(kind=4) :: qual ! 19 : data quality integer(kind=4) :: kind ! 21 : data kind integer(kind=4) :: qual ! 19 : Quality read from index
integer(kind=4) :: scan ! 20 : scan number integer(kind=4) :: qual ! 22 : data quality integer(kind=4) :: scan ! 20 : Scan number read from index
integer(kind=4) :: proc ! 21 : procedure type integer(kind=4) :: scan ! 23 : scan number real(kind=4) :: posa ! 21 : Position angle
integer(kind=4) :: itype ! 22 : observation type integer(kind=4) :: proc ! 24 : procedure type integer(kind=4) :: subscan ! 22 : Subscan number
real(kind=4) :: houra ! 23 : hour angle [radian] integer(kind=4) :: itype ! 25 : observation type integer(kind=4) :: pad(10) ! 23-32: Pad to 32 words
integer(kind=4) :: project ! 24 : project name real(kind=4) :: houra ! 26 : hour angle [radian]
integer(kind=4) :: pad1 ! 25 : unused word integer(kind=4) :: project(2) ! 27 : project name
integer(kind=4) :: bpc ! 26 : baseline bandpass cal status integer(kind=4) :: bpc ! 29 : baseline bandpass cal status
integer(kind=4) :: ic ! 27 : instrumental cal status integer(kind=4) :: ic ! 30 : instrumental cal status
integer(kind=4) :: recei ! 28 : receiver number integer(kind=4) :: recei ! 31 : receiver number
real(kind=4) :: ut ! 29 : UT [s] real(kind=4) :: ut ! 32 : UT [s]
integer(kind=4) :: pad2(3) ! 30-32: padding to 32 4-bytes word
equivalently
integer(kind=obsnum_length) :: num ! [ ] Observation number
integer(kind=4) :: ver ! [ ] Version number
integer(kind=4) :: teles(3) ! [ ] Telescope name
integer(kind=4) :: dobs ! [MJD-60549] Date of observation
integer(kind=4) :: dred ! [MJD-60549] Date of reduction
integer(kind=4) :: typec ! [ code] Type of coordinates
integer(kind=4) :: kind ! [ code] Type of data
integer(kind=4) :: qual ! [ code] Quality of data
integer(kind=4) :: subscan ! [ ] Subscan number
integer(kind=obsnum_length) :: scan ! [ ] Scan number
"""
"""
index.f90:
call conv%read%i8(data(1), indl%bloc, 1) ! bloc
call conv%read%i4(data(3), indl%word, 1) ! word
call conv%read%i8(data(4), indl%num, 1) ! num
call conv%read%i4(data(6), indl%ver, 1) ! ver
call conv%read%cc(data(7), indl%csour, 3) ! csour
call conv%read%cc(data(10),indl%cline, 3) ! cline
call conv%read%cc(data(13),indl%ctele, 3) ! ctele
call conv%read%i4(data(16),indl%dobs, 1) ! dobs
call conv%read%i4(data(17),indl%dred, 1) ! dred
call conv%read%r4(data(18),indl%off1, 1) ! off1
call conv%read%r4(data(19),indl%off2, 1) ! off2
call conv%read%i4(data(20),indl%type, 1) ! type
call conv%read%i4(data(21),indl%kind, 1) ! kind
call conv%read%i4(data(22),indl%qual, 1) ! qual
call conv%read%r4(data(23),indl%posa, 1) ! posa
call conv%read%i8(data(24),indl%scan, 1) ! scan
call conv%read%i4(data(26),indl%subscan,1) ! subscan
if (isv3) then
call conv%read%r8(data(27),indl%ut, 1) ! ut
else
"""
def _read_indices(f, file_description):
    """Read every entry index from an opened CLASS file.

    Parameters
    ----------
    f : file
        Open binary file handle (indices are located by absolute position,
        so the current file position does not matter).
    file_description : dict
        Output of ``_read_first_record``; provides 'version' and 'xnext'
        (the next *available* entry number — entries are 1-indexed, so
        there are xnext-1 existing entries).

    Returns
    -------
    list of dict
        One index dict per entry, as returned by ``_read_index``.
    """
    # Version-specific parsing is handled inside _read_index.  The
    # previously computed `extension_positions` array was never used
    # (dead code left over from an older reading strategy) and has been
    # removed.
    return [_read_index(f,
                        filetype=file_description['version'],
                        # 1-indexed files
                        entry_number=ii+1,
                        file_description=file_description,
                        )
            for ii in range(file_description['xnext']-1)]
def _find_index(entry_number, file_description, return_position=False):
if file_description['gex'] == 10:
kex=(entry_number-1)//file_description['lex1'] + 1
else:
# exponential growth:
#kex = gi8_dicho(file_description['nex'], file_description['lexn'], entry_number) - 1
kex = len([xx for xx in file_description['lexn'] if xx<entry_number])
ken = entry_number - file_description['lexn'][kex-1]
#! Find ken (relative entry number in the extension, starts from 1)
#ken = entry_num - file%desc%lexn(kex-1)
kb = ((ken-1)*file_description['lind'])//file_description['reclen']
#kb = ((ken-1)*file%desc%lind)/file%desc%reclen ! In the extension, the
# ! relative record position (as an offset, starts from 0) where the
# ! Entry Index starts. NB: there can be a non-integer number of Entry
# ! Indexes per record
# Subtract 1: 'aex' is 1-indexed
kbl = (file_description['aex'][kex-1]+kb)-1
# kbl = file%desc%aex(kex)+kb ! The absolute record number where the Entry Index goes
k = ((ken-1)*file_description['lind']) % file_description['reclen']
#k = mod((ken-1)*file%desc%lind,file%desc%reclen)+1 ! = in the record, the
# ! first word of the Entry Index of the entry number 'entry_num'
if return_position:
return (kbl*file_description['reclen']+k)*4
else:
return kbl,k
def _read_index(f, filetype='v1', DEBUG=False, clic=False, position=None,
                entry_number=None, file_description=None):
    """Read a single entry index from an open CLASS file.

    Parameters
    ----------
    f : file
        Open binary file handle.
    filetype : str or int
        File version: '1A ', 'v1' or 1 for the old format; '2A ', 'v2' or
        2 for the new format.
    DEBUG : bool
        Print diagnostics when a v1 index read is misaligned.
    clic : bool
        If True, parse the tail of a v1 index using the CLIC layout
        instead of the CLASS layout.
    position : int or None
        Absolute byte offset to seek to before reading.
    entry_number : int or None
        1-based entry number; when given, the position is computed with
        _find_index (requires *file_description*).
    file_description : dict or None
        Output of _read_first_record; needed only with *entry_number*.

    Returns
    -------
    dict
        The index entry with version-specific keys plus normalized
        aliases (SOURC, LINE, DOBS, SCAN, ...) so both formats can be
        consumed uniformly downstream.

    Raises
    ------
    ValueError
        If the source/line/telescope strings are not valid ASCII (taken
        as evidence of a misaligned read).
    """
    if position is not None:
        f.seek(position)
    if entry_number is not None:
        indpos = _find_index(entry_number, file_description, return_position=True)
        f.seek(indpos)
    x0 = f.tell()
    if filetype in ('1A ','v1', 1):
        log.debug('Index filetype 1A')
        index = {
                "XBLOC":_read_int32(f),
                "XNUM":_read_int32(f),
                "XVER":_read_int32(f),
                "XSOURC":_read_word(f,12),
                "XLINE":_read_word(f,12),
                "XTEL":_read_word(f,12),
                "XDOBS":_read_int32(f),
                "XDRED":_read_int32(f),
                "XOFF1":_read_float32(f),# first offset (real, radians)
                "XOFF2":_read_float32(f),# second offset (real, radians)
                "XTYPE":_read_int32(f),# coordinate system ('EQ'', 'GA', 'HO')
                "XKIND":_read_int32(f),# Kind of observation (0: spectral, 1: continuum, )
                "XQUAL":_read_int32(f),# Quality (0-9)
                "XSCAN":_read_int32(f),# Scan number
        }
        index['BLOC'] = index['XBLOC'] # v2 compatibility
        index['WORD'] = 1 # v2 compatibility
        index['SOURC'] = index['CSOUR'] = index['XSOURC']
        index['DOBS'] = index['CDOBS'] = index['XDOBS']
        index['CTELE'] = index['XTEL']
        index['LINE'] = index['XLINE']
        index['OFF1'] = index['XOFF1']
        index['OFF2'] = index['XOFF2']
        index['QUAL'] = index['XQUAL']
        index['SCAN'] = index['XSCAN']
        index['KIND'] = index['XKIND']
        if clic: # use header set up in clic
            nextchunk = {
                "XPROC":_read_int32(f),# "procedure type"
                "XITYPE":_read_int32(f),#
                "XHOURANG":_read_float32(f),#
                "XPROJNAME":_read_int32(f),#
                "XPAD1":_read_int32(f),
                "XBPC" :_read_int32(f),
                "XIC" :_read_int32(f),
                "XRECEI" :_read_int32(f),
                "XUT":_read_float32(f),
                "XPAD2":numpy.fromfile(f,count=3,dtype='int32') # BLANK is NOT ALLOWED!!! It is a special KW
            }
        else:
            nextchunk = {"XPOSA":_read_float32(f),
                         "XSUBSCAN":_read_int32(f),
                         'XPAD2': numpy.fromfile(f,count=10,dtype='int32'),
                        }
            nextchunk['SUBSCAN'] = nextchunk['XSUBSCAN']
            nextchunk['POSA'] = nextchunk['XPOSA']
        index.update(nextchunk)
        # A v1 index record is 128 bytes; swallow any remainder so the
        # file stays aligned for the next read.
        if (f.tell() - x0 != 128):
            missed_bits = (f.tell()-x0)
            X = f.read(128-missed_bits)
            if DEBUG: print("read_index missed %i bits: %s" % (128-missed_bits,X))
            #raise IndexError("read_index did not successfully read 128 bytes at %i.  Read %i bytes." % (x0,f.tell()-x0))
        if any(not is_ascii(index[x]) for x in ('XSOURC','XLINE','XTEL')):
            raise ValueError("Invalid index read from {0}.".format(x0))
    elif filetype in ('2A ','v2', 2):
        log.debug('Index filetype 2A')
        # Layout follows index.f90 (see the reference block above this
        # function): i8 bloc, i4 word, i8 num, ...
        index = {
                "BLOC"   : _read_int64(f)  , #(data(1), 1)  ! bloc
                "WORD"   : _read_int32(f)  , #(data(3), 1)  ! word
                "NUM"    : _read_int64(f)  , #(data(4), 1)  ! num
                "VER"    : _read_int32(f)  , #(data(6), 1)  ! ver
                "CSOUR"  : _read_word(f,12), #(data(7), 3)  ! csour
                "CLINE"  : _read_word(f,12), #(data(10), 3) ! cline
                "CTELE"  : _read_word(f,12), #(data(13), 3) ! ctele
                "DOBS"   : _read_int32(f)  , #(data(16), 1) ! dobs
                "DRED"   : _read_int32(f)  , #(data(17), 1) ! dred
                "OFF1"   : _read_float32(f), #(data(18), 1) ! off1
                "OFF2"   : _read_float32(f), #(data(19), 1) ! off2
                "TYPE"   : _read_int32(f)  , #(data(20), 1) ! type
                "KIND"   : _read_int32(f)  , #(data(21), 1) ! kind
                "QUAL"   : _read_int32(f)  , #(data(22), 1) ! qual
                "POSA"   : _read_float32(f), #(data(23), 1) ! posa
                "SCAN"   : _read_int64(f)  , #(data(24), 1) ! scan
                "SUBSCAN": _read_int32(f)  , #(data(26), 1) ! subscan
                }
        #last24bits = f.read(24)
        #log.debug("Read 24 bits: '{0}'".format(last24bits))
        if any((is_all_null(index[x]) or not is_ascii(index[x]))
               for x in ('CSOUR','CLINE','CTELE')):
            raise ValueError("Invalid index read from {0}.".format(x0))
        index['SOURC'] = index['XSOURC'] = index['CSOUR']
        index['LINE'] = index['XLINE'] = index['CLINE']
        index['XKIND'] = index['KIND']
        try:
            index['DOBS'] = index['XDOBS'] = index['CDOBS']
        except KeyError:
            index['CDOBS'] = index['XDOBS'] = index['DOBS']
    else:
        raise NotImplementedError("Filetype {0} not implemented.".format(filetype))
    # from kernel/lib/gsys/date.f90: gag_julda
    # NOTE(review): 60549 is the CLASS day-count -> MJD offset; the DOBS
    # formula below appears to convert to a (Julian-epoch-like) year —
    # confirm against the GILDAS gag_julda source.
    index['MJD'] = index['DOBS'] + 60549
    class_dobs = index['DOBS']
    index['DOBS'] = ((class_dobs + 365*2025)/365.2425 + 1)
    # SLOW
    #index['DATEOBS'] = Time(index['DOBS'], format='jyear')
    #index['DATEOBSS'] = index['DATEOBS'].iso
    log.debug("Indexing finished at {0}".format(f.tell()))
    return index
def _read_header(f, type=0, position=None):
    """
    Read a header entry from a CLASS file
    (helper function)

    Parameters
    ----------
    f : file
        Open binary file handle.
    type : int
        Section identifier; must be a key of the module-level
        ``keys_lengths`` table describing the (name, count, dtype) layout.
    position : int or None
        Absolute byte offset to seek to first.

    Returns
    -------
    dict
        The parsed header section, or an empty dict for an unknown
        section type (historical behavior preserved).
    """
    if position is not None:
        f.seek(position)
    if type in keys_lengths:
        hdrsec = [(x[0],numpy.fromfile(f,count=1,dtype=x[2])[0])
                  for x in keys_lengths[type]]
        return dict(hdrsec)
    # Unknown section type: return an empty header.  (The original code
    # had an unreachable `raise ValueError` after this return; the dead
    # statement has been removed, keeping the empty-dict behavior that
    # callers rely on.)
    return {}
def _read_first_record(f):
    """Dispatch to the v1 or v2 first-record reader based on the 4-byte
    file code at the start of the file."""
    f.seek(0)
    code = f.read(4)
    version = fileversion_dict[code]
    if version == 'v1':
        return _read_first_record_v1(f)
    if version == 'v2':
        return _read_first_record_v2(f)
    raise ValueError("Unrecognized filetype {0}".format(code))
def _read_first_record_v1(f, record_length_words=128):
    r"""
    Read the first record of a v1 ('1A') CLASS file and return the file
    description dict used by the index/observation readers.

    Position & Parameter & Fortran Kind & Purpose \\
    \hline
    1 & {\tt code} & Character*4 & File code \\
    2 & {\tt next} & Integer*4 & Next free record \\
    3 & {\tt lex} & Integer*4 & Length of first extension (number of entries) \\
    4 & {\tt nex} & Integer*4 & Number of extensions \\
    5 & {\tt xnext} & Integer*4 & Next available entry number \\
    6:2*{\tt reclen} & {\tt ex(:)} & Integer*4 & Array of extension addresses

    from classic_mod.f90:
    integer(kind=4) :: code ! 1 File code
    integer(kind=4) :: next ! 2 Next free record
    integer(kind=4) :: lex ! 3 Extension length (number of entries)
    integer(kind=4) :: nex ! 4 Number of extensions
    integer(kind=4) :: xnext ! 5 Next available entry number
    integer(kind=4) :: aex(mex_v1) ! 6:256 Extension addresses

    from old (<dec2013) class, file.f90:
    read(ilun,rec=1,err=11,iostat=ier) ibx%code,ibx%next, &
    & ibx%ilex,ibx%imex,ibx%xnext

    also uses filedesc_v1tov2 from classic/lib/file.f90
    """
    # OLD NOTES
    # hdr = header
    # hdr.update(obshead) # re-overwrite things
    # hdr.update({'OBSNUM':obsnum,'RECNUM':spcount})
    # hdr.update({'RA':hdr['LAM']/pi*180,'DEC':hdr['BET']/pi*180})
    # hdr.update({'RAoff':hdr['LAMOF']/pi*180,'DECoff':hdr['BETOF']/pi*180})
    # hdr.update({'OBJECT':hdr['SOURC'].strip()})
    # hdr.update({'BUNIT':'Tastar'})
    # hdr.update({'EXPOSURE':hdr['TIME']})
    f.seek(0)
    # Fixed v1 values (gex, vind, lind, ...) mirror the classic_mod.f90
    # v1 constants so that the same dict shape works for v1 and v2 files.
    file_description = {
        'code': f.read(4),
        'next': _read_int32(f),
        'lex': _read_int32(f),
        'nex': _read_int32(f),
        'xnext': _read_int32(f),
        'gex': 10.,
        'vind': 1, # classic_vind_v1 packages/classic/lib/classic_mod.f90
        'version': 1,
        'nextrec': 3,
        'nextword': 1,
        'lind': 32, #classic_lind_v1 packages/classic/lib/classic_mod.f90
        'kind': 'unknown',
        'flags': 0,
    }
    file_description['reclen'] = record_length_words # should be 128w = 512 bytes
    # Extension address table fills the rest of the 1024-byte record;
    # zero entries are unused slots.
    ex = np.fromfile(f, count=(record_length_words*2-5), dtype='int32')
    file_description['ex'] = ex[ex!=0]
    file_description['nextrec'] = file_description['next'] # this can't be...
    file_description['lex1'] = file_description['lex'] # number of entries
    # Cumulative entry counts per extension (linear growth in v1).
    file_description['lexn'] = (np.arange(file_description['nex']+1) *
                                file_description['lex1'])
    file_description['nentries'] = np.sum(file_description['lexn'])
    file_description['aex'] = file_description['ex'][:file_description['nex']]
    #file_description['version'] = fileversion_dict[file_description['code']]
    # Sanity check: 4 + 4*4 + (256-5)*4 = 1024 bytes consumed.
    assert f.tell() == 1024
    # Something is not quite right with the 'ex' parsing
    #assert len(file_description['ex']) == file_description['nex']
    return file_description
def _read_first_record_v2(f):
    r""" packages/classic/lib/file.f90

    Read the first record of a v2 ('2A') CLASS file and return the file
    description dict used by the index/observation readers.

    Position & Parameter & Fortran Kind & Purpose & Unit \\
    \hline
    1 & {\tt code} & Character*4 & File code & - \\
    2 & {\tt reclen} & Integer*4 & Record length & words \\
    3 & {\tt kind} & Integer*4 & File kind & - \\
    4 & {\tt vind} & Integer*4 & Index version & - \\
    5 & {\tt lind} & Integer*4 & Index length & words \\
    6 & {\tt flags} & Integer*4 & Bit flags. \#1: single or multiple, & - \\
    & & & \#2-32: provision (0-filled) & \\
    \hline
    7:8 & {\tt xnext} & Integer*8 & Next available entry number & - \\
    9:10 & {\tt nextrec} & Integer*8 & Next record which contains free space & record \\
    11 & {\tt nextword} & Integer*4 & Next free word in this record & word \\
    \hline
    12 & {\tt lex1} & Integer*4 & Length of first extension index & entries \\
    13 & {\tt nex} & Integer*4 & Number of extensions & - \\
    14 & {\tt gex} & Integer*4 & Extension growth rule & - \\
    15:{\tt reclen} & {\tt aex(:)} & Integer*8 & Array of extension addresses & record
    """
    f.seek(0)
    file_description = {
        'code': f.read(4),
        'reclen': _read_int32(f),
        'kind': _read_int32(f),
        'vind': _read_int32(f),
        'lind': _read_int32(f),
        'flags': _read_int32(f),
        'xnext': _read_int64(f),
        'nextrec': _read_int64(f),
        'nextword': _read_int32(f),
        'lex1': _read_int32(f),
        'nex': _read_int32(f),
        'gex': _read_int32(f),
    }
    # Build the cumulative entry-count table lexn: lexn[k] is the total
    # number of entries in extensions 1..k.
    file_description['lexn'] = [0]
    if file_description['gex'] == 10:
        # Linear growth: every extension holds lex1 entries.
        for ii in range(1, file_description['nex']+1):
            file_description['lexn'].append(file_description['lexn'][-1]+file_description['lex1'])
    else:
        #! Exponential growth. Only growth with mantissa 2.0 is supported
        for ii in range(1, file_description['nex']):
            # I don't know what the fortran does here!!!
            # ahh, maybe 2_8 means int(2, dtype='int64')
            nent = int(file_description['lex1'] * 2**(ii-1))
            #nent = int(file%desc%lex1,kind=8) * 2_8**(iex-1)
            file_description['lexn'].append(file_description['lexn'][-1]+nent)
            #file%desc%lexn(iex) = file%desc%lexn(iex-1) + nent
    file_description['nentries'] = np.sum(file_description['lexn'])
    record_length_words = file_description['reclen']
    # Extension addresses are int64 in v2, hence (reclen-15)//2 of them.
    aex = numpy.fromfile(f, count=(record_length_words-15)//2, dtype='int64')
    file_description['aex'] = aex[aex!=0]
    assert len(file_description['aex']) == file_description['nex']
    file_description['version'] = 2
    return file_description
def gi8_dicho(ninp, lexn, xval, ceil=True):
    """Dichotomic (binary) search over the ordered 1-indexed table *lexn*.

    Find ival such that lexn(ival-1) < xval <= lexn(ival) (ceiling mode).
    Port of the Fortran gi8_dicho used as
    ``call gi8_dicho(nex, file%desc%lexn, entry_num, .true., kex, error)``.

    Parameters
    ----------
    ninp : int
        Number of valid entries in *lexn*.
    lexn : sequence of int
        Ordered values (accessed 1-indexed, i.e. lexn[i-1]).
    xval : int
        The value searched for.
    ceil : bool
        Accepted for API compatibility with the Fortran signature; only
        ceiling mode is implemented (as in the original translation).

    Returns
    -------
    int
        The 1-based position ival.
    """
    lo = 1
    hi = ninp
    # Ceiling mode: shrink [lo, hi] until they are adjacent.
    while hi > lo + 1:
        mid = (hi + lo) // 2
        if lexn[mid - 1] < xval:
            lo = mid
        else:
            hi = mid
    return hi
def _read_obshead(f, file_description, position=None, verbose=False):
    """Dispatch observation-header reading by file version (1 or 2)."""
    version = file_description['version']
    if version == 1:
        return _read_obshead_v1(f, position=position, verbose=verbose)
    elif version == 2:
        return _read_obshead_v2(f, position=position)
    raise ValueError("Invalid file version {0}.".
                     format(version))
def _read_obshead_v2(f, position=None):
    """
    Read a v2 observation header (Entry Descriptor) from a CLASS file.

    Returns
    -------
    (entry_number, obshead_dict, {section_number: section_address})

    Raises
    ------
    IndexError
        If the record at *position* does not carry the '2' observation
        code.

    Reference layout (classic Entry Descriptor):

    ! Version 2 (public)
    integer(kind=4), parameter :: entrydescv2_nw1=11 ! Number of words, in 1st part
    integer(kind=4), parameter :: entrydescv2_nw2=5 ! Number of words for 1 section in 2nd part
    type classic_entrydesc_t
    sequence
    integer(kind=4) :: code ! 1 : code observation icode
    integer(kind=4) :: version ! 2 : observation version
    integer(kind=4) :: nsec ! 3 : number of sections
    integer(kind=4) :: pad1 ! - : memory padding (not in data)
    integer(kind=8) :: nword ! 4- 5: number of words
    integer(kind=8) :: adata ! 6- 7: data address
    integer(kind=8) :: ldata ! 8- 9: data length
    integer(kind=8) :: xnum ! 10-11: entry number
    ! Out of the 'sequence' block:
    integer(kind=4) :: msec ! Not in data: maximum number of sections the
    ! Observation Index can hold
    integer(kind=4) :: pad2 ! Memory padding for 8 bytes alignment
    integer(kind=4) :: seciden(classic_maxsec) ! Section Numbers (on disk: 1 to ed%nsec)
    integer(kind=8) :: secleng(classic_maxsec) ! Section Lengths (on disk: 1 to ed%nsec)
    integer(kind=8) :: secaddr(classic_maxsec) ! Section Addresses (on disk: 1 to ed%nsec)
    end type classic_entrydesc_t
    """
    if position is not None:
        f.seek(position)
    else:
        position = f.tell()
    # Peek at the 4-byte record code, then rewind to parse the full header.
    IDcode = f.read(4)
    if IDcode.strip() != b'2':
        raise IndexError("Observation Header reading failure at {0}. "
                         "Record does not appear to be an observation header.".
                         format(position))
    f.seek(position)
    # Word counts from the Fortran layout above (currently informational;
    # the reads below consume exactly entrydescv2_nw1 = 11 words).
    entrydescv2_nw1 = 11
    entrydescv2_nw2 = 5
    # The on-disk layout has no pad words (the commented-out '_blank'
    # fields exist only in the in-memory Fortran struct).
    obshead = {
        'CODE': f.read(4),
        'VERSION': _read_int32(f),
        'NSEC': _read_int32(f),
        #'_blank': _read_int32(f),
        'NWORD': _read_int64(f),
        'ADATA': _read_int64(f),
        'LDATA': _read_int64(f),
        'XNUM': _read_int64(f),
        #'MSEC': _read_int32(f),
        #'_blank2': _read_int32(f),
    }
    # Second part: per-section identifiers, lengths, addresses.
    section_numbers = np.fromfile(f, count=obshead['NSEC'], dtype='int32')
    section_lengths = np.fromfile(f, count=obshead['NSEC'], dtype='int64')
    section_addresses = np.fromfile(f, count=obshead['NSEC'], dtype='int64')

    return obshead['XNUM'],obshead,dict(zip(section_numbers,section_addresses))
def _read_obshead_v1(f, position=None, verbose=False):
    """
    Read the observation header of a CLASS file
    (helper function for read_class; should not be used independently)

    Returns
    -------
    (obsnum, header_dict, {section_code: section_address})
        Section addresses are word offsets used by the caller to seek to
        each header section.

    Raises
    ------
    IndexError
        If the record does not start with the '2' observation code.
    """
    if position is not None:
        f.seek(position)
    IDcode = f.read(4)
    if IDcode.strip() != b'2':
        raise IndexError("Observation Header reading failure at {0}. "
                         "Record does not appear to be an observation header.".
                         format(f.tell() - 4))
    (nblocks, nbyteob, data_address, nheaders, data_length, obindex, nsec,
     obsnum) = numpy.fromfile(f, count=8, dtype='int32')
    if verbose:
        print("nblocks,nbyteob,data_address,data_length,nheaders,obindex,nsec,obsnum",nblocks,nbyteob,data_address,data_length,nheaders,obindex,nsec,obsnum)
        print("DATA_LENGTH: ",data_length)

    seccodes = numpy.fromfile(f,count=nsec,dtype='int32')
    # Documentation says addresses then length: It is apparently wrong
    seclen = numpy.fromfile(f,count=nsec,dtype='int32')
    secaddr = numpy.fromfile(f,count=nsec,dtype='int32')
    if verbose:
        print("Section codes, addresses, lengths: ",seccodes,secaddr,seclen)

    hdr = {'NBLOCKS':nblocks, 'NBYTEOB':nbyteob, 'DATAADDR':data_address,
           'DATALEN':data_length, 'NHEADERS':nheaders, 'OBINDEX':obindex,
           'NSEC':nsec, 'OBSNUM':obsnum}

    #return obsnum,seccodes
    return obsnum,hdr,dict(zip(seccodes,secaddr))
# THIS IS IN READ_OBSHEAD!!!
# def _read_preheader(f):
# """
# Not entirely clear what this is, but it is stuff that precedes the actual data
#
# Looks something like this:
# array([ 1, -2, -3, -4, -14,
# 9, 17, 18, 25, 55,
# 64, 81, 99, -1179344801, 979657591,
#
# -2, -3, -4, -14 indicate the 4 header types
# 9,17,18,25 *MAY* indicate the number of bytes in each
#
#
# HOW is it indicated how many entries there are?
# """
# # 13 comes from counting 1, -2,....99 above
# numbers = np.fromfile(f, count=13, dtype='int32')
# sections = [n for n in numbers if n in header_id_numbers]
# return sections
def downsample_1d(myarr, factor, estimator=np.mean, weight=None):
    """
    Downsample a 1D array by averaging over *factor* pixels.
    Crops right side if the shape is not a multiple of factor.

    This code is pure numpy and should be fast.

    Parameters
    ----------
    myarr : np.ndarray
        1D input array.
    factor : int
        Number of adjacent samples to combine.
    estimator : callable
        Defaults to mean. You can downsample by summing or
        something else if you want a different estimator
        (e.g., downsampling error: you want to sum & divide by sqrt(n))
    weight : np.ndarray or None
        An array of weights (same length as *myarr*) to use for the
        downsampling. If None, assumes uniform 1.

    Returns
    -------
    np.ndarray
        The downsampled array of length ``myarr.size // factor``.

    Raises
    ------
    ValueError
        If *myarr* is not 1-dimensional.
    """
    if myarr.ndim != 1:
        raise ValueError("Only works on 1d data. Says so in the title.")
    factor = int(factor)
    nkeep = myarr.size - (myarr.size % factor)
    crarr = myarr[:nkeep]
    if weight is None:
        dsarr = estimator(np.vstack([crarr[i::factor] for i in
                                     range(factor)]), axis=0)
    else:
        # BUGFIX: the weights must be cropped to the same length as the
        # data; previously an input whose length was not a multiple of
        # *factor* raised a broadcasting error in the weighted branch.
        crwt = weight[:nkeep]
        dsarr = estimator(np.vstack([crarr[i::factor]*crwt[i::factor] for i in
                                     range(factor)]), axis=0)
        warr = estimator(np.vstack([crwt[i::factor] for i in
                                    range(factor)]), axis=0)
        # Normalize by the downsampled weights -> weighted average.
        dsarr = dsarr/warr
    return dsarr
# unit test
def test_downsample1d():
data = np.arange(10)
weight = np.ones(10)
weight[5]=0
assert np.all(downsample_1d(data, 2, weight=weight, estimator=np.mean) ==
np.array([0.5, 2.5, 4.0, 6.5, 8.5]))
def read_observation(f, obsid, file_description=None, indices=None,
                     my_memmap=None, memmap=True, verbose=False):
    """
    Read a single observation (spectrum + combined header) from a CLASS file.

    Parameters
    ----------
    f : file or str
        Open binary file handle, or a filename (which is opened and closed
        here).
    obsid : int
        0-based position of the observation in the index list.
    file_description : dict, optional
        Output of _read_first_record; read from *f* when not given.
    indices : list of dict, optional
        Output of _read_indices; read from *f* when not given.
    my_memmap : numpy.memmap, optional
        Required when *f* is a file object and memmap=True.
    memmap : bool
        Read the spectrum through a memory map instead of an eager read.
    verbose : bool
        Passed through to _read_obshead.

    Returns
    -------
    spec : ndarray
        The spectral (or continuum) channel data.
    hdr : dict
        Observation header merged with the index entry plus derived keys
        (RA/DEC in degrees, OBSNUM, DATASTART, OBSDATE, ...).

    Raises
    ------
    ValueError
        If a memmap is requested for a file object without supplying one,
        or if the header carries no channel count.
    """
    if isinstance(f, str):
        f = open(f,'rb')
        opened = True
        if memmap:
            my_memmap = numpy.memmap(f, offset=0, dtype='float32',
                                     mode='r')
        else:
            my_memmap = None
    elif my_memmap is None and memmap:
        raise ValueError("Must pass in a memmap object if passing in a file object.")
    else:
        opened = False

    if file_description is None:
        file_description = _read_first_record(f)
    if indices is None:
        indices = _read_indices(f, file_description)
    index = indices[obsid]
    # BLOC (record) and WORD are 1-indexed addresses; records are
    # reclen 4-byte words long.
    obs_position = (index['BLOC']-1)*file_description['reclen']*4 + (index['WORD']-1)*4
    log.debug("Reading observation at position {0}".format(obs_position))
    obsnum,obshead,sections = _read_obshead(f, file_description,
                                            position=obs_position,
                                            verbose=verbose)
    header = obshead
    datastart = 0
    for section_id,section_address in iteritems(sections):
        # Section addresses are 1-indexed byte addresses
        # in the current "block"
        sec_position = obs_position + (section_address-1)*4
        temp_hdr = _read_header(f, type=header_id_numbers[section_id],
                                position=sec_position)
        header.update(temp_hdr)
        # The data follow the furthest-read header section.
        datastart = max(datastart,f.tell())
    hdr = header
    hdr.update(obshead) # re-overwrite things
    hdr.update({'OBSNUM':obsnum,'RECNUM':obsid})
    hdr.update({'RA':hdr['LAM']/pi*180,'DEC':hdr['BET']/pi*180})
    hdr.update({'RAoff':hdr['LAMOF']/pi*180,'DECoff':hdr['BETOF']/pi*180})
    hdr.update({'OBJECT':hdr['SOURC'].strip()})
    hdr.update({'BUNIT':'Tastar'})
    hdr.update({'EXPOSURE':float(hdr['TIME'])})
    hdr['HDRSTART'] = obs_position
    hdr['DATASTART'] = datastart
    hdr.update(indices[obsid])
    # Define MJD as mid-exposure time in MJD
    # NOTE(review): UT appears to be stored in radians here (divided by
    # 2*pi for a day fraction) — confirm against the CLASS format spec.
    hdr.update({'OBSDATE': hdr['MJD'] + hdr['UT']/2./pi})
    # Apparently the data are still valid in this case?
    #if hdr['XNUM'] != obsid+1:
    #    log.error("The spectrum read was {0} but {1} was requested.".
    #              format(hdr['XNUM']-1, obsid))
    if hdr['KIND'] == 1: # continuum
        nchan = hdr['NPOIN']
    elif 'NCHAN' in hdr:
        nchan = hdr['NCHAN']
    else:
        # BUGFIX: previously dropped into an interactive ipdb debugger
        # here, which is unacceptable in library code; raise instead.
        log.error("No NCHAN in header.  This is not a spectrum.")
        raise ValueError("No NCHAN in header.  This is not a spectrum.")
    # There may be a 1-channel offset?  CHECK!!!
    # (changed by 1 pixel - October 14, 2014)
    # (changed back - October 21, 2014 - I think the ends are just bad, but not
    # zero.)
    f.seek(datastart-1)
    spec = _read_spectrum(f, position=datastart-1, nchan=nchan,
                          memmap=memmap, my_memmap=my_memmap)

    if opened:
        f.close()

    return spec, hdr
def _read_spectrum(f, position, nchan, my_memmap=None, memmap=True):
if position != f.tell():
log.warning("Reading data from {0}, but the file is wound "
"to {1}.".format(position, f.tell()))
if memmap:
here = position
#spectrum = numpy.memmap(filename, offset=here, dtype='float32',
# mode='r', shape=(nchan,))
spectrum = my_memmap[here//4:here//4+nchan]
f.seek(here+nchan*4)
else:
f.seek(position)
spectrum = numpy.fromfile(f,count=nchan,dtype='float32')
return spectrum
def _spectrum_from_header(fileobj, header, memmap=None):
    """Read the spectrum described by *header* (via DATASTART and the
    channel count NCHAN, falling back to NPOIN for continuum data).

    BUGFIX: the channel-count expression referenced an undefined name
    ``hdr`` (a NameError at runtime); it now uses the ``header`` argument.
    """
    nchan = header['NCHAN'] if 'NCHAN' in header else header['NPOIN']
    return _read_spectrum(fileobj, position=header['DATASTART'],
                          nchan=nchan,
                          my_memmap=memmap)
def clean_header(header):
    """Return a FITS-safe copy of *header*.

    Keys are truncated to 8 characters; small numpy arrays (size 2-10)
    are expanded into numbered keys (7-char key + index); any other
    non-(int, float, str) value is stringified.

    Raises
    ------
    ValueError
        For an array value with more than 10 elements.
    """
    cleaned = {}
    for key, value in header.items():
        if isinstance(value, (int, float, str)):
            # Plain scalars pass through unchanged.
            cleaned[key[:8]] = value
        elif isinstance(value, np.ndarray) and value.size > 1:
            if value.size > 10:
                raise ValueError("Large array being put in header.  That's no good.  key={0}".format(key))
            for idx, item in enumerate(value):
                cleaned[key[:7] + str(idx)] = item
        else:
            # Anything else (incl. size-1 arrays) becomes its string repr.
            cleaned[key[:8]] = str(value)
    return cleaned
class ClassObject(object):
    def __init__(self, filename, verbose=False):
        """Open a CLASS file and eagerly read its entry indices.

        Parameters
        ----------
        filename : str
            Path to the CLASS binary file; kept open (with a float32
            memmap over it) for the object's lifetime.
        verbose : bool
            Log timing information for each setup stage.
        """
        t0 = time.time()
        self._file = open(filename, 'rb')
        # First record describes the file layout; indices are read eagerly,
        # spectra lazily (via LazyItem below).
        self.file_description = _read_first_record(self._file)
        self.allind = _read_indices(self._file, self.file_description)
        self._data = np.memmap(self._file, dtype='float32', mode='r')
        if verbose: log.info("Setting _spectra")
        self._spectra = LazyItem(self)
        t1 = time.time()
        if verbose: log.info("Setting posang. t={0}".format(t1-t0))
        self.set_posang()
        t2 = time.time()
        if verbose: log.info("Identifying otf scans. t={0}".format(t2-t1))
        self._identify_otf_scans(verbose=verbose)
        t3 = time.time()
        #self._load_all_spectra()
        if verbose:
            log.info("Loaded CLASS object with {3} indices.  Time breakdown:"
                     " {0}s for indices, "
                     "{1}s for posang, and {2}s for OTF scan identification"
                     .format(t1-t0, t2-t1, t3-t2, len(self.allind)))
def __repr__(self):
s = "\n".join(["{k}: {v}".format(k=k,v=v)
for k,v in iteritems(self.getinfo())])
return "ClassObject({id}) with {nspec} entries\n".format(id=id(self),
nspec=len(self.allind)) + s
def getinfo(self, allsources=False):
info = dict(
tels = self.tels,
lines = self.lines,
scans = self.scans,
sources = self.sources if allsources else self.sci_sources,
)
return info
def set_posang(self):
h0 = self.headers[0]
for h in self.headers:
dx = h['OFF1'] - h0['OFF1']
dy = h['OFF2'] - h0['OFF2']
h['COMPPOSA'] = np.arctan2(dy,dx)*180/np.pi
h0 = h
    def _identify_otf_scans(self, verbose=False):
        """Tag index entries with OTF-scan bookkeeping keys.

        Walks the index entries; whenever SCAN or SOURC changes relative
        to the reference entry, the preceding run [st:ii) gets a median
        'SCANPOSA' (position angle mod 180) and the reference gets
        'FIRSTSCAN'.  'OTFSCAN' numbers consecutive scans within a source.

        NOTE(review): h0 is never re-bound inside the loop, so every
        entry is compared against the *first* index entry rather than the
        previous run's reference — confirm whether that is intentional.
        """
        h0 = self.allind[0]
        st = 0
        otfscan = 0
        # COMPPOSA must already be set (see set_posang).
        posangs = [h['COMPPOSA'] for h in self.allind]
        if verbose:
            pb = ProgressBar(len(self.allind))
        for ii,h in enumerate(self.allind):
            if (h['SCAN'] != h0['SCAN']
                or h['SOURC'] != h0['SOURC']):

                h0['FIRSTSCAN'] = st
                cpa = np.median(posangs[st:ii])
                for hh in self.allind[st:ii]:
                    hh['SCANPOSA'] = cpa % 180
                st = ii
                if h['SCAN'] == h0['SCAN']:
                    h0['OTFSCAN'] = otfscan
                    otfscan += 1
                    h['OTFSCAN'] = otfscan
                else:
                    otfscan = 0
                    h['OTFSCAN'] = otfscan
            else:
                h['OTFSCAN'] = otfscan
            if verbose:
                pb.update(ii)
    def listscans(self, source=None, telescope=None, out=sys.stdout):
        """Print a formatted table of scans, optionally filtered.

        Parameters
        ----------
        source : str, list, or tuple, optional
            Regex (or list of regexes) matched against the source name.
        telescope : str, optional
            Regex matched against the telescope name.
        out : file-like
            Destination stream for the table (default stdout).

        Returns
        -------
        dict
            The data dict for the *last* printed scan only.

        NOTE(review): `data_rows` accumulates every printed row but is
        never returned, and `data` is unbound (NameError) when no scan
        matches; the {TSYS:>8.1f} format also relies on TSYS being
        present in the header — confirm intended behavior before use.
        """
        minid=0
        scan = -1
        sourc = ""
        #tel = ''
        minoff1,maxoff1 = np.inf,-np.inf
        minoff2,maxoff2 = np.inf,-np.inf
        ttlangle,nangle = 0.0,0
        print("{entries:15s} {SOURC:12s} {XTEL:12s} {SCAN:>8s} {SUBSCAN:>8s} "
              "[ {RAmin:>12s}, {RAmax:>12s} ] "
              "[ {DECmin:>12s}, {DECmax:>12s} ] "
              "{angle:>12s} {SCANPOSA:>12s} {OTFSCAN:>8s} {TSYS:>8s} {UTD:>12s}"
              .format(entries='Scans', SOURC='Source', XTEL='Telescope',
                      SCAN='Scan', SUBSCAN='Subscan',
                      RAmin='min(RA)', RAmax='max(RA)',
                      DECmin='min(DEC)', DECmax='max(DEC)',
                      SCANPOSA='Scan PA',
                      angle='Angle', OTFSCAN='OTFscan',
                      TSYS='TSYS', UTD='UTD'),
              file=out)
        data_rows = []
        for ii,row in enumerate(self.headers):
            # Same scan continuing: accumulate offset extrema and the
            # running drift angle.
            if (row['SCAN'] == scan
                and row['SOURC'] == sourc
                #and row['XTEL'] == tel
               ):
                minoff1 = min(minoff1, row['OFF1'])
                maxoff1 = max(maxoff1, row['OFF1'])
                minoff2 = min(minoff2, row['OFF2'])
                maxoff2 = max(maxoff2, row['OFF2'])
                ttlangle += np.arctan2(row['OFF2'] - prevrow['OFF2'],
                                       row['OFF1'] - prevrow['OFF1'])%np.pi
                nangle += 1
                prevrow = row
            else:
                # Scan boundary: print the accumulated previous scan (if
                # it passes the source/telescope filters), then reset.
                if scan == -1:
                    scan = row['SCAN']
                    sourc = row['SOURC']
                    #tel = row['XTEL']
                    prevrow = row
                    continue
                ok = True
                if source is not None:
                    if isinstance(source, (list,tuple)):
                        ok = ok and any(re.search((s), prevrow['SOURC'])
                                        for s in source)
                    else:
                        ok = ok and re.search((source), prevrow['SOURC'])
                if telescope is not None:
                    ok = ok and re.search((telescope), prevrow['XTEL'])
                if ok:
                    # Offsets are radians; convert to arcsec for display.
                    data = dict(RAmin=minoff1*180/np.pi*3600,
                                RAmax=maxoff1*180/np.pi*3600,
                                DECmin=minoff2*180/np.pi*3600,
                                DECmax=maxoff2*180/np.pi*3600,
                                angle=(ttlangle/nangle)*180/np.pi if nangle>0 else 0,
                                e0=minid,
                                e1=ii-1,
                                #TSYS=row['TSYS'] if 'TSYS' in row else '--',
                                UTD=row['DOBS']+row['UT'] if 'UT' in row else -99,
                                **prevrow)
                    print("{e0:7d}-{e1:7d} {SOURC:12s} {XTEL:12s} {SCAN:8d} {SUBSCAN:8d} "
                          "[ {RAmin:12f}, {RAmax:12f} ] "
                          "[ {DECmin:12f}, {DECmax:12f} ] "
                          "{angle:12.1f} {SCANPOSA:12.1f} {OTFSCAN:8d}"
                          " {TSYS:>8.1f} {UTD:12f}".
                          format(**data),
                          file=out)
                    data_rows.append(data)
                minoff1,maxoff1 = np.inf,-np.inf
                minoff2,maxoff2 = np.inf,-np.inf
                ttlangle,nangle = 0.0,0
                scan = row['SCAN']
                sourc = row['SOURC']
                #tel = row['XTEL']
                minid = ii
        return data
@property
def tels(self):
if hasattr(self,'_tels'):
return self._tels
else:
self._tels = set([h['CTELE'] for h in self.allind])
#testing if CTELE even works
return self._tels
@property
def sources(self):
if hasattr(self,'_source'):
return self._source
else:
self._source = set([h['SOURC'] for h in self.allind])
return self._source
@property
def scans(self):
if hasattr(self,'_scan'):
return self._scan
else:
self._scan = set([h['SCAN'] for h in self.allind])
return self._scan
@property
def sci_sources(self):
return set([s for s in self.sources
if s[:4] not in ('SKY-', 'TSYS', 'TCAL', 'TREC', 'HOT-',
'COLD')])
@property
def lines(self):
if hasattr(self,'_lines'):
return self._lines
else:
self._lines = set([h['LINE'] for h in self.allind])
return self._lines
def _load_all_spectra(self, indices=None):
if indices is None:
indices = range(self.file_description['xnext']-1)
if hasattr(self, '_loaded_indices'):
indices_set = set(indices)
indices_to_load = (indices_set.difference(self._loaded_indices))
self._loaded_indices = self._loaded_indices.union(indices_set)
if any(indices_to_load):
pb = ProgressBar(len(indices_to_load))
for ii,k in enumerate(xrange(indices_to_load)):
self._spectra[k]
pb.update(ii)
else:
self._loaded_indices = set(indices)
self._spectra.load_all()
@property
def spectra(self):
return [x[0] for x in self._spectra]
@property
def headers(self):
return [self._spectra[ii][1]
if ii in self._spectra else x
for ii,x in enumerate(self.allind)]
    def select_spectra(self,
                       all=None,
                       line=None,
                       linere=None,
                       linereflags=re.IGNORECASE,
                       number=None,
                       scan=None,
                       offset=None,
                       source=None,
                       sourcere=None,
                       sourcereflags=re.IGNORECASE,
                       range=None,
                       quality=None,
                       telescope=None,
                       telescopere=None,
                       telescopereflags=re.IGNORECASE,
                       subscan=None,
                       entry=None,
                       posang=None,
                       #observed=None,
                       #reduced=None,
                       frequency=None,
                       section=None,
                       user=None,
                       include_old_versions=False,
                      ):
        """
        Return the index numbers of all entries matching every given criterion.

        Criteria are matched against the per-entry headers: ``line``/``linere``
        (regex vs LINE; ``line`` is escaped, case-insensitive), ``scan``
        (equality), ``offset`` (matches either OFF1 or OFF2), ``source``/
        ``sourcere`` (regex vs CSOUR), ``range`` (4-tuple bounding OFF1 and
        OFF2), ``quality`` (equality), ``telescope``/``telescopere`` (regex vs
        CTELE), ``subscan`` (equality), ``frequency`` (2-tuple bounding RESTF;
        forces all spectra to be loaded first so RESTF is available), and
        ``posang`` (2-tuple bounding COMPPOSA modulo 180 degrees).

        ``entry`` short-circuits everything: a 2-tuple returns that index
        range directly.  ``all``, ``number``, ``section`` and ``user`` are
        accepted but not used by the selection below.

        Parameters
        ----------
        include_old_versions: bool
            Include spectra with XVER numbers <0?  These are CLASS spectra that
            have been "overwritten" (re-reduced?)
        """
        if entry is not None and len(entry)==2:
            return irange(entry[0], entry[1])

        if frequency is not None:
            self._load_all_spectra()

        # Each header is reduced to a single boolean: every active criterion
        # must be satisfied; inactive criteria evaluate to True.
        sel = [(re.search(re.escape(ensure_bytes(line)), h['LINE'], re.IGNORECASE)
                if line is not None else True) and
               (re.search(ensure_bytes(linere), h['LINE'], linereflags)
                if linere is not None else True) and
               (h['SCAN'] == scan if scan is not None else True) and
               ((h['OFF1'] == offset or
                 h['OFF2'] == offset) if offset is not None else True) and
               (re.search(re.escape(ensure_bytes(source)), h['CSOUR'], re.IGNORECASE)
                if source is not None else True) and
               (re.search(ensure_bytes(sourcere), h['CSOUR'], sourcereflags)
                if sourcere is not None else True) and
               (h['OFF1']>range[0] and h['OFF1'] < range[1] and
                h['OFF2']>range[2] and h['OFF2'] < range[3]
                if range is not None and len(range)==4 else True) and
               (h['QUAL'] == quality if quality is not None else True) and
               (re.search(re.escape(ensure_bytes(telescope)), h['CTELE'], re.IGNORECASE)
                if telescope is not None else True) and
               (re.search(ensure_bytes(telescopere), h['CTELE'], telescopereflags)
                if telescopere is not None else True) and
               (h['SUBSCAN']==subscan if subscan is not None else True) and
               ('RESTF' in h and # Need to check that it IS a spectrum: continuum data can't be accessed this way
                h['RESTF'] > frequency[0] and
                h['RESTF'] < frequency[1]
                if frequency is not None and len(frequency)==2
                else True) and
               (h['COMPPOSA']%180 > posang[0] and
                h['COMPPOSA']%180 < posang[1]
                if posang is not None and len(posang)==2
                else True) and
               # 1A uses XVER, 2A uses VER.  If neither are present, it's
               # probably not a valid spectrum?
               (h.get('XVER', h.get('VER', -999)) > 0
                if not include_old_versions else True)
               for h in self.headers
              ]

        return [ii for ii,k in enumerate(sel) if k]
def get_spectra(self, progressbar=True, **kwargs):
selected_indices = self.select_spectra(**kwargs)
if not any(selected_indices):
raise ValueError("Selection yielded empty.")
self._spectra.load(selected_indices, progressbar=progressbar)
return [self._spectra[ii] for ii in selected_indices]
    def get_pyspeckit_spectra(self, progressbar=True, **kwargs):
        """Like get_spectra, but wrap each (data, header) pair in a
        pyspeckit.Spectrum, building the frequency axis with make_axis and
        sanitizing the header with clean_header."""
        spdata = self.get_spectra(progressbar=progressbar, **kwargs)
        spectra = [pyspeckit.Spectrum(data=data,
                                      xarr=make_axis(header),
                                      header=clean_header(header))
                   for data,header in spdata]
        return spectra
def read_observations(self, observation_indices, progressbar=True):
self._spectra.load(observation_indices, progressbar=progressbar)
return [self._spectra[ii] for ii in observation_indices]
@print_timing
def read_class(filename, downsample_factor=None, sourcename=None,
               telescope=None, line=None, posang=None, verbose=False,
               flag_array=None):
    """
    Read a binary class file.
    Based on the
    `GILDAS CLASS file type Specification
    <http://iram.fr/IRAMFR/GILDAS/doc/html/class-html/node58.html>`_

    Returns ``(spectra, headers, indexes)`` or None if nothing matched.

    Parameters
    ----------
    filename: str
    downsample_factor: None or int
        Factor by which to downsample data by averaging.  Useful for
        overresolved data.
    sourcename: str or list of str
        Source names to match to the data (uses regex)
    telescope: str or list of str
        'XTEL' or 'TELE' parameters: the telescope & instrument
    line: str or list of str
        The line name
    posang: tuple of 2 floats
        The first float is the minimum value for the position angle.  The second
        float is the maximum value for the position angle.
    verbose: bool
        Log messages with severity INFO
    flag_array: np.ndarray
        An array with the same shape as the data used to flag out
        (remove) data when downsampling.  True = flag out
    """
    classobj = ClassObject(filename)

    if not isinstance(sourcename, (list,tuple)):
        sourcename = [sourcename]
    if not isinstance(telescope, (list,tuple)):
        telescope = [telescope]
    if not isinstance(line, (list,tuple)):
        line = [line]

    spectra,headers = [],[]
    if verbose:
        log.info("Reading...")
    # Cartesian product of all source/telescope/line combinations; each
    # select_spectra call yields index numbers for one combination.
    selection = [ii
                 for source in sourcename
                 for tel in telescope
                 for li in line
                 for ii in classobj.select_spectra(sourcere=source,
                                                   telescope=tel,
                                                   line=li,
                                                   posang=posang)]

    sphdr = classobj.read_observations(selection)
    if len(sphdr) == 0:
        return None
    spec,hdr = zip(*sphdr)
    spectra += spec
    headers += hdr

    # NOTE(review): 'indexes' aliases the header list *before* downsampling;
    # downsample_header mutates the same dicts in place, so the entries stay
    # shared even though 'headers' is rebound below.
    indexes = headers

    # Flags are inverted into weights: flagged-out channels get weight 0.
    weight = ~flag_array if flag_array is not None else None

    if downsample_factor is not None:
        if verbose:
            log.info("Downsampling...")
        spectra = [downsample_1d(spec, downsample_factor,
                                 weight=weight)
                   for spec in ProgressBar(spectra)]
        headers = [downsample_header(h, downsample_factor)
                   for h in ProgressBar(headers)]
        for hdr in headers:
            stringify_header(hdr)

    return spectra,headers,indexes
def stringify_header(header):
    """Sanitize *header* in place for FITS output: decode bytes values to
    str, and stringify any value of a non-FITS-compatible type with the
    control-whitespace tail of string.printable stripped out."""
    from six import string_types, integer_types
    import string

    allowed_types = string_types + integer_types + (float, complex, bool,
                                                    np.floating, np.integer,
                                                    np.complexfloating,
                                                    np.bool_)
    # string.printable[96:] is the newline/control-whitespace tail
    stripper = re.compile("[{0}]".format(string.printable[96:]))

    for key, value in header.items():
        if isinstance(value, bytes):
            header[key] = value.decode()
        elif not isinstance(value, allowed_types):
            header[key] = stripper.sub("", str(header[key]))
def downsample_header(hdr, downsample_factor):
    """Adjust CLASS header keywords in place for a spectrum downsampled by
    *downsample_factor*, and return the same header dict."""
    # Channel counts shrink by the factor...
    for key in ('NCHAN', 'NPOIN', 'DATALEN'):
        if key in hdr:
            hdr[key] = int(hdr[key] / downsample_factor)
    # maybe wrong? h['RCHAN'] = (h['RCHAN']-1) / downsample_factor + 1
    inv_factor = 1. / downsample_factor
    hdr['RCHAN'] = (hdr['RCHAN'] - 1) * inv_factor + 0.5 + inv_factor / 2.
    # ...while the per-channel frequency/velocity widths grow.
    for key in ('FRES', 'VRES'):
        if key in hdr:
            hdr[key] *= downsample_factor
    return hdr
def make_axis(header,imagfreq=False):
    """
    Create a :class:`pyspeckit.spectrum.units.SpectroscopicAxis` from the CLASS "header"

    The axis is linear in frequency (MHz): channel i maps to
    RESTF + FOFF + (i - RCHAN)*FRES, or, when ``imagfreq`` is set,
    IMAGE - (i - RCHAN)*FRES (image sideband runs in the opposite sense).
    """
    from .. import units
    rest_frequency = header.get('RESTF')
    # xunits and doppler are read but not used below
    xunits = 'MHz'
    nchan = header.get('NCHAN')
    voff = header.get('VOFF')
    foff = header.get('FOFF')
    doppler = header.get('DOPPLER')
    fres = header.get('FRES')
    refchan = header.get('RCHAN')
    imfreq = header.get('IMAGE')
    # When only a velocity offset is given, convert it to a frequency offset
    if foff in (None, 0.0) and voff not in (None, 0.0):
        # Radio convention
        foff = -voff/2.997924580e5 * rest_frequency
    if not imagfreq:
        # channels are 1-indexed, hence arange(1, nchan+1)
        xarr = rest_frequency + foff + (numpy.arange(1, nchan+1) - refchan) * fres
        XAxis = units.SpectroscopicAxis(xarr,unit='MHz',refX=rest_frequency*u.MHz)
    else:
        xarr = imfreq - (numpy.arange(1, nchan+1) - refchan) * fres
        XAxis = units.SpectroscopicAxis(xarr,unit='MHz',refX=imfreq*u.MHz)
    return XAxis
@print_timing
def class_to_obsblocks(filename, telescope, line, datatuple=None, source=None,
                       imagfreq=False, DEBUG=False, **kwargs):
    """
    Load an entire CLASS observing session into a list of ObsBlocks based on
    matches to the 'telescope', 'line' and 'source' names

    BUG FIX: the final scan's spectrum list was never wrapped into an
    ObsBlock (it was only flushed when the *next* scan number appeared), so
    the last scan was silently dropped; it is now appended after the loop.

    Parameters
    ----------
    filename : string
        The Gildas CLASS data file to read the spectra from.
    telescope : list
        List of telescope names to be matched.
    line : list
        List of line names to be matched.
    source : list (optional)
        List of source names to be matched.  Defaults to None.
    imagfreq : bool
        Create a SpectroscopicAxis with the image frequency.
    """
    if datatuple is None:
        spectra,header,indexes = read_class(filename, **kwargs)
    else:
        spectra,header,indexes = datatuple

    obslist = []
    lastscannum = -1
    spectrumlist = None
    for sp,hdr,ind in zip(spectra,header,indexes):
        hdr.update(ind)
        # this is slow but necessary...
        H = pyfits.Header()
        for k,v in iteritems(hdr):
            if hasattr(v,"__len__") and not isinstance(v,str):
                # Explode sequence values into numbered keys; key is
                # truncated so that the digits fit in 8 characters.
                if len(v) > 1:
                    if len(v) < 10:
                        for ii,vv in enumerate(v):
                            newkey = k[:7]+str(ii)
                            H[newkey] = vv
                    elif len(v) < 100:
                        for ii,vv in enumerate(v):
                            newkey = k[:6]+str(ii)
                            H[newkey] = vv
                    else:
                        raise ValueError("Too many entries for {0}".format(k))
                else:
                    # single-element sequence: store the scalar
                    H[k] = v[0]
            else:
                H[k] = v

        scannum = hdr['SCAN']
        if 'XTEL' in hdr and hdr['XTEL'].strip() not in telescope:
            continue
        if hdr['LINE'].strip() not in line:
            continue
        if (source is not None) and (hdr['SOURC'].strip() not in source):
            continue
        hdr['RESTFREQ'] = hdr.get('RESTF')
        H['RESTFREQ'] = hdr.get('RESTF')

        if scannum != lastscannum:
            # new scan: flush the previous one and build a fresh axis
            lastscannum = scannum
            if spectrumlist is not None:
                obslist.append(pyspeckit.ObsBlock(spectrumlist))
            xarr = make_axis(hdr,imagfreq=imagfreq)
            spectrumlist = [(
                pyspeckit.Spectrum(xarr=xarr,
                                   header=H,
                                   data=sp))]
        else:
            # same scan: reuse the axis computed for this scan's first entry
            spectrumlist.append(
                pyspeckit.Spectrum(xarr=xarr,
                                   header=H,
                                   data=sp))

    # Flush the last scan (previously dropped).
    if spectrumlist is not None:
        obslist.append(pyspeckit.ObsBlock(spectrumlist))

    return obslist
class LazyItem(object):
    """
    Simple lazy spectrum-retriever wrapper.

    Spectra are read from disk only when indexed; already-read
    (spectrum, header) pairs are cached in ``self.sphdr``.

    BUG FIXES: ``__iter__`` called the Python-2-only ``self.next()``
    (AttributeError on Python 3); ``__next__`` referenced a nonexistent
    ``self.spheader`` attribute (typo for ``sphdr``); the slice branch of
    ``__getitem__`` used ``key.end`` although slice objects expose ``stop``.
    """
    def __init__(self, parent):
        self.parent = parent
        self.sphdr = {}
        self.nind = len(self.parent.allind)
        self.nloaded = 0

    def __repr__(self):
        return ("Set of {0} spectra & headers, {1} loaded"
                " ({2:0.2f}%)".format(self.nind, self.nloaded,
                                      (float(self.nloaded)/self.nind)*100))

    def load_all(self, progressbar=True):
        self.load(range(self.nind))

    def load(self, indices, progressbar=True):
        pb = ProgressBar(len(indices))
        counter = 0
        for k in indices:
            # indexing triggers the read (and caching) of entry k
            self[k]
            counter += 1
            pb.update(counter)

    def __getitem__(self, key):
        if key in self.sphdr:
            return self.sphdr[key]
        elif isinstance(key, slice):
            # slice objects have ``stop``, not ``end``
            return [self[k] for k in xrange(key.start or 0,
                                            key.stop or len(self.parent.allind),
                                            key.step or 1)]
        else:
            sphd = read_observation(self.parent._file, key,
                                    file_description=self.parent.file_description,
                                    indices=self.parent.allind,
                                    my_memmap=self.parent._data)
            # Update the header with OTFSCAN and POSANG info
            sphd[1].update(self.parent.allind[key])
            self.sphdr[key] = sphd
            self.nloaded += 1
            return sphd

    def __iter__(self):
        # __next__ is a generator function; calling it returns an iterator
        return self.__next__()

    def __next__(self):
        for k in self.sphdr:
            yield self.sphdr[k]

    def __contains__(self, key):
        return key in self.sphdr
@print_timing
def class_to_spectra(filename, datatuple=None, **kwargs):
    """
    Load each individual spectrum within a CLASS file into a list of Spectrum
    objects

    *datatuple*, if given, must be the ``(spectra, headers, indexes)`` triple
    returned by read_class; otherwise read_class(filename, **kwargs) is
    called.  Returns a pyspeckit.Spectra container.
    """
    if datatuple is None:
        spectra,header,indexes = read_class(filename, **kwargs)
    else:
        spectra,header,indexes = datatuple
    spectrumlist = []
    for sp,hdr,ind in zip(spectra,header,indexes):
        # merge the index entry (OTFSCAN/POSANG etc.) into the header
        hdr.update(ind)
        xarr = make_axis(hdr)
        spectrumlist.append(
            pyspeckit.Spectrum(xarr=xarr,
                               header=hdr,
                               data=sp))
    return pyspeckit.Spectra(spectrumlist)
def tests():
    """
    Tests are specific to the machine on which this code was developed.

    NOTE: the paths below are hard-coded to the original developer's laptop;
    this is a smoke test, not a portable unit test.
    """
    fn1 = '/Users/adam/work/bolocam/hht/class_003.smt'
    #fn1 = '/Users/adam/work/bolocam/hht/class_001.smt'
    #fn1 = '/Users/adam/work/bolocam/hht/test_SMT-F1M-VU-20824-073.cls'
    #fn2 = '/Users/adam/work/bolocam/hht/test_SMT-F1M-VU-79472+203.cls'
    #F1 = read_class(fn1)#,DEBUG=True)
    #F2 = read_class(fn2)
    n2hp = class_to_obsblocks(fn1,telescope=['SMT-F1M-HU','SMT-F1M-VU'],line=['N2HP(3-2)','N2H+(3-2)'])
    hcop = class_to_obsblocks(fn1,telescope=['SMT-F1M-HL','SMT-F1M-VL'],line=['HCOP(3-2)','HCO+(3-2)'])
| 42.555756
| 220
| 0.526813
|
from __future__ import print_function
from six.moves import xrange
from six import iteritems
import six
import astropy.io.fits as pyfits
import numpy
import numpy as np
from numpy import pi
from astropy import log
from astropy import units as u
import pyspeckit
import sys
import re
try:
    from astropy.utils.console import ProgressBar
except ImportError:
    # BUG FIX: the fallback was a lambda returning None, so
    # ``pb = ProgressBar(n); pb.update(i)`` raised AttributeError whenever
    # astropy was missing.  A no-op class keeps every call site working.
    class ProgressBar(object):
        """Do-nothing stand-in for astropy's ProgressBar."""
        def __init__(self, *args, **kwargs):
            pass

        def update(self, *args, **kwargs):
            pass
import struct
import time
# Alias: Python 3's range is already lazy; select_spectra returns
# irange(entry[0], entry[1]) directly for explicit entry ranges.
irange = range
def print_timing(func):
    """Decorator that logs *func*'s wall-clock runtime via astropy's logger.

    Improvement: use functools.wraps so the wrapper keeps the wrapped
    function's __name__, __doc__ and other metadata (the original only
    copied __doc__ by hand).
    """
    from functools import wraps

    @wraps(func)
    def wrapper(*arg, **kwargs):
        t1 = time.time()
        res = func(*arg, **kwargs)
        t2 = time.time()
        log.info('%s took %0.5g s' % (func.__name__, (t2-t1)))
        return res
    return wrapper
def ensure_bytes(string):
    """Coerce *string* to the native byte-string type: UTF-8 ``bytes`` on
    Python 3, ``str`` on Python 2."""
    if not six.PY3:
        return str(string)
    return bytes(string, 'utf-8')
# 4-byte file-type codes -> storage layout name (IEEE / Vax / EEEI byte order)
filetype_dict = {'1A ':'Multiple_IEEE',
                 '1 ':'Multiple_Vax',
                 '1B ':'Multiple_EEEI',
                 '2A ':'v2',
                 '2 ':'v2',
                 '2B ':'v2',
                 '9A ':'Single_IEEE',
                 '9 ':'Single_Vax',
                 '9B ':'Single_EEEI'}
# Register the bytes version of each code too (files are read in binary mode)
for key in list(filetype_dict.keys()):
    filetype_dict[ensure_bytes(key)] = filetype_dict[key]
# File-type code -> CLASS file format version
fileversion_dict = {'1A ':'v1',
                    '2A ':'v2',
                    '9A ':'v1',
                   }
for key in list(fileversion_dict.keys()):
    fileversion_dict[ensure_bytes(key)] = fileversion_dict[key]
# Record length in bytes per format version
record_lengths = {'1A': 512,
                  '2A': 1024*4}
# Header-section identifier -> section name
header_id_numbers = {0: 'USER CODE',
                     -1: 'COMMENT',
                     -2: 'GENERAL',
                     -3: 'POSITION',
                     -4: 'SPECTRO',
                     -5: 'BASELINE',
                     -6: 'HISTORY',
                     -7: 'UNKNOWN-APEX',
                     -9: 'GAUSSFIT',
                     -10: 'DRIFT',
                     -11: 'BEAMSWITCH',
                     -12: 'SHELLFIT',
                     -13: 'NH3FIT',
                     -14: 'CALIBRATION',
                     -18: 'ABSFIT',
                    }
# Header-section identifier -> section length in words (None = variable)
header_id_lengths = {-2: 9,
                     -3: 17,
                     -4: 17,
                     -5: None,
                     -6: 3,
                     -14: 25,
                    }
# number of words in the v2 file descriptor
filedescv2_nw1=14
keys_lengths = {
'unknown': [
'int32'),
('TELES' ,3,'|S12') ,
('DOBS' ,1,'int32'),
('DRED' ,1,'int32'),
('TYPEC' ,1,'int32'),
('KIND' ,1,'int32'),
('QUAL' ,1,'int32'),
('SCAN' ,1,'int32'),
('SUBSCAN' ,1,'int32'),
],
'COMMENT': [
('LTEXT',1,'int32'),
('CTEXT',1024//4,'|S1024'),
],
'GENERAL': [
('UT' ,2,'float64'),
('ST' ,2,'float64'),
('AZ' ,1,'float32'),
('EL' ,1,'float32'),
('TAU' ,1,'float32'),
('TSYS' ,1,'float32'),
('TIME' ,1,'float32'),
C',3,'|S12') ,
('EPOCH',1,'float32'),
('LAM' ,2,'float64'),
('BET' ,2,'float64'),
('LAMOF',1,'float32'),
('BETOF',1,'float32'),
('PROJ' ,1,'int32') ,
('SL0P' ,1,'float64'),
('RESTF' ,2,'float64'),
('NCHAN' ,1,'int32'),
('RCHAN' ,1,'float32'),
('FRES' ,1,'float32'),
('FOFF' ,1,'float32'),
('VRES' ,1,'float32'),
('VOFF' ,1,'float32'),
('BAD' ,1,'float32'),
'),
,
('DOPPLER',2,'float64'),
],
'CALIBRATION': [
('ALIGN',1,'int32'),
('BEEFF',1,'float32'), # [ ] Beam efficiency
('FOEFF',1,'float32'), # [ ] Forward efficiency
('GAINI',1,'float32'), # [ ] Image/Signal gain ratio
('H2OMM',1,'float32'), # [ mm] Water vapor content
('PAMB',1,'float32'), # [ hPa] Ambient pressure
('TAMB',1,'float32'), # [ K] Ambient temperature
('TATMS',1,'float32'), # [ K] Atmosphere temp. in signal band
('TCHOP',1,'float32'), # [ K] Chopper temperature
('TCOLD',1,'float32'), # [ K] Cold load temperature
('TAUS',1,'float32'), # [neper] Opacity in signal band
('TAUI',1,'float32'), # [neper] Opacity in image band
('TATMI',1,'float32'), # [ K] Atmosphere temp. in image band
('TREC',1,'float32'), # [ K] Receiver temperature
('CMODE',1,'int32'), # [ code] Calibration mode
('ATFAC',1,'float32'), # [ ] Applied calibration factor
('ALTI',1,'float32'), # [ m] Site elevation
('COUNT',3,'3float32'), # [count] Power of Atm., Chopp., Cold
('LCALOF',1,'float32'), # [ rad] Longitude offset for sky measurement
('BCALOF',1,'float32'), # [ rad] Latitude offset for sky measurement
('GEOLONG',1,'float64'), # [ rad] Geographic longitude of observatory # MAY NOT EXIST IN OLD CLASS
('GEOLAT',1,'float64'), # [ rad] Geographic latitude of observatory # MAY NOT EXIST IN OLD CLASS
],
'BASELINE':[
('DEG',1,'int32'), #! [ ] Degree of last baseline
('SIGFI',1,'float32'), #! [Int. unit] Sigma
('AIRE',1,'float32'), #! [Int. unit] Area under windows
('NWIND',1,'int32'), #! [ ] Number of line windows
# WARNING: These should probably have 'n', the second digit, = NWIND
# The docs are really unclear about this, they say "W1(MWIND)"
('W1MWIND',1,'float32'), #! [km/s] Lower limits of windows
('W2MWIND',1,'float32'), #! [km/s] Upper limits of windows
('SINUS',3,'float32'), #![] Sinus baseline results
],
'DRIFT':[ # 16?
('FREQ',1,'float64') , #! [ MHz] Rest frequency real(kind=8) ::
('WIDTH',1,'float32'), #! [ MHz] Bandwidth real(kind=4) ::
('NPOIN',1,'int32') , #! [ ] Number of data points integer(kind=4) ::
('RPOIN',1,'float32'), #! [ ] Reference point real(kind=4) ::
('TREF',1,'float32') , #! [ ?] Time at reference real(kind=4) ::
('AREF',1,'float32') , #! [ rad] Angular offset at ref. real(kind=4) ::
('APOS',1,'float32') , #! [ rad] Position angle of drift real(kind=4) ::
('TRES',1,'float32') , #! [ ?] Time resolution real(kind=4) ::
('ARES',1,'float32') , #! [ rad] Angular resolution real(kind=4) ::
('BAD',1,'float32') , #! [ ] Blanking value real(kind=4) ::
('CTYPE',1,'int32') , #! [code] Type of offsets integer(kind=4) ::
('CIMAG',1,'float64'), #! [ MHz] Image frequency real(kind=8) ::
('COLLA',1,'float32'), #! [ ?] Collimation error Az real(kind=4) ::
('COLLE',1,'float32'), #! [ ?] Collimation error El real(kind=4) ::
],
}
def _read_bytes(f, n):
return f.read(n)
def _read_byte(f):
return numpy.uint8(struct.unpack('=B', f.read(4)[:1])[0])
def _read_int16(f):
return numpy.int16(struct.unpack('=h', f.read(4)[2:4])[0])
def _read_int32(f):
return numpy.int32(struct.unpack('=i', f.read(4))[0])
def _read_int64(f):
return numpy.int64(struct.unpack('=q', f.read(8))[0])
def _read_float32(f):
return numpy.float32(struct.unpack('=f', f.read(4))[0])
def _align_32(f):
pos = f.tell()
if pos % 4 != 0:
f.seek(pos + 4 - pos % 4)
return
def _read_word(f, length):
    """Read *length* bytes from *f* and realign to the next 32-bit boundary;
    returns ``None`` when *length* is not positive."""
    if length <= 0:
        return None
    chars = _read_bytes(f, length)
    _align_32(f)
    return chars
def _read_int(f):
return struct.unpack('i',f.read(4))
def is_ascii(s):
    """True if the bytes *s* are pure ASCII (a decode/encode round trip via
    UTF-8 preserves the byte length).  Non-ASCII bytes raise
    UnicodeDecodeError, exactly as in the original."""
    roundtrip = s.decode('ascii').encode('utf-8')
    return len(roundtrip) == len(s)
def is_all_null(s):
    """True if *s* consists entirely of NUL characters / NUL bytes.

    BUG FIX: on Python 3, iterating a ``bytes`` object yields ints, so the
    original two comparisons (against the str and bytes NUL literals) were
    both always False for bytes input; a third clause comparing against the
    integer 0 handles that case.
    """
    return (all(x == '\x00' for x in s) or
            all(x == b'\x00' for x in s) or
            all(x == 0 for x in s))
def _read_indices(f, file_description):
    """Read every entry index of the file (entries are 1-indexed on disk;
    there are xnext-1 of them).  Returns a list of index dicts.

    NOTE(review): ``extension_positions`` is computed but unused below --
    positions are resolved per entry inside _read_index via _find_index.
    """
    #if file_description['version'] in (1,2):
    #    extension_positions = (file_description['aex']-1)*file_description['reclen']*4
    #    all_indices = {extension:
    #                   [_read_index(f,
    #                                filetype=file_description['version'],
    #                                entry=ii,
    #                                #position=position,
    #                               )
    #                    for ii in range(file_description['lex1'])]
    #                   for extension,position in enumerate(extension_positions)
    #                   if position > 0
    #                  }
    #elif file_description['version'] == 1:
    extension_positions = ((file_description['aex'].astype('int64')-1)
                           *file_description['reclen']*4)
    all_indices = [_read_index(f,
                               filetype=file_description['version'],
                               # 1-indexed files
                               entry_number=ii+1,
                               file_description=file_description,
                              )
                   for ii in range(file_description['xnext']-1)]
    #else:
    #    raise ValueError("Invalid file version {0}".format(file_description['version']))
    return all_indices
def _find_index(entry_number, file_description, return_position=False):
    """Locate the Entry Index of 1-based *entry_number*.

    Returns either the absolute byte offset of the index (when
    *return_position* is True) or the (record number, word-in-record) pair.
    Mirrors the GILDAS bookkeeping: gex==10 means linear extension growth,
    anything else is treated as exponential growth via the lexn table.
    """
    if file_description['gex'] == 10:
        kex=(entry_number-1)//file_description['lex1'] + 1
    else:
        # exponential growth:
        #kex = gi8_dicho(file_description['nex'], file_description['lexn'], entry_number) - 1
        kex = len([xx for xx in file_description['lexn'] if xx<entry_number])
    ken = entry_number - file_description['lexn'][kex-1]
    #! Find ken (relative entry number in the extension, starts from 1)
    #ken = entry_num - file%desc%lexn(kex-1)
    kb = ((ken-1)*file_description['lind'])//file_description['reclen']
    #kb = ((ken-1)*file%desc%lind)/file%desc%reclen  ! In the extension, the
    # ! relative record position (as an offset, starts from 0) where the
    # ! Entry Index starts.  NB: there can be a non-integer number of Entry
    # ! Indexes per record
    # Subtract 1: 'aex' is 1-indexed
    kbl = (file_description['aex'][kex-1]+kb)-1
    # kbl = file%desc%aex(kex)+kb  ! The absolute record number where the Entry Index goes
    k = ((ken-1)*file_description['lind']) % file_description['reclen']
    #k = mod((ken-1)*file%desc%lind,file%desc%reclen)+1  ! = in the record, the
    # ! first word of the Entry Index of the entry number 'entry_num'
    if return_position:
        return (kbl*file_description['reclen']+k)*4
    else:
        return kbl,k
def _read_index(f, filetype='v1', DEBUG=False, clic=False, position=None,
                entry_number=None, file_description=None):
    """Read one Entry Index from *f* and return it as a dict.

    The v1 ('1A') layout uses X-prefixed field names; the v2 ('2A') layout
    uses unprefixed names; in both cases aliases are added so downstream
    code can use one naming scheme (SOURC/LINE/OFF1/OFF2/SCAN/...).
    *clic* selects the alternative CLIC tail of the v1 index.
    """
    if position is not None:
        f.seek(position)
    if entry_number is not None:
        indpos = _find_index(entry_number, file_description, return_position=True)
        f.seek(indpos)
    x0 = f.tell()
    if filetype in ('1A ','v1', 1):
        log.debug('Index filetype 1A')
        index = {
                "XBLOC":_read_int32(f),
                "XNUM":_read_int32(f),
                "XVER":_read_int32(f),
                "XSOURC":_read_word(f,12),
                "XLINE":_read_word(f,12),
                "XTEL":_read_word(f,12),
                "XDOBS":_read_int32(f),
                "XDRED":_read_int32(f),
                "XOFF1":_read_float32(f),# first offset (real, radians)
                "XOFF2":_read_float32(f),# second offset (real, radians)
                "XTYPE":_read_int32(f),# coordinate system ('EQ'', 'GA', 'HO')
                "XKIND":_read_int32(f),
                "XQUAL":_read_int32(f),
                "XSCAN":_read_int32(f),
        }
        # aliases matching the v2 field names
        index['BLOC'] = index['XBLOC']
        index['WORD'] = 1
        index['SOURC'] = index['CSOUR'] = index['XSOURC']
        index['DOBS'] = index['CDOBS'] = index['XDOBS']
        index['CTELE'] = index['XTEL']
        index['LINE'] = index['XLINE']
        index['OFF1'] = index['XOFF1']
        index['OFF2'] = index['XOFF2']
        index['QUAL'] = index['XQUAL']
        index['SCAN'] = index['XSCAN']
        index['KIND'] = index['XKIND']
        if clic:
            nextchunk = {
                "XPROC":_read_int32(f),
                "XITYPE":_read_int32(f),
                "XHOURANG":_read_float32(f),
                "XPROJNAME":_read_int32(f),
                "XPAD1":_read_int32(f),
                "XBPC" :_read_int32(f),
                "XIC" :_read_int32(f),
                "XRECEI" :_read_int32(f),
                "XUT":_read_float32(f),
                "XPAD2":numpy.fromfile(f,count=3,dtype='int32')
            }
        else:
            nextchunk = {"XPOSA":_read_float32(f),
                         "XSUBSCAN":_read_int32(f),
                         'XPAD2': numpy.fromfile(f,count=10,dtype='int32'),
            }
            nextchunk['SUBSCAN'] = nextchunk['XSUBSCAN']
            nextchunk['POSA'] = nextchunk['XPOSA']
        index.update(nextchunk)
        # a v1 index is exactly 128 bytes; skip any remainder
        if (f.tell() - x0 != 128):
            missed_bits = (f.tell()-x0)
            X = f.read(128-missed_bits)
            if DEBUG: print("read_index missed %i bits: %s" % (128-missed_bits,X))
        if any(not is_ascii(index[x]) for x in ('XSOURC','XLINE','XTEL')):
            raise ValueError("Invalid index read from {0}.".format(x0))
    elif filetype in ('2A ','v2', 2):
        log.debug('Index filetype 2A')
        index = {
                "BLOC" : _read_int64(f) ,
                "WORD" : _read_int32(f) ,
                "NUM" : _read_int64(f) ,
                "VER" : _read_int32(f) ,
                "CSOUR" : _read_word(f,12),
                "CLINE" : _read_word(f,12),
                "CTELE" : _read_word(f,12),
                "DOBS" : _read_int32(f) ,
                "DRED" : _read_int32(f) ,
                "OFF1" : _read_float32(f),
                "OFF2" : _read_float32(f),
                "TYPE" : _read_int32(f) ,
                "KIND" : _read_int32(f) ,
                "QUAL" : _read_int32(f) ,
                "POSA" : _read_float32(f),
                "SCAN" : _read_int64(f) ,
                "SUBSCAN": _read_int32(f) ,
                }
        if any((is_all_null(index[x]) or not is_ascii(index[x]))
               for x in ('CSOUR','CLINE','CTELE')):
            raise ValueError("Invalid index read from {0}.".format(x0))
        # aliases matching the v1 field names
        index['SOURC'] = index['XSOURC'] = index['CSOUR']
        index['LINE'] = index['XLINE'] = index['CLINE']
        index['XKIND'] = index['KIND']
        try:
            index['DOBS'] = index['XDOBS'] = index['CDOBS']
        except KeyError:
            index['CDOBS'] = index['XDOBS'] = index['DOBS']
    else:
        raise NotImplementedError("Filetype {0} not implemented.".format(filetype))
    # NOTE(review): 60549 converts the CLASS day count to MJD, and the DOBS
    # line below converts it to a (fractional-year-like) value -- the exact
    # epoch conventions should be confirmed against the GILDAS spec.
    index['MJD'] = index['DOBS'] + 60549
    class_dobs = index['DOBS']
    index['DOBS'] = ((class_dobs + 365*2025)/365.2425 + 1)
    log.debug("Indexing finished at {0}".format(f.tell()))
    return index
def _read_header(f, type=0, position=None):
    """Read one header section of the given *type* (a keys_lengths key)
    starting at *position* (or the current offset).

    Returns a dict of {keyword: scalar value}; unknown section types yield
    an empty dict.

    DEAD CODE REMOVED: the original fell through to an unreachable
    ``raise ValueError`` placed after both return statements.
    """
    if position is not None:
        f.seek(position)
    if type in keys_lengths:
        # each keys_lengths entry is (name, word_count, numpy dtype)
        hdrsec = [(x[0], numpy.fromfile(f, count=1, dtype=x[2])[0])
                  for x in keys_lengths[type]]
        return dict(hdrsec)
    else:
        return {}
def _read_first_record(f):
    """Dispatch to the v1 or v2 first-record reader based on the 4-byte
    file-type code at the start of the file.  Unknown codes raise KeyError
    from the fileversion_dict lookup."""
    f.seek(0)
    filetype = f.read(4)
    version = fileversion_dict[filetype]
    if version == 'v1':
        return _read_first_record_v1(f)
    if version == 'v2':
        return _read_first_record_v2(f)
    raise ValueError("Unrecognized filetype {0}".format(filetype))
def _read_first_record_v1(f, record_length_words=128):
    """Read the first record (file descriptor) of a v1 ('1A') CLASS file.

    BUG FIX: the line after the signature had been mangled into a bare
    ``)`` (comment-stripping artifact); restored to ``f.seek(0)`` to match
    the v2 reader, which rewinds before re-reading the type code.
    """
    f.seek(0)
    file_description = {
        'code': f.read(4),
        'next': _read_int32(f),
        'lex': _read_int32(f),
        'nex': _read_int32(f),
        'xnext': _read_int32(f),
        'gex': 10.,
        'vind': 1,
        'version': 1,
        'nextrec': 3,
        'nextword': 1,
        'lind': 32,
        'kind': 'unknown',
        'flags': 0,
    }
    file_description['reclen'] = record_length_words
    # remainder of the 1024-byte first block holds the extension addresses
    ex = np.fromfile(f, count=(record_length_words*2-5), dtype='int32')
    file_description['ex'] = ex[ex!=0]
    file_description['nextrec'] = file_description['next']
    file_description['lex1'] = file_description['lex'] # number of entries
    file_description['lexn'] = (np.arange(file_description['nex']+1) *
                                file_description['lex1'])
    file_description['nentries'] = np.sum(file_description['lexn'])
    file_description['aex'] = file_description['ex'][:file_description['nex']]
    #file_description['version'] = fileversion_dict[file_description['code']]
    assert f.tell() == 1024
    # Something is not quite right with the 'ex' parsing
    #assert len(file_description['ex']) == file_description['nex']
    return file_description
def _read_first_record_v2(f):
    """Read the first record (file descriptor) of a v2 ('2A') CLASS file.

    Builds the cumulative entries-per-extension table ``lexn`` (linear
    growth when gex==10, otherwise mantissa-2 exponential growth) and the
    extension record addresses ``aex``.
    """
    f.seek(0)
    file_description = {
        'code': f.read(4),
        'reclen': _read_int32(f),
        'kind': _read_int32(f),
        'vind': _read_int32(f),
        'lind': _read_int32(f),
        'flags': _read_int32(f),
        'xnext': _read_int64(f),
        'nextrec': _read_int64(f),
        'nextword': _read_int32(f),
        'lex1': _read_int32(f),
        'nex': _read_int32(f),
        'gex': _read_int32(f),
    }
    file_description['lexn'] = [0]
    if file_description['gex'] == 10:
        for ii in range(1, file_description['nex']+1):
            file_description['lexn'].append(file_description['lexn'][-1]+file_description['lex1'])
    else:
        #! Exponential growth. Only growth with mantissa 2.0 is supported
        for ii in range(1, file_description['nex']):
            # I don't know what the fortran does here!!!
            nent = int(file_description['lex1'] * 2**(ii-1))
            file_description['lexn'].append(file_description['lexn'][-1]+nent)
    file_description['nentries'] = np.sum(file_description['lexn'])
    record_length_words = file_description['reclen']
    # extension addresses are int64 words; zeros mark unused slots
    aex = numpy.fromfile(f, count=(record_length_words-15)//2, dtype='int64')
    file_description['aex'] = aex[aex!=0]
    assert len(file_description['aex']) == file_description['nex']
    file_description['version'] = 2
    return file_description
def gi8_dicho(ninp, lexn, xval, ceil=True):
    """Dichotomic (binary) search over the first *ninp* entries of the
    ascending table *lexn*: returns the 1-based position where *xval*
    belongs (port of the GILDAS gi8_dicho helper; *ceil* is unused)."""
    lo = 1
    hi = ninp
    while hi > lo + 1:
        mid = (hi + lo) // 2
        if lexn[mid - 1] < xval:
            lo = mid
        else:
            hi = mid
    return hi
def _read_obshead(f, file_description, position=None, verbose=False):
    """Dispatch observation-header parsing to the v1 or v2 reader according
    to file_description['version']."""
    version = file_description['version']
    if version == 1:
        return _read_obshead_v1(f, position=position, verbose=verbose)
    if version == 2:
        return _read_obshead_v2(f, position=position)
    raise ValueError("Invalid file version {0}.".
                     format(version))
def _read_obshead_v2(f, position=None):
    """Read a v2 observation header at *position* (default: current offset).

    Returns (entry number XNUM, header dict, {section number: address}).
    NOTE(review): section_lengths is read (to advance the file pointer) but
    not returned.
    """
    if position is not None:
        f.seek(position)
    else:
        position = f.tell()
    IDcode = f.read(4)
    if IDcode.strip() != b'2':
        raise IndexError("Observation Header reading failure at {0}.  "
                         "Record does not appear to be an observation header.".
                         format(position))
    # rewind so the code is re-read as part of the header struct below
    f.seek(position)
    entrydescv2_nw1 = 11
    entrydescv2_nw2 = 5
    obshead = {
        'CODE': f.read(4),
        'VERSION': _read_int32(f),
        'NSEC': _read_int32(f),
        'NWORD': _read_int64(f),
        'ADATA': _read_int64(f),
        'LDATA': _read_int64(f),
        'XNUM': _read_int64(f),
    }
    section_numbers = np.fromfile(f, count=obshead['NSEC'], dtype='int32')
    section_lengths = np.fromfile(f, count=obshead['NSEC'], dtype='int64')
    section_addresses = np.fromfile(f, count=obshead['NSEC'], dtype='int64')
    return obshead['XNUM'],obshead,dict(zip(section_numbers,section_addresses))
def _read_obshead_v1(f, position=None, verbose=False):
    """Read a v1 observation header at *position* (default: current offset).

    Returns (observation number, header dict, {section code: address}).
    NOTE(review): the unpack order below places nheaders *before*
    data_length, while the verbose print labels list data_length first --
    confirm against the GILDAS v1 layout if these fields matter.
    """
    if position is not None:
        f.seek(position)
    IDcode = f.read(4)
    if IDcode.strip() != b'2':
        raise IndexError("Observation Header reading failure at {0}.  "
                         "Record does not appear to be an observation header.".
                         format(f.tell() - 4))
    (nblocks, nbyteob, data_address, nheaders, data_length, obindex, nsec,
     obsnum) = numpy.fromfile(f, count=8, dtype='int32')
    if verbose:
        print("nblocks,nbyteob,data_address,data_length,nheaders,obindex,nsec,obsnum",nblocks,nbyteob,data_address,data_length,nheaders,obindex,nsec,obsnum)
        print("DATA_LENGTH: ",data_length)
    seccodes = numpy.fromfile(f,count=nsec,dtype='int32')
    seclen = numpy.fromfile(f,count=nsec,dtype='int32')
    secaddr = numpy.fromfile(f,count=nsec,dtype='int32')
    if verbose:
        print("Section codes, addresses, lengths: ",seccodes,secaddr,seclen)
    hdr = {'NBLOCKS':nblocks, 'NBYTEOB':nbyteob, 'DATAADDR':data_address,
           'DATALEN':data_length, 'NHEADERS':nheaders, 'OBINDEX':obindex,
           'NSEC':nsec, 'OBSNUM':obsnum}
    return obsnum,hdr,dict(zip(seccodes,secaddr))
# Not entirely clear what this is, but it is stuff that precedes the actual data
#
# Looks something like this:
# array([ 1, -2, -3, -4, -14,
# 9, 17, 18, 25, 55,
# 64, 81, 99, -1179344801, 979657591,
#
# -2, -3, -4, -14 indicate the 4 header types
# 9,17,18,25 *MAY* indicate the number of bytes in each
#
#
# HOW is it indicated how many entries there are?
# """
def downsample_1d(myarr, factor, estimator=np.mean, weight=None):
    """Downsample a 1D array by combining every *factor* consecutive
    samples with *estimator* (default: mean); the tail is cropped when the
    size is not a multiple of *factor*.

    When *weight* is given (same shape as *myarr*), a weighted combination
    is computed: estimator(data*weight) / estimator(weight) per output bin.

    BUG FIX / RECONSTRUCTION: the original 'def' line was mangled to
    ``or=np.mean, weight=None):`` by comment stripping; the signature is
    reconstructed from the call in test_downsample1d below.
    """
    if myarr.ndim != 1:
        raise ValueError("Only works on 1d data.  Says so in the title.")
    xs = myarr.size
    crarr = myarr[:xs-(xs % int(factor))]
    if weight is None:
        dsarr = estimator(np.concatenate([[crarr[i::factor] for i in
                                           range(factor)]]), axis=0)
    else:
        dsarr = estimator(np.concatenate([[crarr[i::factor]*weight[i::factor] for i in
                                           range(factor)]]), axis=0)
        warr = estimator(np.concatenate([[weight[i::factor] for i in
                                          range(factor)]]), axis=0)
        dsarr = dsarr/warr
    return dsarr
def test_downsample1d():
    """Weighted downsampling: the zero weight at index 5 removes sample 5
    from its pair, so bin (4,5) averages to 4.0 instead of 4.5."""
    data = np.arange(10)
    weight = np.ones(10)
    weight[5]=0
    assert np.all(downsample_1d(data, 2, weight=weight, estimator=np.mean) ==
                  np.array([0.5, 2.5, 4.0, 6.5, 8.5]))
def read_observation(f, obsid, file_description=None, indices=None,
                     my_memmap=None, memmap=True, verbose=False):
    """Read observation number *obsid* and return (spectrum, header).

    *f* may be a filename (opened/closed here) or an open binary file
    object, in which case *my_memmap* must be provided when *memmap* is
    True.  file_description/indices are read from the file when not given.

    BUG FIX: the no-NCHAN branch dropped into ipdb (a leftover debugger
    breakpoint) -- it now raises ValueError so callers get an exception.
    """
    if isinstance(f, str):
        f = open(f,'rb')
        opened = True
        if memmap:
            my_memmap = numpy.memmap(f, offset=0, dtype='float32',
                                     mode='r')
        else:
            my_memmap = None
    elif my_memmap is None and memmap:
        raise ValueError("Must pass in a memmap object if passing in a file object.")
    else:
        opened = False

    if file_description is None:
        file_description = _read_first_record(f)
    if indices is None:
        indices = _read_indices(f, file_description)

    index = indices[obsid]
    # BLOC/WORD are 1-indexed record/word coordinates
    obs_position = (index['BLOC']-1)*file_description['reclen']*4 + (index['WORD']-1)*4
    log.debug("Reading observation at position {0}".format(obs_position))
    obsnum,obshead,sections = _read_obshead(f, file_description,
                                            position=obs_position,
                                            verbose=verbose)
    header = obshead
    datastart = 0
    for section_id,section_address in iteritems(sections):
        # section addresses are 1-indexed word offsets from the header start
        sec_position = obs_position + (section_address-1)*4
        temp_hdr = _read_header(f, type=header_id_numbers[section_id],
                                position=sec_position)
        header.update(temp_hdr)
        datastart = max(datastart,f.tell())
    hdr = header
    hdr.update(obshead)
    hdr.update({'OBSNUM':obsnum,'RECNUM':obsid})
    hdr.update({'RA':hdr['LAM']/pi*180,'DEC':hdr['BET']/pi*180})
    hdr.update({'RAoff':hdr['LAMOF']/pi*180,'DECoff':hdr['BETOF']/pi*180})
    hdr.update({'OBJECT':hdr['SOURC'].strip()})
    hdr.update({'BUNIT':'Tastar'})
    hdr.update({'EXPOSURE':float(hdr['TIME'])})
    hdr['HDRSTART'] = obs_position
    hdr['DATASTART'] = datastart
    hdr.update(indices[obsid])
    hdr.update({'OBSDATE': hdr['MJD'] + hdr['UT']/2./pi})
    if hdr['KIND'] == 1:
        nchan = hdr['NPOIN']
    elif 'NCHAN' in hdr:
        nchan = hdr['NCHAN']
    else:
        log.error("No NCHAN in header.  This is not a spectrum.")
        raise ValueError("No NCHAN in header. This is not a spectrum.")
    f.seek(datastart-1)
    spec = _read_spectrum(f, position=datastart-1, nchan=nchan,
                          memmap=memmap, my_memmap=my_memmap)
    if opened:
        f.close()
    return spec, hdr
def _read_spectrum(f, position, nchan, my_memmap=None, memmap=True):
if position != f.tell():
log.warning("Reading data from {0}, but the file is wound "
"to {1}.".format(position, f.tell()))
if memmap:
here = position
spectrum = my_memmap[here//4:here//4+nchan]
f.seek(here+nchan*4)
else:
f.seek(position)
spectrum = numpy.fromfile(f,count=nchan,dtype='float32')
return spectrum
def _spectrum_from_header(fileobj, header, memmap=None):
    """Read the spectrum described by ``header`` from ``fileobj``.

    Uses the ``DATASTART`` byte offset and ``NCHAN`` (falling back to
    ``NPOIN`` for non-spectral kinds) recorded in the header.

    BUGFIX: the channel-count expression previously referenced an undefined
    name ``hdr`` (``... if 'NCHAN' in hdr else hdr['NPOIN']``), which raised
    NameError on every call; it now consistently uses the ``header`` argument.
    """
    nchan = header['NCHAN'] if 'NCHAN' in header else header['NPOIN']
    return _read_spectrum(fileobj, position=header['DATASTART'],
                          nchan=nchan,
                          my_memmap=memmap)
def clean_header(header):
    """Return a FITS-safe copy of ``header``.

    Keys are truncated to 8 characters.  Scalar int/float/str values pass
    through unchanged; small arrays (size 2-10) are exploded into numbered
    keys (7-char stem + digit); any other value is stringified.

    Raises
    ------
    ValueError
        If an array value has more than 10 elements.
    """
    cleaned = {}
    for key, value in header.items():
        if isinstance(value, (int, float, str)):
            cleaned[key[:8]] = value
        elif isinstance(value, np.ndarray) and value.size > 1:
            if value.size > 10:
                raise ValueError("Large array being put in header.  That's no good.  key={0}".format(key))
            for index, element in enumerate(value):
                cleaned[key[:7] + str(index)] = element
        else:
            # not FITS-representable as-is: fall back to its string form
            cleaned[key[:8]] = str(value)
    return cleaned
class ClassObject(object):
    """In-memory index of a GILDAS CLASS binary file.

    On construction, the file's record structure and per-observation index
    entries are read, the data area is memory-mapped as float32, and two
    derived quantities are attached to every index entry: the position angle
    from each pointing to the next (``COMPPOSA``) and an on-the-fly scan
    counter (``OTFSCAN``).  Spectra themselves are loaded lazily through
    ``self._spectra`` (a :class:`LazyItem`).
    """

    def __init__(self, filename, verbose=False):
        t0 = time.time()
        self._file = open(filename, 'rb')
        # record length etc. from the file's first record
        self.file_description = _read_first_record(self._file)
        self.allind = _read_indices(self._file, self.file_description)
        # float32 view of the whole file, shared by all lazy spectrum reads
        self._data = np.memmap(self._file, dtype='float32', mode='r')
        if verbose: log.info("Setting _spectra")
        self._spectra = LazyItem(self)
        t1 = time.time()
        if verbose: log.info("Setting posang. t={0}".format(t1-t0))
        self.set_posang()
        t2 = time.time()
        if verbose: log.info("Identifying otf scans. t={0}".format(t2-t1))
        self._identify_otf_scans(verbose=verbose)
        t3 = time.time()
        #self._load_all_spectra()
        if verbose:
            log.info("Loaded CLASS object with {3} indices.  Time breakdown:"
                     " {0}s for indices, "
                     "{1}s for posang, and {2}s for OTF scan identification"
                     .format(t1-t0, t2-t1, t3-t2, len(self.allind)))

    def __repr__(self):
        s = "\n".join(["{k}: {v}".format(k=k, v=v)
                       for k, v in iteritems(self.getinfo())])
        return "ClassObject({id}) with {nspec} entries\n".format(id=id(self),
                                                                 nspec=len(self.allind)) + s

    def getinfo(self, allsources=False):
        """Summarize the telescopes, lines, scans, and sources in the file.

        With ``allsources=False`` (default), calibration pseudo-sources
        (SKY-/TSYS/...) are excluded.
        """
        info = dict(
            tels = self.tels,
            lines = self.lines,
            scans = self.scans,
            sources = self.sources if allsources else self.sci_sources,
        )
        return info

    def set_posang(self):
        """Attach COMPPOSA: position angle (deg) from each pointing to the next."""
        h0 = self.headers[0]
        for h in self.headers:
            dx = h['OFF1'] - h0['OFF1']
            dy = h['OFF2'] - h0['OFF2']
            h['COMPPOSA'] = np.arctan2(dy, dx)*180/np.pi
            h0 = h

    def _identify_otf_scans(self, verbose=False):
        """Tag index entries with OTFSCAN counters and per-group SCANPOSA.

        NOTE(review): ``h0`` is never advanced inside the loop, so all
        comparisons are against the *first* index entry; preserved as-is
        pending confirmation against real data.
        """
        h0 = self.allind[0]
        st = 0
        otfscan = 0
        posangs = [h['COMPPOSA'] for h in self.allind]
        if verbose:
            pb = ProgressBar(len(self.allind))
        for ii, h in enumerate(self.allind):
            if (h['SCAN'] != h0['SCAN']
                or h['SOURC'] != h0['SOURC']):
                # a new scan/source begins: close out the previous group
                h0['FIRSTSCAN'] = st
                cpa = np.median(posangs[st:ii])
                for hh in self.allind[st:ii]:
                    hh['SCANPOSA'] = cpa % 180
                st = ii
                if h['SCAN'] == h0['SCAN']:
                    h0['OTFSCAN'] = otfscan
                    otfscan += 1
                    h['OTFSCAN'] = otfscan
                else:
                    otfscan = 0
                    h['OTFSCAN'] = otfscan
            else:
                h['OTFSCAN'] = otfscan
            if verbose:
                pb.update(ii)

    def listscans(self, source=None, telescope=None, out=sys.stdout):
        """Print a table of scans, optionally filtered by source/telescope regex.

        Returns the summary dict of the *last* printed scan (None if nothing
        matched).  NOTE(review): ``data_rows`` collects every scan's summary
        but is never returned -- possibly the original intent; left unchanged
        to preserve the return type for existing callers.
        """
        minid = 0
        scan = -1
        sourc = ""
        #tel = ''
        minoff1,maxoff1 = np.inf,-np.inf
        minoff2,maxoff2 = np.inf,-np.inf
        ttlangle,nangle = 0.0,0
        # BUGFIX: previously ``data`` was unbound (NameError at the final
        # return) when no scan passed the filters.
        data = None
        print("{entries:15s} {SOURC:12s} {XTEL:12s} {SCAN:>8s} {SUBSCAN:>8s} "
              "[ {RAmin:>12s}, {RAmax:>12s} ] "
              "[ {DECmin:>12s}, {DECmax:>12s} ] "
              "{angle:>12s} {SCANPOSA:>12s} {OTFSCAN:>8s} {TSYS:>8s} {UTD:>12s}"
              .format(entries='Scans', SOURC='Source', XTEL='Telescope',
                      SCAN='Scan', SUBSCAN='Subscan',
                      RAmin='min(RA)', RAmax='max(RA)',
                      DECmin='min(DEC)', DECmax='max(DEC)',
                      SCANPOSA='Scan PA',
                      angle='Angle', OTFSCAN='OTFscan',
                      TSYS='TSYS', UTD='UTD'),
              file=out)
        data_rows = []
        for ii,row in enumerate(self.headers):
            if (row['SCAN'] == scan
                and row['SOURC'] == sourc
                #and row['XTEL'] == tel
               ):
                # still inside the current scan: extend its bounding box
                minoff1 = min(minoff1, row['OFF1'])
                maxoff1 = max(maxoff1, row['OFF1'])
                minoff2 = min(minoff2, row['OFF2'])
                maxoff2 = max(maxoff2, row['OFF2'])
                ttlangle += np.arctan2(row['OFF2'] - prevrow['OFF2'],
                                       row['OFF1'] - prevrow['OFF1'])%np.pi
                nangle += 1
                prevrow = row
            else:
                if scan == -1:
                    # first row ever: just start the first scan group
                    scan = row['SCAN']
                    sourc = row['SOURC']
                    #tel = row['XTEL']
                    prevrow = row
                    continue

                ok = True
                if source is not None:
                    if isinstance(source, (list,tuple)):
                        ok = ok and any(re.search((s), prevrow['SOURC'])
                                        for s in source)
                    else:
                        ok = ok and re.search((source), prevrow['SOURC'])
                if telescope is not None:
                    ok = ok and re.search((telescope), prevrow['XTEL'])
                if ok:
                    # offsets are radians; report arcseconds
                    data = dict(RAmin=minoff1*180/np.pi*3600,
                                RAmax=maxoff1*180/np.pi*3600,
                                DECmin=minoff2*180/np.pi*3600,
                                DECmax=maxoff2*180/np.pi*3600,
                                angle=(ttlangle/nangle)*180/np.pi if nangle>0 else 0,
                                e0=minid,
                                e1=ii-1,
                                #TSYS=row['TSYS'] if 'TSYS' in row else '--',
                                UTD=row['DOBS']+row['UT'] if 'UT' in row else -99,
                                **prevrow)
                    print("{e0:7d}-{e1:7d} {SOURC:12s} {XTEL:12s} {SCAN:8d} {SUBSCAN:8d} "
                          "[ {RAmin:12f}, {RAmax:12f} ] "
                          "[ {DECmin:12f}, {DECmax:12f} ] "
                          "{angle:12.1f} {SCANPOSA:12.1f} {OTFSCAN:8d}"
                          " {TSYS:>8.1f} {UTD:12f}".
                          format(**data),
                          file=out)
                    data_rows.append(data)

                # reset accumulators for the next scan group
                minoff1,maxoff1 = np.inf,-np.inf
                minoff2,maxoff2 = np.inf,-np.inf
                ttlangle,nangle = 0.0,0
                scan = row['SCAN']
                sourc = row['SOURC']
                #tel = row['XTEL']
                minid = ii
        return data

    @property
    def tels(self):
        """Set of telescope names (cached)."""
        if hasattr(self,'_tels'):
            return self._tels
        else:
            self._tels = set([h['CTELE'] for h in self.allind])
            #testing if CTELE even works
            return self._tels

    @property
    def sources(self):
        """Set of all source names, including calibration pseudo-sources (cached)."""
        if hasattr(self,'_source'):
            return self._source
        else:
            self._source = set([h['SOURC'] for h in self.allind])
            return self._source

    @property
    def scans(self):
        """Set of scan numbers (cached)."""
        if hasattr(self,'_scan'):
            return self._scan
        else:
            self._scan = set([h['SCAN'] for h in self.allind])
            return self._scan

    @property
    def sci_sources(self):
        """Science sources only: excludes SKY-/TSYS/TCAL/TREC/HOT-/COLD entries."""
        return set([s for s in self.sources
                    if s[:4] not in ('SKY-', 'TSYS', 'TCAL', 'TREC', 'HOT-',
                                     'COLD')])

    @property
    def lines(self):
        """Set of line names (cached)."""
        if hasattr(self,'_lines'):
            return self._lines
        else:
            self._lines = set([h['LINE'] for h in self.allind])
            return self._lines

    def _load_all_spectra(self, indices=None):
        """Eagerly load (and cache) the spectra for ``indices`` (default: all)."""
        if indices is None:
            indices = range(self.file_description['xnext']-1)

        if hasattr(self, '_loaded_indices'):
            indices_set = set(indices)
            indices_to_load = (indices_set.difference(self._loaded_indices))
            self._loaded_indices = self._loaded_indices.union(indices_set)

            # BUGFIX: this previously did ``if any(indices_to_load)`` (which
            # is False when the only missing index is 0) and then iterated
            # ``xrange(indices_to_load)``, which raises TypeError because
            # xrange requires an int, not a set.
            if indices_to_load:
                pb = ProgressBar(len(indices_to_load))
                for ii, k in enumerate(indices_to_load):
                    self._spectra[k]
                    pb.update(ii)
        else:
            self._loaded_indices = set(indices)
            self._spectra.load_all()

    @property
    def spectra(self):
        """List of loaded spectra (data arrays only)."""
        return [x[0] for x in self._spectra]

    @property
    def headers(self):
        """Per-observation headers: full header if loaded, else the raw index entry."""
        return [self._spectra[ii][1]
                if ii in self._spectra else x
                for ii,x in enumerate(self.allind)]

    def select_spectra(self,
                       all=None,
                       line=None,
                       linere=None,
                       linereflags=re.IGNORECASE,
                       number=None,
                       scan=None,
                       offset=None,
                       source=None,
                       sourcere=None,
                       sourcereflags=re.IGNORECASE,
                       range=None,
                       quality=None,
                       telescope=None,
                       telescopere=None,
                       telescopereflags=re.IGNORECASE,
                       subscan=None,
                       entry=None,
                       posang=None,
                       #observed=None,
                       #reduced=None,
                       frequency=None,
                       section=None,
                       user=None,
                       include_old_versions=False,
                      ):
        """Return the integer indices of observations matching ALL given criteria.

        ``line``/``source``/``telescope`` are matched as escaped literals
        (case-insensitive); the ``*re`` variants are treated as regexes.
        ``range`` is (off1min, off1max, off2min, off2max) in radians;
        ``frequency`` and ``posang`` are (min, max) pairs.  ``entry`` is a
        2-tuple of raw entry numbers and short-circuits all other filters.
        """
        if entry is not None and len(entry)==2:
            return irange(entry[0], entry[1])

        if frequency is not None:
            # rest frequency lives in the full header, so everything must be loaded
            self._load_all_spectra()

        sel = [(re.search(re.escape(ensure_bytes(line)), h['LINE'], re.IGNORECASE)
                if line is not None else True) and
               (re.search(ensure_bytes(linere), h['LINE'], linereflags)
                if linere is not None else True) and
               (h['SCAN'] == scan if scan is not None else True) and
               ((h['OFF1'] == offset or
                 h['OFF2'] == offset) if offset is not None else True) and
               (re.search(re.escape(ensure_bytes(source)), h['CSOUR'], re.IGNORECASE)
                if source is not None else True) and
               (re.search(ensure_bytes(sourcere), h['CSOUR'], sourcereflags)
                if sourcere is not None else True) and
               (h['OFF1']>range[0] and h['OFF1'] < range[1] and
                h['OFF2']>range[2] and h['OFF2'] < range[3]
                if range is not None and len(range)==4 else True) and
               (h['QUAL'] == quality if quality is not None else True) and
               (re.search(re.escape(ensure_bytes(telescope)), h['CTELE'], re.IGNORECASE)
                if telescope is not None else True) and
               (re.search(ensure_bytes(telescopere), h['CTELE'], telescopereflags)
                if telescopere is not None else True) and
               (h['SUBSCAN']==subscan if subscan is not None else True) and
               ('RESTF' in h and # Need to check that it IS a spectrum: continuum data can't be accessed this way
                h['RESTF'] > frequency[0] and
                h['RESTF'] < frequency[1]
                if frequency is not None and len(frequency)==2
                else True) and
               (h['COMPPOSA']%180 > posang[0] and
                h['COMPPOSA']%180 < posang[1]
                if posang is not None and len(posang)==2
                else True) and
               # probably not a valid spectrum?
               (h.get('XVER', h.get('VER', -999)) > 0
                if not include_old_versions else True)
               for h in self.headers
              ]

        return [ii for ii,k in enumerate(sel) if k]

    def get_spectra(self, progressbar=True, **kwargs):
        """Load and return [(spectrum, header), ...] for the matching selection.

        Raises ValueError if the selection is empty.
        """
        selected_indices = self.select_spectra(**kwargs)
        # BUGFIX: ``if not any(selected_indices)`` treated a selection
        # containing only index 0 as empty (0 is falsy); test emptiness.
        if not selected_indices:
            raise ValueError("Selection yielded empty.")
        self._spectra.load(selected_indices, progressbar=progressbar)
        return [self._spectra[ii] for ii in selected_indices]

    def get_pyspeckit_spectra(self, progressbar=True, **kwargs):
        """Like :meth:`get_spectra`, but wrap each result in a pyspeckit.Spectrum."""
        spdata = self.get_spectra(progressbar=progressbar, **kwargs)

        spectra = [pyspeckit.Spectrum(data=data,
                                      xarr=make_axis(header),
                                      header=clean_header(header))
                   for data,header in spdata]

        return spectra

    def read_observations(self, observation_indices, progressbar=True):
        """Load the given observation indices and return [(spectrum, header), ...]."""
        self._spectra.load(observation_indices, progressbar=progressbar)
        return [self._spectra[ii] for ii in observation_indices]
@print_timing
def read_class(filename, downsample_factor=None, sourcename=None,
               telescope=None, line=None, posang=None, verbose=False,
               flag_array=None):
    """Read spectra from a CLASS file, optionally filtered and downsampled.

    Parameters
    ----------
    filename : str
        Path to the CLASS binary file.
    downsample_factor : int, optional
        If given, spectra are downsampled by this factor and the headers'
        spectral-axis keywords are adjusted to match.
    sourcename, telescope, line : str or list of str, optional
        Selection criteria forwarded to ``ClassObject.select_spectra``;
        scalars are wrapped into one-element lists.  ``None`` entries match
        everything.
    posang : tuple, optional
        (min, max) position-angle selection in degrees.
    flag_array : bool array, optional
        Channels flagged True get zero weight during downsampling.

    Returns
    -------
    (spectra, headers, indexes) or None if the selection is empty.
    """
    classobj = ClassObject(filename)

    if not isinstance(sourcename, (list,tuple)):
        sourcename = [sourcename]
    if not isinstance(telescope, (list,tuple)):
        telescope = [telescope]
    if not isinstance(line, (list,tuple)):
        line = [line]

    spectra,headers = [],[]
    if verbose:
        log.info("Reading...")
    # union over the cross product of all (source, telescope, line) criteria
    selection = [ii
                 for source in sourcename
                 for tel in telescope
                 for li in line
                 for ii in classobj.select_spectra(sourcere=source,
                                                   telescope=tel,
                                                   line=li,
                                                   posang=posang)]
    sphdr = classobj.read_observations(selection)
    if len(sphdr) == 0:
        return None
    spec,hdr = zip(*sphdr)
    spectra += spec
    headers += hdr

    # NOTE: ``indexes`` aliases the same header dicts as ``headers``;
    # downsample_header mutates each dict in place (and only the *list*
    # binding of ``headers`` is replaced below), so indexes sees the
    # downsampled keywords too.
    indexes = headers
    # flag_array is boolean: inverting it yields the downsampling weights
    weight = ~flag_array if flag_array is not None else None

    if downsample_factor is not None:
        if verbose:
            log.info("Downsampling...")
        spectra = [downsample_1d(spec, downsample_factor,
                                 weight=weight)
                   for spec in ProgressBar(spectra)]
        headers = [downsample_header(h, downsample_factor)
                   for h in ProgressBar(headers)]
        for hdr in headers:
            stringify_header(hdr)

    return spectra,headers,indexes
def stringify_header(header):
    """Coerce header values, in place, into FITS-serializable types.

    bytes values are decoded to str; any value that is not a plain
    string/number/bool (or numpy scalar equivalent) is stringified, with
    newline-class characters stripped out.
    """
    from six import string_types, integer_types
    import string
    FITS_allowed_types = (string_types + integer_types +
                          (float, complex, bool, np.floating, np.integer,
                           np.complexfloating, np.bool_))
    # string.printable[96:] is the non-space whitespace ('\n\r\x0b\x0c'),
    # which FITS header cards cannot contain
    bad_chars = string.printable[96:]
    badcharre = re.compile("[{0}]".format(bad_chars))
    for key in list(header.keys()):
        value = header[key]
        if isinstance(value, bytes):
            header[key] = value.decode()
        elif not isinstance(value, FITS_allowed_types):
            header[key] = badcharre.sub("", str(value))
def downsample_header(hdr, downsample_factor):
    """Adjust spectral-axis keywords in ``hdr`` (in place) after downsampling.

    Channel counts shrink by the factor; per-channel resolutions (FRES/VRES)
    grow by it; the reference channel RCHAN is remapped onto the new grid.
    Returns the same (mutated) header dict.
    """
    for key in ('NCHAN', 'NPOIN', 'DATALEN'):
        if key in hdr:
            hdr[key] = int(hdr[key] / downsample_factor)
    # Remap the (1-based) reference channel onto the coarser grid.
    # NOTE(review): formula inherited from the original ("maybe wrong?"
    # per its author) -- confirm against CLASS axis conventions.
    scale = 1. / downsample_factor
    hdr['RCHAN'] = (hdr['RCHAN'] - 1) * scale + 0.5 + scale / 2.
    for key in ('FRES', 'VRES'):
        if key in hdr:
            hdr[key] = hdr[key] * downsample_factor
    return hdr
def make_axis(header, imagfreq=False):
    """Build the frequency axis (MHz) for a spectrum from its CLASS header.

    With ``imagfreq=False`` the axis is centered on the rest frequency
    (RESTF) plus the frequency offset; with ``imagfreq=True`` it is mirrored
    about the image sideband frequency (IMAGE).  Returns a
    ``units.SpectroscopicAxis``.
    """
    from .. import units
    rest_frequency = header.get('RESTF')
    xunits = 'MHz'
    nchan = header.get('NCHAN')
    voff = header.get('VOFF')
    foff = header.get('FOFF')
    doppler = header.get('DOPPLER')
    fres = header.get('FRES')
    refchan = header.get('RCHAN')
    imfreq = header.get('IMAGE')

    if foff in (None, 0.0) and voff not in (None, 0.0):
        # Radio convention: derive the frequency offset from the velocity offset
        foff = -voff / 2.997924580e5 * rest_frequency

    # offset of each (1-based) channel from the reference channel, in MHz
    channel_offsets = (numpy.arange(1, nchan + 1) - refchan) * fres
    if imagfreq:
        xarr = imfreq - channel_offsets
        refX = imfreq * u.MHz
    else:
        xarr = rest_frequency + foff + channel_offsets
        refX = rest_frequency * u.MHz
    return units.SpectroscopicAxis(xarr, unit='MHz', refX=refX)
@print_timing
def class_to_obsblocks(filename, telescope, line, datatuple=None, source=None,
                       imagfreq=False, DEBUG=False, **kwargs):
    """Read a CLASS file and group its spectra into pyspeckit ObsBlocks by scan.

    Parameters
    ----------
    filename : str
        CLASS file to read (ignored if ``datatuple`` is given).
    telescope, line : collection of str
        Only spectra whose (stripped) XTEL / LINE values are in these
        collections are kept.
    datatuple : (spectra, header, indexes), optional
        Pre-read output of ``read_class`` to use instead of re-reading.
    source : collection of str, optional
        If given, restrict to these (stripped) SOURC values.
    imagfreq : bool
        Build the axis on the image sideband frequency.

    Returns
    -------
    list of pyspeckit.ObsBlock, one per scan.
    """
    if datatuple is None:
        spectra,header,indexes = read_class(filename, **kwargs)
    else:
        spectra,header,indexes = datatuple

    obslist = []
    lastscannum = -1
    spectrumlist = None
    for sp,hdr,ind in zip(spectra,header,indexes):
        hdr.update(ind)
        # this is slow but necessary: build a FITS header card by card
        H = pyfits.Header()
        for k,v in iteritems(hdr):
            if hasattr(v,"__len__") and not isinstance(v,str):
                if len(v) > 1:
                    # explode array values into numbered keys; the key stem
                    # is shortened to leave room for the index digits
                    if len(v) < 10:
                        for ii,vv in enumerate(v):
                            newkey = k[:7]+str(ii)
                            H[newkey] = vv
                    elif len(v) < 100:
                        for ii,vv in enumerate(v):
                            newkey = k[:6]+str(ii)
                            H[newkey] = vv
                    else:
                        raise ValueError("Too many entries for {0}".format(k))
                else:
                    H[k] = v[0]
            else:
                H[k] = v
        scannum = hdr['SCAN']
        if 'XTEL' in hdr and hdr['XTEL'].strip() not in telescope:
            continue
        if hdr['LINE'].strip() not in line:
            continue
        if (source is not None) and (hdr['SOURC'].strip() not in source):
            continue
        hdr['RESTFREQ'] = hdr.get('RESTF')
        H['RESTFREQ'] = hdr.get('RESTF')

        if scannum != lastscannum:
            # a new scan begins: flush the previous block and rebuild the
            # axis (all spectra within a scan share one axis)
            lastscannum = scannum
            if spectrumlist is not None:
                obslist.append(pyspeckit.ObsBlock(spectrumlist))
            xarr = make_axis(hdr,imagfreq=imagfreq)
            spectrumlist = [(
                pyspeckit.Spectrum(xarr=xarr,
                                   header=H,
                                   data=sp))]
        else:
            spectrumlist.append(
                pyspeckit.Spectrum(xarr=xarr,
                                   header=H,
                                   data=sp))

    # BUGFIX: flush the final scan.  Previously blocks were only appended
    # when a *new* scan began, so the last scan's ObsBlock was silently
    # dropped from the returned list.
    if spectrumlist is not None:
        obslist.append(pyspeckit.ObsBlock(spectrumlist))

    return obslist
class LazyItem(object):
    """Load-on-demand store of (spectrum, header) pairs for a ClassObject.

    Indexing with an int reads and caches that observation; indexing with a
    slice reads each covered index.  Iteration and ``in`` only see entries
    that have already been loaded.
    """

    def __init__(self, parent):
        self.parent = parent
        self.sphdr = {}                      # cache: index -> (spectrum, header)
        self.nind = len(self.parent.allind)
        self.nloaded = 0

    def __repr__(self):
        return ("Set of {0} spectra & headers, {1} loaded"
                " ({2:0.2f}%)".format(self.nind, self.nloaded,
                                      (float(self.nloaded)/self.nind)*100))

    def load_all(self, progressbar=True):
        self.load(range(self.nind))

    def load(self, indices, progressbar=True):
        """Force-load every index in ``indices`` (cache-filling side effect)."""
        pb = ProgressBar(len(indices))
        counter = 0
        for k in indices:
            self[k]
            counter += 1
            pb.update(counter)

    def __getitem__(self, key):
        # BUGFIX: the slice check now comes first -- slices are unhashable
        # on Python < 3.12, so the previous ``key in self.sphdr`` membership
        # test raised TypeError before the slice branch could ever run.
        if isinstance(key, slice):
            # BUGFIX: slice objects expose ``.stop``, not ``.end``; also use
            # range rather than the Python-2-only xrange.
            return [self[k] for k in range(key.start or 0,
                                           key.stop or len(self.parent.allind),
                                           key.step or 1)]
        elif key in self.sphdr:
            return self.sphdr[key]
        else:
            sphd = read_observation(self.parent._file, key,
                                    file_description=self.parent.file_description,
                                    indices=self.parent.allind,
                                    my_memmap=self.parent._data)
            # Merge the raw index entry into the header
            sphd[1].update(self.parent.allind[key])
            self.sphdr[key] = sphd
            self.nloaded += 1
            return sphd

    def __iter__(self):
        # BUGFIX: previously ``return self.next()`` -- no ``next`` method
        # exists on Python 3.  __next__ is a generator function, so calling
        # it yields an iterator over the loaded entries.
        return self.__next__()

    def __next__(self):
        # BUGFIX: iterate the actual cache dict (was ``self.spheader``,
        # an attribute that does not exist).
        for k in self.sphdr:
            yield self.sphdr[k]

    def __contains__(self, key):
        return key in self.sphdr
@print_timing
def class_to_spectra(filename, datatuple=None, **kwargs):
    """Read a CLASS file and return all selected spectra as a pyspeckit.Spectra.

    ``datatuple`` may supply a pre-read (spectra, header, indexes) triple
    (the output of ``read_class``); otherwise the file is read with
    ``read_class(filename, **kwargs)``.
    """
    if datatuple is None:
        spectra, header, indexes = read_class(filename, **kwargs)
    else:
        spectra, header, indexes = datatuple

    spectrumlist = []
    for data, hdr, index in zip(spectra, header, indexes):
        # fold the raw index entry into the header before building the axis
        hdr.update(index)
        spectrumlist.append(pyspeckit.Spectrum(xarr=make_axis(hdr),
                                               header=hdr,
                                               data=data))

    return pyspeckit.Spectra(spectrumlist)
def tests():
    """Ad-hoc smoke test: build N2H+ and HCO+ obsblocks from a local SMT file.

    NOTE(review): the path is machine-specific developer scratch code, not an
    automated test; it will only run on the original author's machine.
    """
    fn1 = '/Users/adam/work/bolocam/hht/class_003.smt'
    # upper sideband: N2H+ 3-2 (both polarizations)
    p = class_to_obsblocks(fn1,telescope=['SMT-F1M-HU','SMT-F1M-VU'],line=['N2HP(3-2)','N2H+(3-2)'])
    # lower sideband: HCO+ 3-2 (both polarizations)
    hcop = class_to_obsblocks(fn1,telescope=['SMT-F1M-HL','SMT-F1M-VL'],line=['HCOP(3-2)','HCO+(3-2)'])
| true
| true
|
f71781d481d127f72294f8baec04d9d74461c11a
| 121
|
py
|
Python
|
okta/models/usergroup/__init__.py
|
rkhleics/oktasdk-python
|
da8183444704c6d16831d1edd619390e9120dd70
|
[
"Apache-2.0"
] | 1
|
2020-09-09T12:59:19.000Z
|
2020-09-09T12:59:19.000Z
|
okta/models/usergroup/__init__.py
|
torchbox/oktasdk-python
|
da8183444704c6d16831d1edd619390e9120dd70
|
[
"Apache-2.0"
] | null | null | null |
okta/models/usergroup/__init__.py
|
torchbox/oktasdk-python
|
da8183444704c6d16831d1edd619390e9120dd70
|
[
"Apache-2.0"
] | 2
|
2017-11-02T22:12:57.000Z
|
2019-09-16T08:02:23.000Z
|
from .UserGroup import UserGroup
from .UserGroupProfile import UserGroupProfile
from .UserGroupRule import UserGroupRule
| 30.25
| 46
| 0.876033
|
from .UserGroup import UserGroup
from .UserGroupProfile import UserGroupProfile
from .UserGroupRule import UserGroupRule
| true
| true
|
f717822d090647eb7a44dad23a51405caa178759
| 1,679
|
py
|
Python
|
stockbot/ticker/sinotrade/session.py
|
tanlin2013/stockbot
|
08322ed4d847ea9e58b091985cef5c128a694b12
|
[
"Apache-2.0"
] | 1
|
2021-07-12T23:55:20.000Z
|
2021-07-12T23:55:20.000Z
|
stockbot/ticker/sinotrade/session.py
|
ajmal017/stockbot-7
|
08322ed4d847ea9e58b091985cef5c128a694b12
|
[
"Apache-2.0"
] | null | null | null |
stockbot/ticker/sinotrade/session.py
|
ajmal017/stockbot-7
|
08322ed4d847ea9e58b091985cef5c128a694b12
|
[
"Apache-2.0"
] | 1
|
2021-07-12T23:55:12.000Z
|
2021-07-12T23:55:12.000Z
|
import os
import logging
import pandas as pd
from datetime import date
from shioaji import Shioaji
class Session(Shioaji):
    """A Shioaji API session that logs in on construction.

    In simulation mode a fixed sandbox account is used; otherwise credentials
    are taken from the SINOTRADE_ID / SINOTRADE_PASSWD environment variables.
    """

    def __init__(self, simulation: bool = False, timeout: int = 10000) -> None:
        """
        Args:
            simulation:
            timeout:

        Notes: The ID of test account ranging from `PAPIUSER01` to `PAPIUSER08`,
            with password `2222`.
        """
        if simulation:
            person_id, passwd = "PAPIUSER05", "2222"
        else:
            person_id = os.environ['SINOTRADE_ID']
            passwd = os.environ['SINOTRADE_PASSWD']
        super().__init__(simulation=simulation)
        self.login(
            person_id=person_id,
            passwd=passwd,
            contracts_cb=lambda security_type: logging.info(f"{security_type!r} fetch done."),
            contracts_timeout=timeout,
        )

    def __del__(self) -> None:
        self.logout()
        logging.info("session closed.")

    @property
    def positions(self) -> pd.DataFrame:
        """Open positions of the default stock account."""
        return pd.DataFrame(self.list_positions(self.stock_account))

    def profit_loss(self, begin_date: date, end_date: date) -> pd.DataFrame:
        """Realized profit/loss between two dates (formatted as YYYY-MM-DD)."""
        fmt = '%Y-%m-%d'
        records = self.list_profit_loss(self.stock_account,
                                        begin_date=begin_date.strftime(fmt),
                                        end_date=end_date.strftime(fmt))
        return pd.DataFrame(records)

    @property
    def settlements(self) -> pd.DataFrame:
        """Pending settlement amounts of the default stock account."""
        return pd.DataFrame(self.list_settlements(self.stock_account))

    @property
    def balance(self) -> pd.DataFrame:
        """Current account balance."""
        return pd.DataFrame(self.account_balance())
| 27.983333
| 98
| 0.596784
|
import os
import logging
import pandas as pd
from datetime import date
from shioaji import Shioaji
class Session(Shioaji):
    """A Shioaji API session that logs in on construction.

    In simulation mode a fixed sandbox account (PAPIUSER05 / 2222) is used;
    otherwise credentials come from the SINOTRADE_ID / SINOTRADE_PASSWD
    environment variables.
    """
    def __init__(self, simulation: bool = False, timeout: int = 10000) -> None:
        """Log in immediately.

        Args:
            simulation: use the sandbox test account instead of real credentials.
            timeout: contract-download timeout in milliseconds.
        """
        _person_id = f"PAPIUSER05" \
            if simulation else os.environ['SINOTRADE_ID']
        _passwd = "2222" \
            if simulation else os.environ['SINOTRADE_PASSWD']
        super(Session, self).__init__(simulation=simulation)
        self.login(
            person_id=_person_id,
            passwd=_passwd,
            # log each contract group as it finishes downloading
            contracts_cb=lambda security_type: logging.info(f"{repr(security_type)} fetch done."),
            contracts_timeout=timeout
        )
    def __del__(self) -> None:
        # Best-effort logout when the session object is garbage-collected.
        self.logout()
        logging.info("session closed.")
    @property
    def positions(self) -> pd.DataFrame:
        """Open positions of the default stock account."""
        return pd.DataFrame(
            self.list_positions(self.stock_account)
        )
    def profit_loss(self, begin_date: date, end_date: date) -> pd.DataFrame:
        """Realized profit/loss between two dates (formatted YYYY-MM-DD)."""
        return pd.DataFrame(self.list_profit_loss(
            self.stock_account,
            begin_date=begin_date.strftime('%Y-%m-%d'),
            end_date=end_date.strftime('%Y-%m-%d')
        ))
    @property
    def settlements(self) -> pd.DataFrame:
        """Pending settlement amounts of the default stock account."""
        return pd.DataFrame(
            self.list_settlements(self.stock_account)
        )
    @property
    def balance(self) -> pd.DataFrame:
        """Current account balance."""
        return pd.DataFrame(
            self.account_balance()
        )
| true
| true
|
f71782577c5c2b9953da5e9ae6c0a019748c14b7
| 3,361
|
py
|
Python
|
src/application-insights/azext_applicationinsights/vendored_sdks/applicationinsights/models/events_trace_result.py
|
Mannan2812/azure-cli-extensions
|
e2b34efe23795f6db9c59100534a40f0813c3d95
|
[
"MIT"
] | 2,728
|
2015-01-09T10:19:32.000Z
|
2022-03-31T14:50:33.000Z
|
src/application-insights/azext_applicationinsights/vendored_sdks/applicationinsights/models/events_trace_result.py
|
Mannan2812/azure-cli-extensions
|
e2b34efe23795f6db9c59100534a40f0813c3d95
|
[
"MIT"
] | 17,773
|
2015-01-05T15:57:17.000Z
|
2022-03-31T23:50:25.000Z
|
src/application-insights/azext_applicationinsights/vendored_sdks/applicationinsights/models/events_trace_result.py
|
Mannan2812/azure-cli-extensions
|
e2b34efe23795f6db9c59100534a40f0813c3d95
|
[
"MIT"
] | 1,916
|
2015-01-19T05:05:41.000Z
|
2022-03-31T19:36:44.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .events_result_data import EventsResultData
class EventsTraceResult(EventsResultData):
    """A trace result.

    All required parameters must be populated in order to send to Azure.

    :param id: The unique ID for this event.
    :type id: str
    :param count: Count of the event
    :type count: long
    :param timestamp: Timestamp of the event
    :type timestamp: datetime
    :param custom_dimensions: Custom dimensions of the event
    :type custom_dimensions:
     ~azure.applicationinsights.models.EventsResultDataCustomDimensions
    :param custom_measurements: Custom measurements of the event
    :type custom_measurements:
     ~azure.applicationinsights.models.EventsResultDataCustomMeasurements
    :param operation: Operation info of the event
    :type operation: ~azure.applicationinsights.models.EventsOperationInfo
    :param session: Session info of the event
    :type session: ~azure.applicationinsights.models.EventsSessionInfo
    :param user: User info of the event
    :type user: ~azure.applicationinsights.models.EventsUserInfo
    :param cloud: Cloud info of the event
    :type cloud: ~azure.applicationinsights.models.EventsCloudInfo
    :param ai: AI info of the event
    :type ai: ~azure.applicationinsights.models.EventsAiInfo
    :param application: Application info of the event
    :type application: ~azure.applicationinsights.models.EventsApplicationInfo
    :param client: Client info of the event
    :type client: ~azure.applicationinsights.models.EventsClientInfo
    :param type: Required. Constant filled by server.
    :type type: str
    :param trace:
    :type trace: ~azure.applicationinsights.models.EventsTraceInfo
    """

    # msrest validation rules: 'type' is the required polymorphic discriminator
    _validation = {
        'type': {'required': True},
    }

    # serialized-name -> msrest type map used by the (de)serializer
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'count': {'key': 'count', 'type': 'long'},
        'timestamp': {'key': 'timestamp', 'type': 'iso-8601'},
        'custom_dimensions': {'key': 'customDimensions', 'type': 'EventsResultDataCustomDimensions'},
        'custom_measurements': {'key': 'customMeasurements', 'type': 'EventsResultDataCustomMeasurements'},
        'operation': {'key': 'operation', 'type': 'EventsOperationInfo'},
        'session': {'key': 'session', 'type': 'EventsSessionInfo'},
        'user': {'key': 'user', 'type': 'EventsUserInfo'},
        'cloud': {'key': 'cloud', 'type': 'EventsCloudInfo'},
        'ai': {'key': 'ai', 'type': 'EventsAiInfo'},
        'application': {'key': 'application', 'type': 'EventsApplicationInfo'},
        'client': {'key': 'client', 'type': 'EventsClientInfo'},
        'type': {'key': 'type', 'type': 'str'},
        'trace': {'key': 'trace', 'type': 'EventsTraceInfo'},
    }

    def __init__(self, **kwargs):
        """Forward all keyword arguments to the base model and fix the discriminator."""
        super(EventsTraceResult, self).__init__(**kwargs)
        self.trace = kwargs.get('trace', None)
        # discriminator value identifying this subtype to the serializer
        self.type = 'trace'
| 43.649351
| 107
| 0.650997
|
from .events_result_data import EventsResultData
class EventsTraceResult(EventsResultData):
    """A trace event result row (AutoRest-generated model).

    Inherits the common event fields from EventsResultData and adds a
    ``trace`` payload.  ``type`` is the required polymorphic discriminator
    and is fixed to ``'trace'``.
    """

    # msrest validation rules: 'type' is the required discriminator
    _validation = {
        'type': {'required': True},
    }

    # serialized-name -> msrest type map used by the (de)serializer
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'count': {'key': 'count', 'type': 'long'},
        'timestamp': {'key': 'timestamp', 'type': 'iso-8601'},
        'custom_dimensions': {'key': 'customDimensions', 'type': 'EventsResultDataCustomDimensions'},
        'custom_measurements': {'key': 'customMeasurements', 'type': 'EventsResultDataCustomMeasurements'},
        'operation': {'key': 'operation', 'type': 'EventsOperationInfo'},
        'session': {'key': 'session', 'type': 'EventsSessionInfo'},
        'user': {'key': 'user', 'type': 'EventsUserInfo'},
        'cloud': {'key': 'cloud', 'type': 'EventsCloudInfo'},
        'ai': {'key': 'ai', 'type': 'EventsAiInfo'},
        'application': {'key': 'application', 'type': 'EventsApplicationInfo'},
        'client': {'key': 'client', 'type': 'EventsClientInfo'},
        'type': {'key': 'type', 'type': 'str'},
        'trace': {'key': 'trace', 'type': 'EventsTraceInfo'},
    }

    def __init__(self, **kwargs):
        """Forward all keyword arguments to the base model and fix the discriminator."""
        super(EventsTraceResult, self).__init__(**kwargs)
        self.trace = kwargs.get('trace', None)
        # discriminator value identifying this subtype to the serializer
        self.type = 'trace'
| true
| true
|
f71782e87705531559d4a97ca72db46a973a03f6
| 30,692
|
py
|
Python
|
examples/ner/run_ner_strain.py
|
Tarpelite/BERT_self_training
|
f50ff015f0d3669b5d927a6d28d8a08201c101b6
|
[
"MIT"
] | null | null | null |
examples/ner/run_ner_strain.py
|
Tarpelite/BERT_self_training
|
f50ff015f0d3669b5d927a6d28d8a08201c101b6
|
[
"MIT"
] | null | null | null |
examples/ner/run_ner_strain.py
|
Tarpelite/BERT_self_training
|
f50ff015f0d3669b5d927a6d28d8a08201c101b6
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Fine-tuning the library models for named entity recognition on CoNLL-2003 (Bert or Roberta). """
import argparse
import glob
import logging
import os
import random
import numpy as np
import torch
from seqeval.metrics import f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
import pickle
# from pudb import set_trace
# set_trace()
from transformers import (
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
WEIGHTS_NAME,
AdamW,
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
get_linear_schedule_with_warmup,
)
from utils_ner import convert_examples_to_features, get_labels, read_examples_from_file
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
logger = logging.getLogger(__name__)

# All config classes that map to a token-classification model head; used to
# derive the supported --model_type choices and checkpoint names below.
MODEL_CONFIG_CLASSES = list(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in MODEL_CONFIG_CLASSES), ())

# Tokenizer-constructor kwargs that may be forwarded from the CLI arguments.
TOKENIZER_ARGS = ["do_lower_case", "strip_accents", "keep_accents", "use_fast"]
def set_seed(args):
    """Seed every RNG (python, numpy, torch, and CUDA if used) for reproducibility."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(args.seed)
    if args.n_gpu > 0:
        # seed all visible CUDA devices as well
        torch.cuda.manual_seed_all(args.seed)
def train(args, train_dataset, model, tokenizer, labels, pad_token_label_id):
    """Train ``model`` on ``train_dataset`` using soft (distillation) labels.

    Batches are (input_ids, attention_mask, segment_ids, soft_labels); the
    model is expected to accept a ``soft_labels`` keyword and return the loss
    first in its output tuple.  Returns ``(global_step, average_loss)``.
    """
    if args.local_rank in [-1, 0]:
        # Only rank 0 (or a single-process run) writes TensorBoard logs.
        tb_writer = SummaryWriter()
    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
    train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
    if args.max_steps > 0:
        # --max_steps overrides --num_train_epochs.
        t_total = args.max_steps
        args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
    else:
        t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
    if args.warmup_ratio > 0:
        # A positive warmup_ratio overrides any explicit --warmup_steps value.
        args.warmup_steps = int(t_total * args.warmup_ratio)
    # Prepare optimizer and schedule (linear warmup and decay); biases and
    # LayerNorm weights get no weight decay.
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
            "weight_decay": args.weight_decay,
        },
        {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
    )
    # Check if saved optimizer or scheduler states exist (resuming from a checkpoint dir)
    if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile(
        os.path.join(args.model_name_or_path, "scheduler.pt")
    ):
        # Load in optimizer and scheduler states
        optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")))
        scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")))
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
    # multi-gpu training (should be after apex fp16 initialization)
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)
    # Distributed training (should be after apex fp16 initialization)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    # Train!
    logger.info("***** Running training *****")
    logger.info("  Num examples = %d", len(train_dataset))
    logger.info("  Num Epochs = %d", args.num_train_epochs)
    logger.info("  Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
    logger.info(
        "  Total train batch size (w. parallel, distributed & accumulation) = %d",
        args.train_batch_size
        * args.gradient_accumulation_steps
        * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
    )
    logger.info("  Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info("  Total optimization steps = %d", t_total)
    global_step = 0
    epochs_trained = 0
    steps_trained_in_current_epoch = 0
    # Check if continuing training from a checkpoint
    if os.path.exists(args.model_name_or_path):
        # set global_step to the step of the last saved checkpoint from the
        # model path (checkpoint dirs are named "checkpoint-<step>")
        try:
            global_step = int(args.model_name_or_path.split("-")[-1].split("/")[0])
        except ValueError:
            global_step = 0
        epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
        steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps)
        logger.info("  Continuing training from checkpoint, will skip to saved global_step")
        logger.info("  Continuing training from epoch %d", epochs_trained)
        logger.info("  Continuing training from global step %d", global_step)
        logger.info("  Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)
    tr_loss, logging_loss = 0.0, 0.0
    model.zero_grad()
    train_iterator = trange(
        epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]
    )
    set_seed(args)  # Added here for reproducibility
    for _ in train_iterator:
        epoch_iterator = tqdm(train_dataloader, desc="Iter(loss=X.XXX, lr=X.XXXXXXXX)", disable=args.local_rank not in [-1, 0])
        for step, batch in enumerate(epoch_iterator):
            # Skip past any already trained steps if resuming training
            if steps_trained_in_current_epoch > 0:
                steps_trained_in_current_epoch -= 1
                continue
            model.train()
            batch = tuple(t.to(args.device) for t in batch)
            # batch[3] holds the teacher logits ("soft_labels") for distillation.
            inputs = {"input_ids": batch[0], "attention_mask": batch[1], "soft_labels": batch[3]}
            if args.model_type != "distilbert":
                inputs["token_type_ids"] = (
                    batch[2] if args.model_type in ["bert", "xlnet"] else None
                )  # XLM and RoBERTa don't use segment_ids
            outputs = model(**inputs)
            loss = outputs[0]  # model outputs are always tuple in pytorch-transformers (see doc)
            if args.n_gpu > 1:
                loss = loss.mean()  # mean() to average on multi-gpu parallel training
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps
            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()
            tr_loss += loss.item()
            # Only step the optimizer once every gradient_accumulation_steps batches.
            if (step + 1) % args.gradient_accumulation_steps == 0:
                epoch_iterator.set_description('Iter (loss=%5.3f) lr=%9.7f' % (loss.item(), scheduler.get_lr()[0]))
                if args.fp16:
                    torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
                else:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                optimizer.step()
                scheduler.step()  # Update learning rate schedule
                model.zero_grad()
                global_step += 1
                if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
                    # Log metrics
                    if (
                        args.local_rank == -1 and args.evaluate_during_training
                    ):  # Only evaluate when single GPU otherwise metrics may not average well
                        results, _ = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode="dev")
                        for key, value in results.items():
                            tb_writer.add_scalar("eval_{}".format(key), value, global_step)
                    tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step)
                    tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step)
                    logging_loss = tr_loss
                if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
                    # Save model checkpoint
                    output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
                    if not os.path.exists(output_dir):
                        os.makedirs(output_dir)
                    model_to_save = (
                        model.module if hasattr(model, "module") else model
                    )  # Take care of distributed/parallel training
                    model_to_save.save_pretrained(output_dir)
                    tokenizer.save_pretrained(output_dir)
                    torch.save(args, os.path.join(output_dir, "training_args.bin"))
                    logger.info("Saving model checkpoint to %s", output_dir)
                    torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
                    torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
                    logger.info("Saving optimizer and scheduler states to %s", output_dir)
            if args.max_steps > 0 and global_step > args.max_steps:
                epoch_iterator.close()
                break
        if args.max_steps > 0 and global_step > args.max_steps:
            train_iterator.close()
            break
    if args.local_rank in [-1, 0]:
        tb_writer.close()
    return global_step, tr_loss / global_step
def evaluate(args, model, tokenizer, labels, pad_token_label_id, mode, prefix=""):
    """Run token-classification evaluation on the split selected by ``mode``.

    Args:
        args: parsed command-line namespace (device, batch sizes, model_type, ...).
        model: token-classification model (may be wrapped in DataParallel here).
        tokenizer: tokenizer matching ``model``; used to build the eval dataset.
        labels: list of label strings; list index == label id.
        pad_token_label_id: label id marking padding / sub-word positions,
            which are excluded from the metrics.
        mode: dataset split passed to load_and_cache_examples ("dev" or "test").
        prefix: string included in log messages (e.g. a checkpoint step).

    Returns:
        (results, preds_list): ``results`` maps "loss"/"precision"/"recall"/"f1"
        to floats; ``preds_list`` holds one predicted label sequence per example.
    """
    eval_dataset = load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode=mode)
    args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
    # Note that DistributedSampler samples randomly
    eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
    eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
    # multi-gpu evaluate
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)
    # Eval!
    logger.info("***** Running evaluation %s *****", prefix)
    logger.info("  Num examples = %d", len(eval_dataset))
    logger.info("  Batch size = %d", args.eval_batch_size)
    eval_loss = 0.0
    nb_eval_steps = 0
    preds = None
    out_label_ids = None
    model.eval()
    for batch in tqdm(eval_dataloader, desc="Evaluating"):
        batch = tuple(t.to(args.device) for t in batch)
        with torch.no_grad():
            inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
            if args.model_type != "distilbert":
                inputs["token_type_ids"] = (
                    batch[2] if args.model_type in ["bert", "xlnet"] else None
                )  # XLM and RoBERTa don't use segment_ids
            outputs = model(**inputs)
            tmp_eval_loss, logits = outputs[:2]
            if args.n_gpu > 1:
                tmp_eval_loss = tmp_eval_loss.mean()  # average over DataParallel replicas
            eval_loss += tmp_eval_loss.item()
        nb_eval_steps += 1
        if preds is None:
            preds = logits.detach().cpu().numpy()
            out_label_ids = inputs["labels"].detach().cpu().numpy()
        else:
            preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
            out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
    eval_loss = eval_loss / nb_eval_steps
    preds = np.argmax(preds, axis=2)
    label_map = {i: label for i, label in enumerate(labels)}
    out_label_list = [[] for _ in range(out_label_ids.shape[0])]
    preds_list = [[] for _ in range(out_label_ids.shape[0])]
    for i in range(out_label_ids.shape[0]):
        for j in range(out_label_ids.shape[1]):
            # Padding / sub-word positions carry pad_token_label_id and are skipped.
            if out_label_ids[i, j] != pad_token_label_id:
                out_label_list[i].append(label_map[out_label_ids[i][j]])
                preds_list[i].append(label_map[preds[i][j]])
    # FIX: the original used bare print() debug statements here, and indexing
    # preds_list[0] crashed with IndexError on an empty eval set; log at debug
    # level and guard against emptiness instead.
    if preds_list:
        logger.debug("preds: %s", preds_list[0])
        logger.debug("labels: %s", out_label_list[0])
    results = {
        "loss": eval_loss,
        "precision": precision_score(out_label_list, preds_list),
        "recall": recall_score(out_label_list, preds_list),
        "f1": f1_score(out_label_list, preds_list),
    }
    logger.info("***** Eval results %s *****", prefix)
    for key in sorted(results.keys()):
        logger.info("  %s = %s", key, str(results[key]))
    return results, preds_list
def load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode):
    """Read ``args.eval_file`` for split ``mode`` and build a TensorDataset of
    (input_ids, input_mask, segment_ids, label_ids) tensors.

    Args:
        args: parsed command-line namespace (eval_file, max_seq_length, ...).
        tokenizer: tokenizer providing special-token ids for feature conversion.
        labels: list of label strings.
        pad_token_label_id: label id assigned to padding / sub-word positions.
        mode: split name forwarded to read_examples_from_file ("train"/"dev"/"test").
    """
    # BUG FIX: the original gated the distributed barriers on ``not evaluate``,
    # but ``evaluate`` there referred to the module-level *function* (always
    # truthy), so the condition was always False and the barriers never ran.
    # Gate on the split name instead, matching the upstream script's intent.
    is_training = mode == "train"
    if args.local_rank not in [-1, 0] and is_training:
        # Let the first process build the features; the others wait here.
        torch.distributed.barrier()
    logger.info("Creating features from dataset file at %s", args.data_dir)
    examples = read_examples_from_file(args.eval_file, mode)
    features = convert_examples_to_features(
        examples,
        labels,
        args.max_seq_length,
        tokenizer,
        cls_token_at_end=bool(args.model_type in ["xlnet"]),
        # xlnet has a cls token at the end
        cls_token=tokenizer.cls_token,
        cls_token_segment_id=2 if args.model_type in ["xlnet"] else 0,
        sep_token=tokenizer.sep_token,
        sep_token_extra=bool(args.model_type in ["roberta"]),
        # roberta uses an extra separator b/w pairs of sentences, cf. github.com/pytorch/fairseq/commit/1684e166e3da03f5b600dbb7855cb98ddfcd0805
        pad_on_left=bool(args.model_type in ["xlnet"]),
        # pad on the left for xlnet
        pad_token=tokenizer.pad_token_id,
        pad_token_segment_id=tokenizer.pad_token_type_id,
        pad_token_label_id=pad_token_label_id,
    )
    if args.local_rank == 0 and is_training:
        # Release the processes that waited at the first barrier.
        torch.distributed.barrier()
    # Convert to Tensors and build dataset
    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
    all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
    all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
    dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
    return dataset
def main():
    """CLI entry point: parse arguments, then run training / eval / prediction.

    Training (--do_train) consumes pre-computed teacher logits from
    --logits_file (a pickled list [input_ids, input_mask, segment_ids,
    logits]) so the student is trained on soft labels; --do_eval and
    --do_predict score checkpoints using the file given by --eval_file.
    """
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    )
    parser.add_argument(
        "--model_type",
        default=None,
        type=str,
        required=True,
        help="Model type selected in the list: " + ", ".join(MODEL_TYPES),
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS),
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    # Other parameters
    parser.add_argument(
        "--labels",
        default="",
        type=str,
        help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.",
    )
    parser.add_argument(
        "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument(
        "--cache_dir",
        default="",
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help="The maximum total input sequence length after tokenization. Sequences longer "
        "than this will be truncated, sequences shorter will be padded.",
    )
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--evaluate_during_training",
        action="store_true",
        help="Whether to run evaluation during training at each logging step.",
    )
    parser.add_argument(
        "--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model."
    )
    parser.add_argument(
        "--keep_accents", action="store_const", const=True, help="Set this flag if model is trained with accents."
    )
    parser.add_argument(
        "--strip_accents", action="store_const", const=True, help="Set this flag if model is trained without accents."
    )
    parser.add_argument("--use_fast", action="store_const", const=True, help="Set this flag to use fast tokenization.")
    parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.")
    parser.add_argument(
        "--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation."
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
    parser.add_argument(
        "--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform."
    )
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--logging_steps", type=int, default=500, help="Log every X updates steps.")
    parser.add_argument("--save_steps", type=int, default=500, help="Save checkpoint every X updates steps.")
    # Pickled teacher logits used as the distillation training set.
    parser.add_argument("--logits_file", type=str, default="")
    # CoNLL-style file read by load_and_cache_examples for dev/test evaluation.
    parser.add_argument("--eval_file", type=str, default="")
    # When > 0, overrides --warmup_steps (see train()).
    parser.add_argument("--warmup_ratio", type=float, default=0.1)
    parser.add_argument(
        "--eval_all_checkpoints",
        action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
    )
    parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
        "See details at https://nvidia.github.io/apex/amp.html",
    )
    parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
    parser.add_argument("--server_ip", type=str, default="", help="For distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="For distant debugging.")
    args = parser.parse_args()
    # Refuse to clobber a non-empty output directory unless explicitly allowed.
    if (
        os.path.exists(args.output_dir)
        and os.listdir(args.output_dir)
        and args.do_train
        and not args.overwrite_output_dir
    ):
        raise ValueError(
            "Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
                args.output_dir
            )
        )
    # Setup distant debugging if needed
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()
    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend="nccl")
        args.n_gpu = 1
    args.device = device
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s -   %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        args.local_rank,
        device,
        args.n_gpu,
        bool(args.local_rank != -1),
        args.fp16,
    )
    # Set seed
    set_seed(args)
    # Prepare CONLL-2003 task
    labels = get_labels(args.labels)
    num_labels = len(labels)
    # Use cross entropy ignore index as padding label id so that only real label ids contribute to the loss later
    pad_token_label_id = CrossEntropyLoss().ignore_index
    # Load pretrained model and tokenizer
    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab
    args.model_type = args.model_type.lower()
    config = AutoConfig.from_pretrained(
        args.config_name if args.config_name else args.model_name_or_path,
        num_labels=num_labels,
        id2label={str(i): label for i, label in enumerate(labels)},
        label2id={label: i for i, label in enumerate(labels)},
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    tokenizer_args = {k: v for k, v in vars(args).items() if v is not None and k in TOKENIZER_ARGS}
    logger.info("Tokenizer arguments: %s", tokenizer_args)
    tokenizer = AutoTokenizer.from_pretrained(
        args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
        cache_dir=args.cache_dir if args.cache_dir else None,
        **tokenizer_args,
    )
    model = AutoModelForTokenClassification.from_pretrained(
        args.model_name_or_path,
        from_tf=bool(".ckpt" in args.model_name_or_path),
        config=config,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    if args.local_rank == 0:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab
    model.to(args.device)
    logger.info("Training/evaluation parameters %s", args)
    # Training
    if args.do_train:
        # train_dataset = load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode="train")
        # NOTE(review): pickle.load executes arbitrary code from the file —
        # only pass trusted --logits_file inputs.
        with open(args.logits_file, "rb") as f:
            datasets = pickle.load(f)
        all_input_ids = torch.tensor(datasets[0], dtype=torch.long)
        all_input_mask = torch.tensor(datasets[1], dtype=torch.long)
        all_segment_ids = torch.tensor(datasets[2], dtype=torch.long)
        all_ner_logits = torch.tensor(datasets[3], dtype=torch.float)
        train_dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_ner_logits)
        global_step, tr_loss = train(args, train_dataset, model, tokenizer, labels, pad_token_label_id)
        logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
    # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()
    if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
        # Create output directory if needed
        if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(args.output_dir)
        logger.info("Saving model checkpoint to %s", args.output_dir)
        # Save a trained model, configuration and tokenizer using `save_pretrained()`.
        # They can then be reloaded using `from_pretrained()`
        model_to_save = (
            model.module if hasattr(model, "module") else model
        )  # Take care of distributed/parallel training
        model_to_save.save_pretrained(args.output_dir)
        tokenizer.save_pretrained(args.output_dir)
        # Good practice: save your training arguments together with the trained model
        torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
    # Evaluation
    results = {}
    if args.do_eval and args.local_rank in [-1, 0]:
        tokenizer = AutoTokenizer.from_pretrained(args.output_dir, **tokenizer_args)
        checkpoints = [args.output_dir]
        if args.eval_all_checkpoints:
            checkpoints = list(
                os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True))
            )
            logging.getLogger("pytorch_transformers.modeling_utils").setLevel(logging.WARN)  # Reduce logging
        logger.info("Evaluate the following checkpoints: %s", checkpoints)
        for checkpoint in checkpoints:
            global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
            model = AutoModelForTokenClassification.from_pretrained(checkpoint)
            model.to(args.device)
            result, _ = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode="dev", prefix=global_step)
            if global_step:
                result = {"{}_{}".format(global_step, k): v for k, v in result.items()}
            results.update(result)
        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            for key in sorted(results.keys()):
                writer.write("{} = {}\n".format(key, str(results[key])))
    if args.do_predict and args.local_rank in [-1, 0]:
        tokenizer = AutoTokenizer.from_pretrained(args.output_dir, **tokenizer_args)
        model = AutoModelForTokenClassification.from_pretrained(args.output_dir)
        model.to(args.device)
        result, predictions = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode="test")
        # Save results
        output_test_results_file = os.path.join(args.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(result.keys()):
                writer.write("{} = {}\n".format(key, str(result[key])))
        # Save predictions
        output_test_predictions_file = os.path.join(args.output_dir, "test_predictions.txt")
        with open(output_test_predictions_file, "w") as writer:
            with open(os.path.join(args.data_dir, "test.txt"), "r") as f:
                example_id = 0
                for line in f:
                    if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                        writer.write(line)
                        # Advance to the next example once its predictions are exhausted.
                        if not predictions[example_id]:
                            example_id += 1
                    elif predictions[example_id]:
                        output_line = line.split()[0] + " " + predictions[example_id].pop(0) + "\n"
                        writer.write(output_line)
                    else:
                        logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])
    return results
# Script entry point.
if __name__ == "__main__":
    main()
# --- dataset-dump artifacts (avg_line_length / max_line_length /
# --- alphanum_fraction column separators); commented out so the file
# --- remains syntactically valid Python.
# | 44.161151 | 150 | 0.655741 |
import argparse
import glob
import logging
import os
import random
import numpy as np
import torch
from seqeval.metrics import f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
import pickle
from transformers import (
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
WEIGHTS_NAME,
AdamW,
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
get_linear_schedule_with_warmup,
)
from utils_ner import convert_examples_to_features, get_labels, read_examples_from_file
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
logger = logging.getLogger(__name__)
# All config classes supporting token classification, plus the model-type and
# pretrained-checkpoint names derived from them (used in --help text).
MODEL_CONFIG_CLASSES = list(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in MODEL_CONFIG_CLASSES), ())
# argparse flag names forwarded verbatim (when set) to AutoTokenizer.from_pretrained.
TOKENIZER_ARGS = ["do_lower_case", "strip_accents", "keep_accents", "use_fast"]
def set_seed(args):
    """Seed Python's, NumPy's and PyTorch's RNGs from ``args.seed``."""
    seed = args.seed
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if args.n_gpu > 0:
        # Also seed every CUDA device's generator when GPUs are in use.
        torch.cuda.manual_seed_all(seed)
def train(args, train_dataset, model, tokenizer, labels, pad_token_label_id):
    """Train ``model`` on ``train_dataset`` using soft (distillation) labels.

    Batches are (input_ids, attention_mask, segment_ids, soft_labels); the
    model is expected to accept a ``soft_labels`` keyword and return the loss
    first in its output tuple.  Returns ``(global_step, average_loss)``.
    """
    if args.local_rank in [-1, 0]:
        # Only rank 0 (or a single-process run) writes TensorBoard logs.
        tb_writer = SummaryWriter()
    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
    train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
    if args.max_steps > 0:
        # --max_steps overrides --num_train_epochs.
        t_total = args.max_steps
        args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
    else:
        t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
    if args.warmup_ratio > 0:
        # A positive warmup_ratio overrides any explicit --warmup_steps value.
        args.warmup_steps = int(t_total * args.warmup_ratio)
    # Prepare optimizer and schedule (linear warmup and decay); biases and
    # LayerNorm weights get no weight decay.
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
            "weight_decay": args.weight_decay,
        },
        {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
    )
    # Restore optimizer/scheduler state when resuming from a checkpoint dir.
    if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile(
        os.path.join(args.model_name_or_path, "scheduler.pt")
    ):
        optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")))
        scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")))
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
    # multi-gpu training (should be after apex fp16 initialization)
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)
    # Distributed training (should be after apex fp16 initialization)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    # Train!
    logger.info("***** Running training *****")
    logger.info("  Num examples = %d", len(train_dataset))
    logger.info("  Num Epochs = %d", args.num_train_epochs)
    logger.info("  Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
    logger.info(
        "  Total train batch size (w. parallel, distributed & accumulation) = %d",
        args.train_batch_size
        * args.gradient_accumulation_steps
        * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
    )
    logger.info("  Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info("  Total optimization steps = %d", t_total)
    global_step = 0
    epochs_trained = 0
    steps_trained_in_current_epoch = 0
    # When resuming, recover the step count from a "checkpoint-<step>" path name.
    if os.path.exists(args.model_name_or_path):
        try:
            global_step = int(args.model_name_or_path.split("-")[-1].split("/")[0])
        except ValueError:
            global_step = 0
        epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
        steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps)
        logger.info("  Continuing training from checkpoint, will skip to saved global_step")
        logger.info("  Continuing training from epoch %d", epochs_trained)
        logger.info("  Continuing training from global step %d", global_step)
        logger.info("  Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)
    tr_loss, logging_loss = 0.0, 0.0
    model.zero_grad()
    train_iterator = trange(
        epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]
    )
    set_seed(args)  # re-seed here for reproducibility of the training loop
    for _ in train_iterator:
        epoch_iterator = tqdm(train_dataloader, desc="Iter(loss=X.XXX, lr=X.XXXXXXXX)", disable=args.local_rank not in [-1, 0])
        for step, batch in enumerate(epoch_iterator):
            # Skip past any already-trained steps when resuming.
            if steps_trained_in_current_epoch > 0:
                steps_trained_in_current_epoch -= 1
                continue
            model.train()
            batch = tuple(t.to(args.device) for t in batch)
            # batch[3] holds the teacher logits ("soft_labels") for distillation.
            inputs = {"input_ids": batch[0], "attention_mask": batch[1], "soft_labels": batch[3]}
            if args.model_type != "distilbert":
                inputs["token_type_ids"] = (
                    batch[2] if args.model_type in ["bert", "xlnet"] else None
                )  # XLM and RoBERTa don't use segment_ids
            outputs = model(**inputs)
            loss = outputs[0]  # model outputs are always tuple in pytorch-transformers (see doc)
            if args.n_gpu > 1:
                loss = loss.mean()  # mean() to average on multi-gpu parallel training
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps
            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()
            tr_loss += loss.item()
            # Only step the optimizer once every gradient_accumulation_steps batches.
            if (step + 1) % args.gradient_accumulation_steps == 0:
                epoch_iterator.set_description('Iter (loss=%5.3f) lr=%9.7f' % (loss.item(), scheduler.get_lr()[0]))
                if args.fp16:
                    torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
                else:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                optimizer.step()
                scheduler.step()  # Update learning rate schedule
                model.zero_grad()
                global_step += 1
                if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
                    # Log metrics
                    if (
                        args.local_rank == -1 and args.evaluate_during_training
                    ):  # Only evaluate when single GPU otherwise metrics may not average well
                        results, _ = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode="dev")
                        for key, value in results.items():
                            tb_writer.add_scalar("eval_{}".format(key), value, global_step)
                    tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step)
                    tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step)
                    logging_loss = tr_loss
                if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
                    # Save model checkpoint
                    output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
                    if not os.path.exists(output_dir):
                        os.makedirs(output_dir)
                    model_to_save = (
                        model.module if hasattr(model, "module") else model
                    )  # Take care of distributed/parallel training
                    model_to_save.save_pretrained(output_dir)
                    tokenizer.save_pretrained(output_dir)
                    torch.save(args, os.path.join(output_dir, "training_args.bin"))
                    logger.info("Saving model checkpoint to %s", output_dir)
                    torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
                    torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
                    logger.info("Saving optimizer and scheduler states to %s", output_dir)
            if args.max_steps > 0 and global_step > args.max_steps:
                epoch_iterator.close()
                break
        if args.max_steps > 0 and global_step > args.max_steps:
            train_iterator.close()
            break
    if args.local_rank in [-1, 0]:
        tb_writer.close()
    return global_step, tr_loss / global_step
def evaluate(args, model, tokenizer, labels, pad_token_label_id, mode, prefix=""):
    """Run token-classification evaluation on the split selected by ``mode``.

    Args:
        args: parsed command-line namespace (device, batch sizes, model_type, ...).
        model: token-classification model (may be wrapped in DataParallel here).
        tokenizer: tokenizer matching ``model``; used to build the eval dataset.
        labels: list of label strings; list index == label id.
        pad_token_label_id: label id marking padding / sub-word positions,
            which are excluded from the metrics.
        mode: dataset split passed to load_and_cache_examples ("dev" or "test").
        prefix: string included in log messages (e.g. a checkpoint step).

    Returns:
        (results, preds_list): ``results`` maps "loss"/"precision"/"recall"/"f1"
        to floats; ``preds_list`` holds one predicted label sequence per example.
    """
    eval_dataset = load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode=mode)
    args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
    # Note that DistributedSampler samples randomly
    eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
    eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
    # multi-gpu evaluate
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)
    # Eval!
    logger.info("***** Running evaluation %s *****", prefix)
    logger.info("  Num examples = %d", len(eval_dataset))
    logger.info("  Batch size = %d", args.eval_batch_size)
    eval_loss = 0.0
    nb_eval_steps = 0
    preds = None
    out_label_ids = None
    model.eval()
    for batch in tqdm(eval_dataloader, desc="Evaluating"):
        batch = tuple(t.to(args.device) for t in batch)
        with torch.no_grad():
            inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
            if args.model_type != "distilbert":
                inputs["token_type_ids"] = (
                    batch[2] if args.model_type in ["bert", "xlnet"] else None
                )  # XLM and RoBERTa don't use segment_ids
            outputs = model(**inputs)
            tmp_eval_loss, logits = outputs[:2]
            if args.n_gpu > 1:
                tmp_eval_loss = tmp_eval_loss.mean()  # average over DataParallel replicas
            eval_loss += tmp_eval_loss.item()
        nb_eval_steps += 1
        if preds is None:
            preds = logits.detach().cpu().numpy()
            out_label_ids = inputs["labels"].detach().cpu().numpy()
        else:
            preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
            out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
    eval_loss = eval_loss / nb_eval_steps
    preds = np.argmax(preds, axis=2)
    label_map = {i: label for i, label in enumerate(labels)}
    out_label_list = [[] for _ in range(out_label_ids.shape[0])]
    preds_list = [[] for _ in range(out_label_ids.shape[0])]
    for i in range(out_label_ids.shape[0]):
        for j in range(out_label_ids.shape[1]):
            # Padding / sub-word positions carry pad_token_label_id and are skipped.
            if out_label_ids[i, j] != pad_token_label_id:
                out_label_list[i].append(label_map[out_label_ids[i][j]])
                preds_list[i].append(label_map[preds[i][j]])
    # FIX: the original used bare print() debug statements here, and indexing
    # preds_list[0] crashed with IndexError on an empty eval set; log at debug
    # level and guard against emptiness instead.
    if preds_list:
        logger.debug("preds: %s", preds_list[0])
        logger.debug("labels: %s", out_label_list[0])
    results = {
        "loss": eval_loss,
        "precision": precision_score(out_label_list, preds_list),
        "recall": recall_score(out_label_list, preds_list),
        "f1": f1_score(out_label_list, preds_list),
    }
    logger.info("***** Eval results %s *****", prefix)
    for key in sorted(results.keys()):
        logger.info("  %s = %s", key, str(results[key]))
    return results, preds_list
def load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode, evaluate=False):
    """Read NER examples from ``args.eval_file`` and build a ``TensorDataset``.

    Args:
        args: parsed CLI namespace (uses ``eval_file``, ``max_seq_length``,
            ``model_type`` and ``local_rank``).
        tokenizer: tokenizer used to encode the examples.
        labels: list of NER label strings.
        pad_token_label_id: label id assigned to padding positions.
        mode: split name forwarded to ``read_examples_from_file`` (e.g. "dev").
        evaluate: True when loading an evaluation split; controls which rank
            does the feature-building work in distributed runs.

    Returns:
        ``torch.utils.data.TensorDataset`` of
        (input_ids, input_mask, segment_ids, label_ids).
    """
    # BUG FIX: the original body referenced a bare name ``evaluate`` that
    # resolved to the module-level evaluate() function (always truthy), so
    # ``not evaluate`` was always False and both distributed barriers below
    # were silently dead.  ``evaluate`` is now an explicit keyword argument.
    if args.local_rank not in [-1, 0] and not evaluate:
        # Only the first process builds features; the others wait here.
        torch.distributed.barrier()
    logger.info("Creating features from dataset file at %s", args.eval_file)
    examples = read_examples_from_file(args.eval_file, mode)
    features = convert_examples_to_features(
        examples,
        labels,
        args.max_seq_length,
        tokenizer,
        # XLNet places the [CLS] token at the end with segment id 2.
        cls_token_at_end=bool(args.model_type in ["xlnet"]),
        cls_token=tokenizer.cls_token,
        cls_token_segment_id=2 if args.model_type in ["xlnet"] else 0,
        sep_token=tokenizer.sep_token,
        # RoBERTa uses an extra separator token between segments.
        sep_token_extra=bool(args.model_type in ["roberta"]),
        pad_on_left=bool(args.model_type in ["xlnet"]),
        pad_token=tokenizer.pad_token_id,
        pad_token_segment_id=tokenizer.pad_token_type_id,
        pad_token_label_id=pad_token_label_id,
    )
    if args.local_rank == 0 and not evaluate:
        # First process is done; release the processes waiting above.
        torch.distributed.barrier()
    # Stack the per-example feature lists into long tensors for the dataset.
    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
    all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
    all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
    dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
    return dataset
def main():
    """Command-line entry point: parse args, then run NER training/eval/predict."""
    parser = argparse.ArgumentParser()
    # --- Data and model selection ------------------------------------------
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    )
    parser.add_argument(
        "--model_type",
        default=None,
        type=str,
        required=True,
        help="Model type selected in the list: " + ", ".join(MODEL_TYPES),
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS),
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--labels",
        default="",
        type=str,
        help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.",
    )
    parser.add_argument(
        "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument(
        "--cache_dir",
        default="",
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help="The maximum total input sequence length after tokenization. Sequences longer "
        "than this will be truncated, sequences shorter will be padded.",
    )
    # --- Run-mode flags -----------------------------------------------------
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--evaluate_during_training",
        action="store_true",
        help="Whether to run evaluation during training at each logging step.",
    )
    # --- Tokenization options ----------------------------------------------
    parser.add_argument(
        "--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model."
    )
    parser.add_argument(
        "--keep_accents", action="store_const", const=True, help="Set this flag if model is trained with accents."
    )
    parser.add_argument(
        "--strip_accents", action="store_const", const=True, help="Set this flag if model is trained without accents."
    )
    parser.add_argument("--use_fast", action="store_const", const=True, help="Set this flag to use fast tokenization.")
    # --- Optimization hyperparameters --------------------------------------
    parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.")
    parser.add_argument(
        "--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation."
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
    parser.add_argument(
        "--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform."
    )
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    # --- Logging / checkpointing / script-specific inputs -------------------
    parser.add_argument("--logging_steps", type=int, default=500, help="Log every X updates steps.")
    parser.add_argument("--save_steps", type=int, default=500, help="Save checkpoint every X updates steps.")
    # Pickled (input_ids, mask, segment_ids, ner_logits) used as the training
    # set in the do_train branch below (distillation-style training data).
    parser.add_argument("--logits_file", type=str, default="")
    parser.add_argument("--eval_file", type=str, default="")
    parser.add_argument("--warmup_ratio", type=float, default=0.1)
    parser.add_argument(
        "--eval_all_checkpoints",
        action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
    )
    parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    # --- Mixed precision / distributed / debugging --------------------------
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
        "See details at https://nvidia.github.io/apex/amp.html",
    )
    parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
    parser.add_argument("--server_ip", type=str, default="", help="For distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="For distant debugging.")
    args = parser.parse_args()
    # Refuse to clobber a non-empty output dir unless explicitly allowed.
    if (
        os.path.exists(args.output_dir)
        and os.listdir(args.output_dir)
        and args.do_train
        and not args.overwrite_output_dir
    ):
        raise ValueError(
            "Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
                args.output_dir
            )
        )
    # Optional remote debugger attach (ptvsd).
    if args.server_ip and args.server_port:
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()
    # Device setup: single-process (CPU or all visible GPUs via DataParallel)
    # vs. one-GPU-per-process distributed training with NCCL.
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend="nccl")
        args.n_gpu = 1
    args.device = device
    # Only rank 0 (or the single process) logs at INFO; others at WARN.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        args.local_rank,
        device,
        args.n_gpu,
        bool(args.local_rank != -1),
        args.fp16,
    )
    set_seed(args)
    # Label set and the label id used to mask padding in the loss.
    labels = get_labels(args.labels)
    num_labels = len(labels)
    pad_token_label_id = CrossEntropyLoss().ignore_index
    # In distributed runs, only rank 0 downloads model/config; others wait.
    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()
    args.model_type = args.model_type.lower()
    config = AutoConfig.from_pretrained(
        args.config_name if args.config_name else args.model_name_or_path,
        num_labels=num_labels,
        id2label={str(i): label for i, label in enumerate(labels)},
        label2id={label: i for i, label in enumerate(labels)},
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    tokenizer_args = {k: v for k, v in vars(args).items() if v is not None and k in TOKENIZER_ARGS}
    logger.info("Tokenizer arguments: %s", tokenizer_args)
    tokenizer = AutoTokenizer.from_pretrained(
        args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
        cache_dir=args.cache_dir if args.cache_dir else None,
        **tokenizer_args,
    )
    model = AutoModelForTokenClassification.from_pretrained(
        args.model_name_or_path,
        from_tf=bool(".ckpt" in args.model_name_or_path),
        config=config,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    if args.local_rank == 0:
        torch.distributed.barrier()
    model.to(args.device)
    logger.info("Training/evaluation parameters %s", args)
    # --- Training: builds the dataset from a pickled logits file ------------
    if args.do_train:
        with open(args.logits_file, "rb") as f:
            datasets = pickle.load(f)
        all_input_ids = torch.tensor(datasets[0], dtype=torch.long)
        all_input_mask = torch.tensor(datasets[1], dtype=torch.long)
        all_segment_ids = torch.tensor(datasets[2], dtype=torch.long)
        # Teacher logits (floats), not hard labels: distillation-style target.
        all_ner_logits = torch.tensor(datasets[3], dtype=torch.float)
        train_dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_ner_logits)
        global_step, tr_loss = train(args, train_dataset, model, tokenizer, labels, pad_token_label_id)
        logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
    # Save the final model + tokenizer + args from the main process only.
    if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
        if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(args.output_dir)
        logger.info("Saving model checkpoint to %s", args.output_dir)
        model_to_save = (
            model.module if hasattr(model, "module") else model
        )
        model_to_save.save_pretrained(args.output_dir)
        tokenizer.save_pretrained(args.output_dir)
        torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
    results = {}
    # --- Evaluation over one or all saved checkpoints ------------------------
    if args.do_eval and args.local_rank in [-1, 0]:
        tokenizer = AutoTokenizer.from_pretrained(args.output_dir, **tokenizer_args)
        checkpoints = [args.output_dir]
        if args.eval_all_checkpoints:
            checkpoints = list(
                os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True))
            )
            logging.getLogger("pytorch_transformers.modeling_utils").setLevel(logging.WARN)
        logger.info("Evaluate the following checkpoints: %s", checkpoints)
        for checkpoint in checkpoints:
            global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
            model = AutoModelForTokenClassification.from_pretrained(checkpoint)
            model.to(args.device)
            result, _ = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode="dev", prefix=global_step)
            if global_step:
                result = {"{}_{}".format(global_step, k): v for k, v in result.items()}
            results.update(result)
        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            for key in sorted(results.keys()):
                writer.write("{} = {}\n".format(key, str(results[key])))
    # --- Prediction on the test split; writes metrics and per-token labels ---
    if args.do_predict and args.local_rank in [-1, 0]:
        tokenizer = AutoTokenizer.from_pretrained(args.output_dir, **tokenizer_args)
        model = AutoModelForTokenClassification.from_pretrained(args.output_dir)
        model.to(args.device)
        result, predictions = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode="test")
        output_test_results_file = os.path.join(args.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(result.keys()):
                writer.write("{} = {}\n".format(key, str(result[key])))
        output_test_predictions_file = os.path.join(args.output_dir, "test_predictions.txt")
        with open(output_test_predictions_file, "w") as writer:
            with open(os.path.join(args.data_dir, "test.txt"), "r") as f:
                example_id = 0
                for line in f:
                    # Document markers and blank lines are copied through and
                    # advance to the next example's predictions.
                    if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                        writer.write(line)
                        if not predictions[example_id]:
                            example_id += 1
                    elif predictions[example_id]:
                        output_line = line.split()[0] + " " + predictions[example_id].pop(0) + "\n"
                        writer.write(output_line)
                    else:
                        logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])
    return results
# Standard script entry point: run the full pipeline when executed directly.
if __name__ == "__main__":
    main()
| true
| true
|
f717830652b40b5662e7d5601be70f7d7dfd33fa
| 629
|
py
|
Python
|
code/LTI/Demos/Tex_matplotlib.py
|
chipmuenk/acoustics
|
c85ac95a10c09d7fa15d63b2bdb24acab89fec60
|
[
"MIT"
] | null | null | null |
code/LTI/Demos/Tex_matplotlib.py
|
chipmuenk/acoustics
|
c85ac95a10c09d7fa15d63b2bdb24acab89fec60
|
[
"MIT"
] | null | null | null |
code/LTI/Demos/Tex_matplotlib.py
|
chipmuenk/acoustics
|
c85ac95a10c09d7fa15d63b2bdb24acab89fec60
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Demo: matplotlib plot whose labels are typeset by LaTeX.

(LaTeX must be installed on the machine for usetex to work.)
Created on Mon May 14 14:15:52 2012
"""
import numpy as np
from matplotlib import rc
import matplotlib.pyplot as plt

# Route all text rendering through an external LaTeX process.
rc('text', usetex=True)

plt.figure(1)
ax = plt.axes([0.1, 0.1, 0.8, 0.7])

# One second of a cosine at 2*2*pi rad/s, offset by +2, sampled every 10 ms.
time_axis = np.arange(0.0, 1.0 + 0.01, 0.01)
signal = np.cos(2 * 2 * np.pi * time_axis) + 2
plt.plot(time_axis, signal)

plt.xlabel(r'\textbf{Time (s)}')
plt.ylabel(r'\textit{Voltage} (mV)', fontsize=16)
plt.title(r"\TeX\ is Number $\displaystyle\sum_{n=1}^\infty\frac{-e^{i\pi}}{2^n}$!",
          fontsize=16, color='r')
plt.grid(True)
plt.savefig('tex_demo')
plt.show()
| 23.296296
| 84
| 0.659777
|
import numpy as np
from matplotlib import rc
import matplotlib.pyplot as plt
rc('text', usetex=True)
plt.figure(1)
ax = plt.axes([0.1, 0.1, 0.8, 0.7])
t = np.arange(0.0, 1.0+0.01, 0.01)
s = np.cos(2*2*np.pi*t)+2
plt.plot(t, s)
plt.xlabel(r'\textbf{Time (s)}')
plt.ylabel(r'\textit{Voltage} (mV)',fontsize=16)
plt.title(r"\TeX\ is Number $\displaystyle\sum_{n=1}^\infty\frac{-e^{i\pi}}{2^n}$!",
fontsize=16, color='r')
plt.grid(True)
plt.savefig('tex_demo')
plt.show()
| true
| true
|
f717831ab43bf5e60f20fa256ffdf13e2b588a99
| 6,871
|
py
|
Python
|
env/Lib/site-packages/pyttsx/drivers/dummy.py
|
TrinhAnBinh/covid_vir_assistant_ver_0.0.2
|
b4471f4894c1bc203980f06b811f63e8e8f6b3ab
|
[
"MIT"
] | 160
|
2016-10-04T22:45:36.000Z
|
2022-02-10T06:41:56.000Z
|
env/Lib/site-packages/pyttsx/drivers/dummy.py
|
TrinhAnBinh/covid_vir_assistant_ver_0.0.2
|
b4471f4894c1bc203980f06b811f63e8e8f6b3ab
|
[
"MIT"
] | 27
|
2016-10-04T02:45:18.000Z
|
2022-03-09T15:15:54.000Z
|
env/Lib/site-packages/pyttsx/drivers/dummy.py
|
TrinhAnBinh/covid_vir_assistant_ver_0.0.2
|
b4471f4894c1bc203980f06b811f63e8e8f6b3ab
|
[
"MIT"
] | 58
|
2016-10-06T16:53:43.000Z
|
2021-10-21T22:17:35.000Z
|
'''
Dummy driver that produces no output but gives all expected callbacks. Useful
for testing and as a model for real drivers.
Copyright (c) 2009, 2013 Peter Parente
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
'''
from ..voice import Voice
import time
def buildDriver(proxy):
    '''
    Factory called by the driver proxy to obtain a fresh driver instance.

    @param proxy: Proxy creating the driver
    @type proxy: L{driver.DriverProxy}
    @return: New dummy driver bound to the given proxy
    @rtype: L{DummyDriver}
    '''
    instance = DummyDriver(proxy)
    return instance
class DummyDriver(object):
    '''
    Dummy speech engine implementation. Documents the interface, notifications,
    properties, and sequencing responsibilities of a driver implementation.

    @ivar _proxy: Driver proxy that manages this instance
    @type _proxy: L{driver.DriverProxy}
    @ivar _config: Dummy configuration
    @type _config: dict
    @ivar _looping: True when in the dummy event loop, False when not
    @type _looping: bool
    '''
    def __init__(self, proxy):
        '''
        Constructs the driver.

        @param proxy: Proxy creating the driver
        @type proxy: L{driver.DriverProxy}
        '''
        self._proxy = proxy
        self._looping = False
        # hold config values as if we had a real tts implementation that
        # supported them
        voices = [
            Voice('dummy.voice1', 'John Doe', ['en-US', 'en-GB'], 'male', 'adult'),
            Voice('dummy.voice2', 'Jane Doe', ['en-US', 'en-GB'], 'female', 'adult'),
            Voice('dummy.voice3', 'Jimmy Doe', ['en-US', 'en-GB'], 'male', 10)
        ]
        self._config = {
            'rate' : 200,
            'volume' : 1.0,
            'voice' : voices[0],
            'voices' : voices
        }

    def destroy(self):
        '''
        Optional method that will be called when the driver proxy is being
        destroyed. Can cleanup any resources to make sure the engine terminates
        properly.
        '''
        pass

    def startLoop(self):
        '''
        Starts a blocking run loop in which driver callbacks are properly
        invoked.

        @precondition: There was no previous successful call to L{startLoop}
            without an intervening call to L{stopLoop}.
        '''
        first = True
        self._looping = True
        while self._looping:
            if first:
                # Signal readiness exactly once, on the first iteration.
                self._proxy.setBusy(False)
                first = False
            time.sleep(0.5)

    def endLoop(self):
        '''
        Stops a previously started run loop.

        @precondition: A previous call to L{startLoop} suceeded and there was
            no intervening call to L{endLoop}.
        '''
        self._looping = False

    def iterate(self):
        '''
        Iterates from within an external run loop.
        '''
        self._proxy.setBusy(False)
        yield

    def say(self, text):
        '''
        Speaks the given text. Generates the following notifications during
        output:

        started-utterance: When speech output has started
        started-word: When a word is about to be spoken. Includes the character
            "location" of the start of the word in the original utterance text
            and the "length" of the word in characters.
        finished-utterance: When speech output has finished. Includes a flag
            indicating if the entire utterance was "completed" or not.

        The proxy automatically adds any "name" associated with the utterance
        to the notifications on behalf of the driver.

        When starting to output an utterance, the driver must inform its proxy
        that it is busy by invoking L{driver.DriverProxy.setBusy} with a flag
        of True. When the utterance completes or is interrupted, the driver
        inform the proxy that it is no longer busy by invoking
        L{driver.DriverProxy.setBusy} with a flag of False.

        @param text: Unicode text to speak
        @type text: unicode
        '''
        self._proxy.setBusy(True)
        self._proxy.notify('started-utterance')
        i = 0
        for word in text.split(' '):
            self._proxy.notify('started-word', location=i, length=len(word))
            try:
                # Advance to the character position just past the next space.
                i = text.index(' ', i+1)+1
            except Exception:
                # No further space: last word of the utterance.
                pass
        self._proxy.notify('finished-utterance', completed=True)
        self._proxy.setBusy(False)

    def stop(self):
        '''
        Stops any current output. If an utterance was being spoken, the driver
        is still responsible for sending the closing finished-utterance
        notification documented above and resetting the busy state of the
        proxy.
        '''
        pass

    def getProperty(self, name):
        '''
        Gets a property value of the speech engine. The suppoted properties
        and their values are:

        voices: List of L{voice.Voice} objects supported by the driver
        voice: String ID of the current voice
        rate: Integer speech rate in words per minute
        volume: Floating point volume of speech in the range [0.0, 1.0]

        @param name: Property name
        @type name: str
        @raise KeyError: When the property name is unknown
        '''
        try:
            return self._config[name]
        except KeyError:
            raise KeyError('unknown property %s' % name)

    def setProperty(self, name, value):
        '''
        Sets one of the supported property values of the speech engine listed
        above. If a value is invalid, attempts to clip it / coerce so it is
        valid before giving up and firing an exception.

        @param name: Property name
        @type name: str
        @param value: Property value
        @type value: object
        @raise KeyError: When the property name is unknown
        @raise ValueError: When the value cannot be coerced to fit the property
        '''
        if name == 'voice':
            matches = [v for v in self._config['voices'] if v.id == value]
            # BUG FIX: an unknown voice id used to raise IndexError from
            # matches[0]; the documented contract is ValueError.
            if not matches:
                raise ValueError('unknown voice id %s' % value)
            self._config['voice'] = matches[0]
        elif name == 'rate':
            self._config['rate'] = value
        elif name == 'volume':
            self._config['volume'] = value
        else:
            raise KeyError('unknown property %s' % name)
| 35.601036
| 85
| 0.625382
|
from ..voice import Voice
import time
def buildDriver(proxy):
return DummyDriver(proxy)
class DummyDriver(object):
def __init__(self, proxy):
self._proxy = proxy
self._looping = False
voices = [
Voice('dummy.voice1', 'John Doe', ['en-US', 'en-GB'], 'male', 'adult'),
Voice('dummy.voice2', 'Jane Doe', ['en-US', 'en-GB'], 'female', 'adult'),
Voice('dummy.voice3', 'Jimmy Doe', ['en-US', 'en-GB'], 'male', 10)
]
self._config = {
'rate' : 200,
'volume' : 1.0,
'voice' : voices[0],
'voices' : voices
}
def destroy(self):
pass
def startLoop(self):
first = True
self._looping = True
while self._looping:
if first:
self._proxy.setBusy(False)
first = False
time.sleep(0.5)
def endLoop(self):
self._looping = False
def iterate(self):
self._proxy.setBusy(False)
yield
def say(self, text):
self._proxy.setBusy(True)
self._proxy.notify('started-utterance')
i = 0
for word in text.split(' '):
self._proxy.notify('started-word', location=i, length=len(word))
try:
i = text.index(' ', i+1)+1
except Exception:
pass
self._proxy.notify('finished-utterance', completed=True)
self._proxy.setBusy(False)
def stop(self):
pass
def getProperty(self, name):
try:
return self._config[name]
except KeyError:
raise KeyError('unknown property %s' % name)
def setProperty(self, name, value):
if name == 'voice':
v = [v for v in self._config['voices'] if v.id == value]
self._config['voice'] = v[0]
elif name == 'rate':
self._config['rate'] = value
elif name == 'volume':
self._config['volume'] = value
else:
raise KeyError('unknown property %s' % name)
| true
| true
|
f71785323c845db207650f7917b7bc72e98e1b96
| 26,982
|
py
|
Python
|
manila/tests/share/drivers/quobyte/test_quobyte.py
|
deiter/manila
|
ba94d20e823d2edad7e9bd01546cf1642b17d212
|
[
"Apache-2.0"
] | 1
|
2019-05-06T10:33:38.000Z
|
2019-05-06T10:33:38.000Z
|
manila/tests/share/drivers/quobyte/test_quobyte.py
|
deiter/manila
|
ba94d20e823d2edad7e9bd01546cf1642b17d212
|
[
"Apache-2.0"
] | 4
|
2019-05-06T11:45:17.000Z
|
2019-05-09T14:23:28.000Z
|
manila/tests/share/drivers/quobyte/test_quobyte.py
|
deiter/manila
|
ba94d20e823d2edad7e9bd01546cf1642b17d212
|
[
"Apache-2.0"
] | 3
|
2019-05-03T12:32:47.000Z
|
2021-01-30T20:26:19.000Z
|
# Copyright (c) 2015 Quobyte, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
import six
from manila import context
from manila import exception
from manila.share import configuration as config
from manila.share import driver
from manila.share.drivers.quobyte import jsonrpc
from manila.share.drivers.quobyte import quobyte
from manila import test
from manila.tests import fake_share
CONF = cfg.CONF
def fake_rpc_handler(name, *args):
    """Return a canned Quobyte RPC response for the given method name.

    Unknown method names yield a sentinel string so tests can detect
    unexpected RPC calls.
    """
    # Responses are rebuilt on every call so each caller gets fresh dicts.
    canned = {
        'resolveVolumeName': None,
        'createVolume': {'volume_uuid': 'voluuid'},
        'exportVolume': {'nfs_server_ip': 'fake_location',
                         'nfs_export_path': '/fake_share'},
        'getConfiguration': {
            "tenant_configuration": [
                {"domain_name": "fake_domain_name",
                 "volume_access": [
                     {"volume_uuid": "fake_id_1",
                      "restrict_to_network": "10.0.0.1",
                      "read_only": False},
                     {"volume_uuid": "fake_id_1",
                      "restrict_to_network": "10.0.0.2",
                      "read_only": False},
                     {"volume_uuid": "fake_id_2",
                      "restrict_to_network": "10.0.0.3",
                      "read_only": False},
                 ]},
                {"domain_name": "fake_domain_name_2",
                 "volume_access": [
                     {"volume_uuid": "fake_id_3",
                      "restrict_to_network": "10.0.0.4",
                      "read_only": False},
                     {"volume_uuid": "fake_id_3",
                      "restrict_to_network": "10.0.0.5",
                      "read_only": True},
                     {"volume_uuid": "fake_id_4",
                      "restrict_to_network": "10.0.0.6",
                      "read_only": False},
                 ]},
            ]
        },
    }
    if name in canned:
        return canned[name]
    return "Unknown fake rpc handler call"
def create_fake_access(access_adr,
                       access_id='fake_access_id',
                       access_type='ip',
                       access_level='rw'):
    """Build a share-access dict shaped like the ones manila hands drivers."""
    return dict(access_id=access_id,
                access_type=access_type,
                access_to=access_adr,
                access_level=access_level)
class QuobyteShareDriverTestCase(test.TestCase):
"""Tests QuobyteShareDriver."""
    def setUp(self):
        """Build a driver with a mocked RPC layer plus fake share/access."""
        super(QuobyteShareDriverTestCase, self).setUp()
        self._context = context.get_admin_context()
        CONF.set_default('driver_handles_share_servers', False)
        self.fake_conf = config.Configuration(None)
        self._driver = quobyte.QuobyteShareDriver(configuration=self.fake_conf)
        self._driver.rpc = mock.Mock()
        self.share = fake_share.fake_share(share_proto='NFS')
        self.access = fake_share.fake_access()
    @mock.patch('manila.share.drivers.quobyte.jsonrpc.JsonRpc', mock.Mock())
    def test_do_setup_success(self):
        """do_setup probes the backend with a getInformation RPC."""
        self._driver.rpc.call = mock.Mock(return_value=None)
        self._driver.do_setup(self._context)
        self._driver.rpc.call.assert_called_with('getInformation', {})
    @mock.patch('manila.share.drivers.quobyte.jsonrpc.JsonRpc.__init__',
                mock.Mock(return_value=None))
    @mock.patch.object(jsonrpc.JsonRpc, 'call',
                       side_effect=exception.QBRpcException)
    def test_do_setup_failure(self, mock_call):
        """An RPC failure during setup surfaces as QBException."""
        self.assertRaises(exception.QBException,
                          self._driver.do_setup, self._context)
    def test_create_share_new_volume(self):
        """Unknown volume name triggers createVolume then exportVolume."""
        self._driver.rpc.call = mock.Mock(wraps=fake_rpc_handler)
        result = self._driver.create_share(self._context, self.share)
        self.assertEqual(self.share['export_location'], result)
        self._driver.rpc.call.assert_has_calls([
            mock.call('createVolume', dict(
                name=self.share['name'],
                tenant_domain=self.share['project_id'],
                root_user_id=self.fake_conf.quobyte_default_volume_user,
                root_group_id=self.fake_conf.quobyte_default_volume_group,
                configuration_name=self.fake_conf.quobyte_volume_configuration
            )),
            mock.call('exportVolume',
                      dict(protocol='NFS', volume_uuid='voluuid'))])
    def test_create_share_existing_volume(self):
        """Existing volumes are only re-exported, never re-created."""
        self._driver.rpc.call = mock.Mock(wraps=fake_rpc_handler)
        self._driver.create_share(self._context, self.share)
        self._driver.rpc.call.assert_called_with(
            'exportVolume', dict(protocol='NFS', volume_uuid='voluuid'))
    def test_create_share_wrong_protocol(self):
        """Protocols other than NFS are rejected with QBException."""
        share = {'share_proto': 'WRONG_PROTOCOL'}
        self.assertRaises(exception.QBException,
                          self._driver.create_share,
                          context=None,
                          share=share)
    def test_delete_share_existing_volume(self):
        """With quobyte_delete_shares on, the backing volume is deleted too."""
        def rpc_handler(name, *args):
            if name == 'resolveVolumeName':
                return {'volume_uuid': 'voluuid'}
            elif name == 'exportVolume':
                return {}
        self._driver.configuration.quobyte_delete_shares = True
        self._driver.rpc.call = mock.Mock(wraps=rpc_handler)
        self._driver.delete_share(self._context, self.share)
        self._driver.rpc.call.assert_has_calls([
            mock.call('resolveVolumeName',
                      {'volume_name': 'fakename',
                       'tenant_domain': 'fake_project_uuid'}),
            mock.call('deleteVolume', {'volume_uuid': 'voluuid'}),
            mock.call('exportVolume', {'volume_uuid': 'voluuid',
                                       'remove_export': True})])
    def test_delete_share_existing_volume_disabled(self):
        """With quobyte_delete_shares off, only the export is removed."""
        def rpc_handler(name, *args):
            if name == 'resolveVolumeName':
                return {'volume_uuid': 'voluuid'}
            elif name == 'exportVolume':
                return {}
        CONF.set_default('quobyte_delete_shares', False)
        self._driver.rpc.call = mock.Mock(wraps=rpc_handler)
        self._driver.delete_share(self._context, self.share)
        self._driver.rpc.call.assert_called_with(
            'exportVolume', {'volume_uuid': 'voluuid',
                             'remove_export': True})
    @mock.patch.object(quobyte.LOG, 'warning')
    def test_delete_share_nonexisting_volume(self, mock_warning):
        """Deleting a share without a backing volume only logs a warning."""
        def rpc_handler(name, *args):
            if name == 'resolveVolumeName':
                return None
        self._driver.rpc.call = mock.Mock(wraps=rpc_handler)
        self._driver.delete_share(self._context, self.share)
        mock_warning.assert_called_with(
            'No volume found for share fake_project_uuid/fakename')
    def test_allow_access(self):
        """rw IP access is granted via exportVolume with add_allow_ip."""
        def rpc_handler(name, *args):
            if name == 'resolveVolumeName':
                return {'volume_uuid': 'voluuid'}
            elif name == 'exportVolume':
                return {'nfs_server_ip': '10.10.1.1',
                        'nfs_export_path': '/voluuid'}
        self._driver.rpc.call = mock.Mock(wraps=rpc_handler)
        self._driver._allow_access(self._context, self.share, self.access)
        self._driver.rpc.call.assert_called_with(
            'exportVolume', {'volume_uuid': 'voluuid',
                             'read_only': False,
                             'add_allow_ip': '10.0.0.1'})
    def test_allow_ro_access(self):
        """ro access level maps to read_only=True in the export call."""
        def rpc_handler(name, *args):
            if name == 'resolveVolumeName':
                return {'volume_uuid': 'voluuid'}
            elif name == 'exportVolume':
                return {'nfs_server_ip': '10.10.1.1',
                        'nfs_export_path': '/voluuid'}
        self._driver.rpc.call = mock.Mock(wraps=rpc_handler)
        ro_access = fake_share.fake_access(access_level='ro')
        self._driver._allow_access(self._context, self.share, ro_access)
        self._driver.rpc.call.assert_called_with(
            'exportVolume', {'volume_uuid': 'voluuid',
                             'read_only': True,
                             'add_allow_ip': '10.0.0.1'})
    def test_allow_access_nonip(self):
        """Non-IP access types are rejected with InvalidShareAccess."""
        self._driver.rpc.call = mock.Mock(wraps=fake_rpc_handler)
        self.access = fake_share.fake_access(**{"access_type":
                                                "non_existant_access_type"})
        self.assertRaises(exception.InvalidShareAccess,
                          self._driver._allow_access,
                          self._context, self.share, self.access)
    def test_deny_access(self):
        """IP access is revoked via exportVolume with remove_allow_ip."""
        def rpc_handler(name, *args):
            if name == 'resolveVolumeName':
                return {'volume_uuid': 'voluuid'}
            elif name == 'exportVolume':
                return {'nfs_server_ip': '10.10.1.1',
                        'nfs_export_path': '/voluuid'}
        self._driver.rpc.call = mock.Mock(wraps=rpc_handler)
        self._driver._deny_access(self._context, self.share, self.access)
        self._driver.rpc.call.assert_called_with(
            'exportVolume',
            {'volume_uuid': 'voluuid', 'remove_allow_ip': '10.0.0.1'})
    @mock.patch.object(quobyte.LOG, 'debug')
    def test_deny_access_nonip(self, mock_debug):
        """Denying a non-IP access is ignored with a debug log only."""
        self._driver.rpc.call = mock.Mock(wraps=fake_rpc_handler)
        self.access = fake_share.fake_access(
            access_type="non_existant_access_type")
        self._driver._deny_access(self._context, self.share, self.access)
        mock_debug.assert_called_with(
            'Quobyte driver only supports ip access control. '
            'Ignoring deny access call for %s , %s',
            'fakename', 'fake_project_uuid')
    def test_resolve_volume_name(self):
        """Name resolution issues a resolveVolumeName RPC."""
        self._driver.rpc.call = mock.Mock(
            return_value={'volume_uuid': 'fake_uuid'})
        self._driver._resolve_volume_name('fake_vol_name', 'fake_domain_name')
        self._driver.rpc.call.assert_called_with(
            'resolveVolumeName',
            {'volume_name': 'fake_vol_name',
             'tenant_domain': 'fake_domain_name'})
    def test_resolve_volume_name_NOENT(self):
        """A missing volume resolves to None rather than raising."""
        self._driver.rpc.call = mock.Mock(
            return_value=None)
        self.assertIsNone(
            self._driver._resolve_volume_name('fake_vol_name',
                                              'fake_domain_name'))
    def test_resolve_volume_name_other_error(self):
        """Unexpected RPC error codes propagate as QBRpcException."""
        self._driver.rpc.call = mock.Mock(
            side_effect=exception.QBRpcException(
                result='fubar',
                qbcode=666))
        self.assertRaises(exception.QBRpcException,
                          self._driver._resolve_volume_name,
                          volume_name='fake_vol_name',
                          tenant_domain='fake_domain_name')
    @mock.patch.object(driver.ShareDriver, '_update_share_stats')
    def test_update_share_stats(self, mock_uss):
        """Capacity figures are forwarded into the stats dict unchanged."""
        self._driver._get_capacities = mock.Mock(return_value=[42, 23])
        self._driver._update_share_stats()
        mock_uss.assert_called_once_with(
            dict(storage_protocol='NFS',
                 vendor_name='Quobyte',
                 share_backend_name=self._driver.backend_name,
                 driver_version=self._driver.DRIVER_VERSION,
                 total_capacity_gb=42,
                 free_capacity_gb=23,
                 reserved_percentage=0))
    def test_get_capacities_gb(self):
        """Byte counts from the backend are converted to (total, free) GiB."""
        capval = 42115548133
        useval = 19695128917
        self._driver.rpc.call = mock.Mock(
            return_value={'total_logical_capacity': six.text_type(capval),
                          'total_logical_usage': six.text_type(useval)})
        self.assertEqual((39.223160718, 20.880642548),
                         self._driver._get_capacities())
    @mock.patch.object(quobyte.QuobyteShareDriver,
                       "_resolve_volume_name",
                       return_value="fake_uuid")
    def test_ensure_share(self, mock_qb_resolve_volname):
        """ensure_share re-exports a resolvable volume and returns its location."""
        self._driver.rpc.call = mock.Mock(wraps=fake_rpc_handler)
        result = self._driver.ensure_share(self._context, self.share, None)
        self.assertEqual(self.share["export_location"], result)
        (mock_qb_resolve_volname.
         assert_called_once_with(self.share['name'],
                                 self.share['project_id']))
        self._driver.rpc.call.assert_has_calls([
            mock.call('exportVolume', dict(
                volume_uuid="fake_uuid",
                protocol='NFS'
            ))])
    @mock.patch.object(quobyte.QuobyteShareDriver,
                       "_resolve_volume_name",
                       return_value=None)
    def test_ensure_deleted_share(self, mock_qb_resolve_volname):
        """ensure_share on an unresolvable volume raises ShareResourceNotFound."""
        self._driver.rpc.call = mock.Mock(wraps=fake_rpc_handler)
        self.assertRaises(exception.ShareResourceNotFound,
                          self._driver.ensure_share,
                          self._context, self.share, None)
        (mock_qb_resolve_volname.
         assert_called_once_with(self.share['name'],
                                 self.share['project_id']))
@mock.patch.object(quobyte.QuobyteShareDriver, "_resize_share")
def test_extend_share(self, mock_qsd_resize_share):
self._driver.extend_share(ext_share=self.share,
ext_size=2,
share_server=None)
mock_qsd_resize_share.assert_called_once_with(share=self.share,
new_size=2)
def test_resize_share(self):
self._driver.rpc.call = mock.Mock(wraps=fake_rpc_handler)
self._driver._resize_share(share=self.share, new_size=7)
self._driver.rpc.call.assert_has_calls([
mock.call('setQuota',
{"consumer": {"type": 3,
"identifier": self.share["name"]},
"limits": {"type": 5, "value": 7}})])
@mock.patch.object(quobyte.QuobyteShareDriver,
"_resolve_volume_name",
return_value="fake_id_3")
def test_fetch_existing_access(self, mock_qb_resolve_volname):
self._driver.rpc.call = mock.Mock(wraps=fake_rpc_handler)
old_access_1 = create_fake_access(access_id="old_1",
access_adr="10.0.0.4")
old_access_2 = create_fake_access(access_id="old_2",
access_adr="10.0.0.5")
exist_list = self._driver._fetch_existing_access(context=self._context,
share=self.share)
# assert expected result here
self.assertEqual([old_access_1['access_to'],
old_access_2['access_to']],
[e.get('access_to') for e in exist_list])
(mock_qb_resolve_volname.
assert_called_once_with(self.share['name'],
self.share['project_id']))
@mock.patch.object(quobyte.QuobyteShareDriver, "_resize_share")
def test_shrink_share(self, mock_qsd_resize_share):
self._driver.shrink_share(shrink_share=self.share,
shrink_size=3,
share_server=None)
mock_qsd_resize_share.assert_called_once_with(share=self.share,
new_size=3)
def test_subtract_access_lists(self):
access_1 = create_fake_access(access_id="new_1",
access_adr="10.0.0.5",
access_type="rw",)
access_2 = create_fake_access(access_id="old_1",
access_adr="10.0.0.1",
access_type="rw")
access_3 = create_fake_access(access_id="old_2",
access_adr="10.0.0.3",
access_type="ro")
access_4 = create_fake_access(access_id="new_2",
access_adr="10.0.0.6",
access_type="rw")
access_5 = create_fake_access(access_id="old_3",
access_adr="10.0.0.4",
access_type="rw")
min_list = [access_1, access_2, access_3, access_4]
sub_list = [access_5, access_3, access_2]
self.assertEqual([access_1, access_4],
self._driver._subtract_access_lists(min_list,
sub_list))
def test_subtract_access_lists_level(self):
access_1 = create_fake_access(access_id="new_1",
access_adr="10.0.0.5",
access_level="rw")
access_2 = create_fake_access(access_id="old_1",
access_adr="10.0.0.1",
access_level="rw")
access_3 = create_fake_access(access_id="old_2",
access_adr="10.0.0.3",
access_level="rw")
access_4 = create_fake_access(access_id="new_2",
access_adr="10.0.0.6",
access_level="rw")
access_5 = create_fake_access(access_id="old_2_ro",
access_adr="10.0.0.3",
access_level="ro")
min_list = [access_1, access_2, access_3, access_4]
sub_list = [access_5, access_2]
self.assertEqual([access_1, access_3, access_4],
self._driver._subtract_access_lists(min_list,
sub_list))
def test_subtract_access_lists_type(self):
access_1 = create_fake_access(access_id="new_1",
access_adr="10.0.0.5",
access_type="ip")
access_2 = create_fake_access(access_id="old_1",
access_adr="10.0.0.1",
access_type="ip")
access_3 = create_fake_access(access_id="old_2",
access_adr="10.0.0.3",
access_type="ip")
access_4 = create_fake_access(access_id="new_2",
access_adr="10.0.0.6",
access_type="ip")
access_5 = create_fake_access(access_id="old_2_ro",
access_adr="10.0.0.3",
access_type="other")
min_list = [access_1, access_2, access_3, access_4]
sub_list = [access_5, access_2]
self.assertEqual([access_1, access_3, access_4],
self._driver._subtract_access_lists(min_list,
sub_list))
@mock.patch.object(quobyte.QuobyteShareDriver, "_allow_access")
@mock.patch.object(quobyte.QuobyteShareDriver, "_deny_access")
def test_update_access_add_delete(self, qb_deny_mock, qb_allow_mock):
access_1 = create_fake_access(access_id="new_1",
access_adr="10.0.0.5",
access_level="rw")
access_2 = create_fake_access(access_id="old_1",
access_adr="10.0.0.1",
access_level="rw")
access_3 = create_fake_access(access_id="old_2",
access_adr="10.0.0.3",
access_level="rw")
self._driver.update_access(self._context,
self.share,
access_rules=None,
add_rules=[access_1],
delete_rules=[access_2, access_3])
qb_allow_mock.assert_called_once_with(self._context,
self.share, access_1)
deny_calls = [mock.call(self._context, self.share, access_2),
mock.call(self._context, self.share, access_3)]
qb_deny_mock.assert_has_calls(deny_calls)
@mock.patch.object(quobyte.LOG, "warning")
def test_update_access_no_rules(self, qb_log_mock):
self._driver.update_access(context=None, share=None, access_rules=[],
add_rules=[], delete_rules=[])
qb_log_mock.assert_has_calls([mock.ANY])
@mock.patch.object(quobyte.QuobyteShareDriver, "_subtract_access_lists")
@mock.patch.object(quobyte.QuobyteShareDriver, "_fetch_existing_access")
@mock.patch.object(quobyte.QuobyteShareDriver, "_allow_access")
def test_update_access_recovery_additionals(self,
qb_allow_mock,
qb_exist_mock,
qb_subtr_mock):
new_access_1 = create_fake_access(access_id="new_1",
access_adr="10.0.0.2")
old_access = create_fake_access(access_id="fake_access_id",
access_adr="10.0.0.1")
new_access_2 = create_fake_access(access_id="new_2",
access_adr="10.0.0.3")
add_access_rules = [new_access_1,
old_access,
new_access_2]
qb_exist_mock.return_value = [old_access]
qb_subtr_mock.side_effect = [[new_access_1, new_access_2], []]
self._driver.update_access(self._context, self.share,
access_rules=add_access_rules, add_rules=[],
delete_rules=[])
assert_calls = [mock.call(self._context, self.share, new_access_1),
mock.call(self._context, self.share, new_access_2)]
qb_allow_mock.assert_has_calls(assert_calls, any_order=True)
qb_exist_mock.assert_called_once_with(self._context, self.share)
@mock.patch.object(quobyte.QuobyteShareDriver, "_subtract_access_lists")
@mock.patch.object(quobyte.QuobyteShareDriver, "_fetch_existing_access")
@mock.patch.object(quobyte.QuobyteShareDriver, "_deny_access")
def test_update_access_recovery_superfluous(self,
qb_deny_mock,
qb_exist_mock,
qb_subtr_mock):
old_access_1 = create_fake_access(access_id="old_1",
access_adr="10.0.0.1")
missing_access_1 = create_fake_access(access_id="mis_1",
access_adr="10.0.0.2")
old_access_2 = create_fake_access(access_id="old_2",
access_adr="10.0.0.3")
qb_exist_mock.side_effect = [[old_access_1, old_access_2]]
qb_subtr_mock.side_effect = [[], [missing_access_1]]
old_access_rules = [old_access_1, old_access_2]
self._driver.update_access(self._context, self.share,
access_rules=old_access_rules, add_rules=[],
delete_rules=[])
qb_deny_mock.assert_called_once_with(self._context,
self.share,
(missing_access_1))
qb_exist_mock.assert_called_once_with(self._context, self.share)
@mock.patch.object(quobyte.QuobyteShareDriver, "_subtract_access_lists")
@mock.patch.object(quobyte.QuobyteShareDriver, "_fetch_existing_access")
@mock.patch.object(quobyte.QuobyteShareDriver, "_deny_access")
@mock.patch.object(quobyte.QuobyteShareDriver, "_allow_access")
def test_update_access_recovery_add_superfluous(self,
qb_allow_mock,
qb_deny_mock,
qb_exist_mock,
qb_subtr_mock):
new_access_1 = create_fake_access(access_id="new_1",
access_adr="10.0.0.5")
old_access_1 = create_fake_access(access_id="old_1",
access_adr="10.0.0.1")
old_access_2 = create_fake_access(access_id="old_2",
access_adr="10.0.0.3")
old_access_3 = create_fake_access(access_id="old_3",
access_adr="10.0.0.4")
miss_access_1 = create_fake_access(access_id="old_3",
access_adr="10.0.0.4")
new_access_2 = create_fake_access(access_id="new_2",
access_adr="10.0.0.3",
access_level="ro")
new_access_rules = [new_access_1, old_access_1, old_access_2,
old_access_3, new_access_2]
qb_exist_mock.return_value = [old_access_1, old_access_2,
old_access_3, miss_access_1]
qb_subtr_mock.side_effect = [[new_access_1, new_access_2],
[miss_access_1, old_access_2]]
self._driver.update_access(self._context, self.share,
new_access_rules, add_rules=[],
delete_rules=[])
a_calls = [mock.call(self._context, self.share, new_access_1),
mock.call(self._context, self.share, new_access_2)]
qb_allow_mock.assert_has_calls(a_calls)
b_calls = [mock.call(self._context, self.share, miss_access_1),
mock.call(self._context, self.share, old_access_2)]
qb_deny_mock.assert_has_calls(b_calls)
qb_exist_mock.assert_called_once_with(self._context, self.share)
| 44.746269
| 79
| 0.552998
|
import mock
from oslo_config import cfg
import six
from manila import context
from manila import exception
from manila.share import configuration as config
from manila.share import driver
from manila.share.drivers.quobyte import jsonrpc
from manila.share.drivers.quobyte import quobyte
from manila import test
from manila.tests import fake_share
CONF = cfg.CONF
def fake_rpc_handler(name, *args):
if name == 'resolveVolumeName':
return None
elif name == 'createVolume':
return {'volume_uuid': 'voluuid'}
elif name == 'exportVolume':
return {'nfs_server_ip': 'fake_location',
'nfs_export_path': '/fake_share'}
elif name == 'getConfiguration':
return {
"tenant_configuration": [{
"domain_name": "fake_domain_name",
"volume_access": [
{"volume_uuid": "fake_id_1",
"restrict_to_network": "10.0.0.1",
"read_only": False},
{"volume_uuid": "fake_id_1",
"restrict_to_network": "10.0.0.2",
"read_only": False},
{"volume_uuid": "fake_id_2",
"restrict_to_network": "10.0.0.3",
"read_only": False}
]},
{"domain_name": "fake_domain_name_2",
"volume_access": [
{"volume_uuid": "fake_id_3",
"restrict_to_network": "10.0.0.4",
"read_only": False},
{"volume_uuid": "fake_id_3",
"restrict_to_network": "10.0.0.5",
"read_only": True},
{"volume_uuid": "fake_id_4",
"restrict_to_network": "10.0.0.6",
"read_only": False}
]}
]
}
else:
return "Unknown fake rpc handler call"
def create_fake_access(access_adr,
access_id='fake_access_id',
access_type='ip',
access_level='rw'):
return {
'access_id': access_id,
'access_type': access_type,
'access_to': access_adr,
'access_level': access_level
}
class QuobyteShareDriverTestCase(test.TestCase):
def setUp(self):
super(QuobyteShareDriverTestCase, self).setUp()
self._context = context.get_admin_context()
CONF.set_default('driver_handles_share_servers', False)
self.fake_conf = config.Configuration(None)
self._driver = quobyte.QuobyteShareDriver(configuration=self.fake_conf)
self._driver.rpc = mock.Mock()
self.share = fake_share.fake_share(share_proto='NFS')
self.access = fake_share.fake_access()
@mock.patch('manila.share.drivers.quobyte.jsonrpc.JsonRpc', mock.Mock())
def test_do_setup_success(self):
self._driver.rpc.call = mock.Mock(return_value=None)
self._driver.do_setup(self._context)
self._driver.rpc.call.assert_called_with('getInformation', {})
@mock.patch('manila.share.drivers.quobyte.jsonrpc.JsonRpc.__init__',
mock.Mock(return_value=None))
@mock.patch.object(jsonrpc.JsonRpc, 'call',
side_effect=exception.QBRpcException)
def test_do_setup_failure(self, mock_call):
self.assertRaises(exception.QBException,
self._driver.do_setup, self._context)
def test_create_share_new_volume(self):
self._driver.rpc.call = mock.Mock(wraps=fake_rpc_handler)
result = self._driver.create_share(self._context, self.share)
self.assertEqual(self.share['export_location'], result)
self._driver.rpc.call.assert_has_calls([
mock.call('createVolume', dict(
name=self.share['name'],
tenant_domain=self.share['project_id'],
root_user_id=self.fake_conf.quobyte_default_volume_user,
root_group_id=self.fake_conf.quobyte_default_volume_group,
configuration_name=self.fake_conf.quobyte_volume_configuration
)),
mock.call('exportVolume',
dict(protocol='NFS', volume_uuid='voluuid'))])
def test_create_share_existing_volume(self):
self._driver.rpc.call = mock.Mock(wraps=fake_rpc_handler)
self._driver.create_share(self._context, self.share)
self._driver.rpc.call.assert_called_with(
'exportVolume', dict(protocol='NFS', volume_uuid='voluuid'))
def test_create_share_wrong_protocol(self):
share = {'share_proto': 'WRONG_PROTOCOL'}
self.assertRaises(exception.QBException,
self._driver.create_share,
context=None,
share=share)
def test_delete_share_existing_volume(self):
def rpc_handler(name, *args):
if name == 'resolveVolumeName':
return {'volume_uuid': 'voluuid'}
elif name == 'exportVolume':
return {}
self._driver.configuration.quobyte_delete_shares = True
self._driver.rpc.call = mock.Mock(wraps=rpc_handler)
self._driver.delete_share(self._context, self.share)
self._driver.rpc.call.assert_has_calls([
mock.call('resolveVolumeName',
{'volume_name': 'fakename',
'tenant_domain': 'fake_project_uuid'}),
mock.call('deleteVolume', {'volume_uuid': 'voluuid'}),
mock.call('exportVolume', {'volume_uuid': 'voluuid',
'remove_export': True})])
def test_delete_share_existing_volume_disabled(self):
def rpc_handler(name, *args):
if name == 'resolveVolumeName':
return {'volume_uuid': 'voluuid'}
elif name == 'exportVolume':
return {}
CONF.set_default('quobyte_delete_shares', False)
self._driver.rpc.call = mock.Mock(wraps=rpc_handler)
self._driver.delete_share(self._context, self.share)
self._driver.rpc.call.assert_called_with(
'exportVolume', {'volume_uuid': 'voluuid',
'remove_export': True})
@mock.patch.object(quobyte.LOG, 'warning')
def test_delete_share_nonexisting_volume(self, mock_warning):
def rpc_handler(name, *args):
if name == 'resolveVolumeName':
return None
self._driver.rpc.call = mock.Mock(wraps=rpc_handler)
self._driver.delete_share(self._context, self.share)
mock_warning.assert_called_with(
'No volume found for share fake_project_uuid/fakename')
def test_allow_access(self):
def rpc_handler(name, *args):
if name == 'resolveVolumeName':
return {'volume_uuid': 'voluuid'}
elif name == 'exportVolume':
return {'nfs_server_ip': '10.10.1.1',
'nfs_export_path': '/voluuid'}
self._driver.rpc.call = mock.Mock(wraps=rpc_handler)
self._driver._allow_access(self._context, self.share, self.access)
self._driver.rpc.call.assert_called_with(
'exportVolume', {'volume_uuid': 'voluuid',
'read_only': False,
'add_allow_ip': '10.0.0.1'})
def test_allow_ro_access(self):
def rpc_handler(name, *args):
if name == 'resolveVolumeName':
return {'volume_uuid': 'voluuid'}
elif name == 'exportVolume':
return {'nfs_server_ip': '10.10.1.1',
'nfs_export_path': '/voluuid'}
self._driver.rpc.call = mock.Mock(wraps=rpc_handler)
ro_access = fake_share.fake_access(access_level='ro')
self._driver._allow_access(self._context, self.share, ro_access)
self._driver.rpc.call.assert_called_with(
'exportVolume', {'volume_uuid': 'voluuid',
'read_only': True,
'add_allow_ip': '10.0.0.1'})
def test_allow_access_nonip(self):
self._driver.rpc.call = mock.Mock(wraps=fake_rpc_handler)
self.access = fake_share.fake_access(**{"access_type":
"non_existant_access_type"})
self.assertRaises(exception.InvalidShareAccess,
self._driver._allow_access,
self._context, self.share, self.access)
def test_deny_access(self):
def rpc_handler(name, *args):
if name == 'resolveVolumeName':
return {'volume_uuid': 'voluuid'}
elif name == 'exportVolume':
return {'nfs_server_ip': '10.10.1.1',
'nfs_export_path': '/voluuid'}
self._driver.rpc.call = mock.Mock(wraps=rpc_handler)
self._driver._deny_access(self._context, self.share, self.access)
self._driver.rpc.call.assert_called_with(
'exportVolume',
{'volume_uuid': 'voluuid', 'remove_allow_ip': '10.0.0.1'})
@mock.patch.object(quobyte.LOG, 'debug')
def test_deny_access_nonip(self, mock_debug):
self._driver.rpc.call = mock.Mock(wraps=fake_rpc_handler)
self.access = fake_share.fake_access(
access_type="non_existant_access_type")
self._driver._deny_access(self._context, self.share, self.access)
mock_debug.assert_called_with(
'Quobyte driver only supports ip access control. '
'Ignoring deny access call for %s , %s',
'fakename', 'fake_project_uuid')
def test_resolve_volume_name(self):
self._driver.rpc.call = mock.Mock(
return_value={'volume_uuid': 'fake_uuid'})
self._driver._resolve_volume_name('fake_vol_name', 'fake_domain_name')
self._driver.rpc.call.assert_called_with(
'resolveVolumeName',
{'volume_name': 'fake_vol_name',
'tenant_domain': 'fake_domain_name'})
def test_resolve_volume_name_NOENT(self):
self._driver.rpc.call = mock.Mock(
return_value=None)
self.assertIsNone(
self._driver._resolve_volume_name('fake_vol_name',
'fake_domain_name'))
def test_resolve_volume_name_other_error(self):
self._driver.rpc.call = mock.Mock(
side_effect=exception.QBRpcException(
result='fubar',
qbcode=666))
self.assertRaises(exception.QBRpcException,
self._driver._resolve_volume_name,
volume_name='fake_vol_name',
tenant_domain='fake_domain_name')
@mock.patch.object(driver.ShareDriver, '_update_share_stats')
def test_update_share_stats(self, mock_uss):
self._driver._get_capacities = mock.Mock(return_value=[42, 23])
self._driver._update_share_stats()
mock_uss.assert_called_once_with(
dict(storage_protocol='NFS',
vendor_name='Quobyte',
share_backend_name=self._driver.backend_name,
driver_version=self._driver.DRIVER_VERSION,
total_capacity_gb=42,
free_capacity_gb=23,
reserved_percentage=0))
def test_get_capacities_gb(self):
capval = 42115548133
useval = 19695128917
self._driver.rpc.call = mock.Mock(
return_value={'total_logical_capacity': six.text_type(capval),
'total_logical_usage': six.text_type(useval)})
self.assertEqual((39.223160718, 20.880642548),
self._driver._get_capacities())
@mock.patch.object(quobyte.QuobyteShareDriver,
"_resolve_volume_name",
return_value="fake_uuid")
def test_ensure_share(self, mock_qb_resolve_volname):
self._driver.rpc.call = mock.Mock(wraps=fake_rpc_handler)
result = self._driver.ensure_share(self._context, self.share, None)
self.assertEqual(self.share["export_location"], result)
(mock_qb_resolve_volname.
assert_called_once_with(self.share['name'],
self.share['project_id']))
self._driver.rpc.call.assert_has_calls([
mock.call('exportVolume', dict(
volume_uuid="fake_uuid",
protocol='NFS'
))])
@mock.patch.object(quobyte.QuobyteShareDriver,
"_resolve_volume_name",
return_value=None)
def test_ensure_deleted_share(self, mock_qb_resolve_volname):
self._driver.rpc.call = mock.Mock(wraps=fake_rpc_handler)
self.assertRaises(exception.ShareResourceNotFound,
self._driver.ensure_share,
self._context, self.share, None)
(mock_qb_resolve_volname.
assert_called_once_with(self.share['name'],
self.share['project_id']))
@mock.patch.object(quobyte.QuobyteShareDriver, "_resize_share")
def test_extend_share(self, mock_qsd_resize_share):
self._driver.extend_share(ext_share=self.share,
ext_size=2,
share_server=None)
mock_qsd_resize_share.assert_called_once_with(share=self.share,
new_size=2)
def test_resize_share(self):
self._driver.rpc.call = mock.Mock(wraps=fake_rpc_handler)
self._driver._resize_share(share=self.share, new_size=7)
self._driver.rpc.call.assert_has_calls([
mock.call('setQuota',
{"consumer": {"type": 3,
"identifier": self.share["name"]},
"limits": {"type": 5, "value": 7}})])
@mock.patch.object(quobyte.QuobyteShareDriver,
"_resolve_volume_name",
return_value="fake_id_3")
def test_fetch_existing_access(self, mock_qb_resolve_volname):
self._driver.rpc.call = mock.Mock(wraps=fake_rpc_handler)
old_access_1 = create_fake_access(access_id="old_1",
access_adr="10.0.0.4")
old_access_2 = create_fake_access(access_id="old_2",
access_adr="10.0.0.5")
exist_list = self._driver._fetch_existing_access(context=self._context,
share=self.share)
self.assertEqual([old_access_1['access_to'],
old_access_2['access_to']],
[e.get('access_to') for e in exist_list])
(mock_qb_resolve_volname.
assert_called_once_with(self.share['name'],
self.share['project_id']))
@mock.patch.object(quobyte.QuobyteShareDriver, "_resize_share")
def test_shrink_share(self, mock_qsd_resize_share):
self._driver.shrink_share(shrink_share=self.share,
shrink_size=3,
share_server=None)
mock_qsd_resize_share.assert_called_once_with(share=self.share,
new_size=3)
def test_subtract_access_lists(self):
access_1 = create_fake_access(access_id="new_1",
access_adr="10.0.0.5",
access_type="rw",)
access_2 = create_fake_access(access_id="old_1",
access_adr="10.0.0.1",
access_type="rw")
access_3 = create_fake_access(access_id="old_2",
access_adr="10.0.0.3",
access_type="ro")
access_4 = create_fake_access(access_id="new_2",
access_adr="10.0.0.6",
access_type="rw")
access_5 = create_fake_access(access_id="old_3",
access_adr="10.0.0.4",
access_type="rw")
min_list = [access_1, access_2, access_3, access_4]
sub_list = [access_5, access_3, access_2]
self.assertEqual([access_1, access_4],
self._driver._subtract_access_lists(min_list,
sub_list))
def test_subtract_access_lists_level(self):
access_1 = create_fake_access(access_id="new_1",
access_adr="10.0.0.5",
access_level="rw")
access_2 = create_fake_access(access_id="old_1",
access_adr="10.0.0.1",
access_level="rw")
access_3 = create_fake_access(access_id="old_2",
access_adr="10.0.0.3",
access_level="rw")
access_4 = create_fake_access(access_id="new_2",
access_adr="10.0.0.6",
access_level="rw")
access_5 = create_fake_access(access_id="old_2_ro",
access_adr="10.0.0.3",
access_level="ro")
min_list = [access_1, access_2, access_3, access_4]
sub_list = [access_5, access_2]
self.assertEqual([access_1, access_3, access_4],
self._driver._subtract_access_lists(min_list,
sub_list))
def test_subtract_access_lists_type(self):
access_1 = create_fake_access(access_id="new_1",
access_adr="10.0.0.5",
access_type="ip")
access_2 = create_fake_access(access_id="old_1",
access_adr="10.0.0.1",
access_type="ip")
access_3 = create_fake_access(access_id="old_2",
access_adr="10.0.0.3",
access_type="ip")
access_4 = create_fake_access(access_id="new_2",
access_adr="10.0.0.6",
access_type="ip")
access_5 = create_fake_access(access_id="old_2_ro",
access_adr="10.0.0.3",
access_type="other")
min_list = [access_1, access_2, access_3, access_4]
sub_list = [access_5, access_2]
self.assertEqual([access_1, access_3, access_4],
self._driver._subtract_access_lists(min_list,
sub_list))
@mock.patch.object(quobyte.QuobyteShareDriver, "_allow_access")
@mock.patch.object(quobyte.QuobyteShareDriver, "_deny_access")
def test_update_access_add_delete(self, qb_deny_mock, qb_allow_mock):
access_1 = create_fake_access(access_id="new_1",
access_adr="10.0.0.5",
access_level="rw")
access_2 = create_fake_access(access_id="old_1",
access_adr="10.0.0.1",
access_level="rw")
access_3 = create_fake_access(access_id="old_2",
access_adr="10.0.0.3",
access_level="rw")
self._driver.update_access(self._context,
self.share,
access_rules=None,
add_rules=[access_1],
delete_rules=[access_2, access_3])
qb_allow_mock.assert_called_once_with(self._context,
self.share, access_1)
deny_calls = [mock.call(self._context, self.share, access_2),
mock.call(self._context, self.share, access_3)]
qb_deny_mock.assert_has_calls(deny_calls)
@mock.patch.object(quobyte.LOG, "warning")
def test_update_access_no_rules(self, qb_log_mock):
self._driver.update_access(context=None, share=None, access_rules=[],
add_rules=[], delete_rules=[])
qb_log_mock.assert_has_calls([mock.ANY])
@mock.patch.object(quobyte.QuobyteShareDriver, "_subtract_access_lists")
@mock.patch.object(quobyte.QuobyteShareDriver, "_fetch_existing_access")
@mock.patch.object(quobyte.QuobyteShareDriver, "_allow_access")
def test_update_access_recovery_additionals(self,
qb_allow_mock,
qb_exist_mock,
qb_subtr_mock):
new_access_1 = create_fake_access(access_id="new_1",
access_adr="10.0.0.2")
old_access = create_fake_access(access_id="fake_access_id",
access_adr="10.0.0.1")
new_access_2 = create_fake_access(access_id="new_2",
access_adr="10.0.0.3")
add_access_rules = [new_access_1,
old_access,
new_access_2]
qb_exist_mock.return_value = [old_access]
qb_subtr_mock.side_effect = [[new_access_1, new_access_2], []]
self._driver.update_access(self._context, self.share,
access_rules=add_access_rules, add_rules=[],
delete_rules=[])
assert_calls = [mock.call(self._context, self.share, new_access_1),
mock.call(self._context, self.share, new_access_2)]
qb_allow_mock.assert_has_calls(assert_calls, any_order=True)
qb_exist_mock.assert_called_once_with(self._context, self.share)
@mock.patch.object(quobyte.QuobyteShareDriver, "_subtract_access_lists")
@mock.patch.object(quobyte.QuobyteShareDriver, "_fetch_existing_access")
@mock.patch.object(quobyte.QuobyteShareDriver, "_deny_access")
def test_update_access_recovery_superfluous(self,
qb_deny_mock,
qb_exist_mock,
qb_subtr_mock):
old_access_1 = create_fake_access(access_id="old_1",
access_adr="10.0.0.1")
missing_access_1 = create_fake_access(access_id="mis_1",
access_adr="10.0.0.2")
old_access_2 = create_fake_access(access_id="old_2",
access_adr="10.0.0.3")
qb_exist_mock.side_effect = [[old_access_1, old_access_2]]
qb_subtr_mock.side_effect = [[], [missing_access_1]]
old_access_rules = [old_access_1, old_access_2]
self._driver.update_access(self._context, self.share,
access_rules=old_access_rules, add_rules=[],
delete_rules=[])
qb_deny_mock.assert_called_once_with(self._context,
self.share,
(missing_access_1))
qb_exist_mock.assert_called_once_with(self._context, self.share)
@mock.patch.object(quobyte.QuobyteShareDriver, "_subtract_access_lists")
@mock.patch.object(quobyte.QuobyteShareDriver, "_fetch_existing_access")
@mock.patch.object(quobyte.QuobyteShareDriver, "_deny_access")
@mock.patch.object(quobyte.QuobyteShareDriver, "_allow_access")
def test_update_access_recovery_add_superfluous(self,
qb_allow_mock,
qb_deny_mock,
qb_exist_mock,
qb_subtr_mock):
new_access_1 = create_fake_access(access_id="new_1",
access_adr="10.0.0.5")
old_access_1 = create_fake_access(access_id="old_1",
access_adr="10.0.0.1")
old_access_2 = create_fake_access(access_id="old_2",
access_adr="10.0.0.3")
old_access_3 = create_fake_access(access_id="old_3",
access_adr="10.0.0.4")
miss_access_1 = create_fake_access(access_id="old_3",
access_adr="10.0.0.4")
new_access_2 = create_fake_access(access_id="new_2",
access_adr="10.0.0.3",
access_level="ro")
new_access_rules = [new_access_1, old_access_1, old_access_2,
old_access_3, new_access_2]
qb_exist_mock.return_value = [old_access_1, old_access_2,
old_access_3, miss_access_1]
qb_subtr_mock.side_effect = [[new_access_1, new_access_2],
[miss_access_1, old_access_2]]
self._driver.update_access(self._context, self.share,
new_access_rules, add_rules=[],
delete_rules=[])
a_calls = [mock.call(self._context, self.share, new_access_1),
mock.call(self._context, self.share, new_access_2)]
qb_allow_mock.assert_has_calls(a_calls)
b_calls = [mock.call(self._context, self.share, miss_access_1),
mock.call(self._context, self.share, old_access_2)]
qb_deny_mock.assert_has_calls(b_calls)
qb_exist_mock.assert_called_once_with(self._context, self.share)
| true
| true
|
f7178564a143b7e9c9ec5547d04715becbaafd35
| 1,183
|
py
|
Python
|
scripts/pyqtgraph-develop/examples/MultiPlotWidget.py
|
kuldeepaman/tf-pose
|
8050912c52a7b4f3c8a2656f267d47ba21d093f6
|
[
"Apache-2.0"
] | null | null | null |
scripts/pyqtgraph-develop/examples/MultiPlotWidget.py
|
kuldeepaman/tf-pose
|
8050912c52a7b4f3c8a2656f267d47ba21d093f6
|
[
"Apache-2.0"
] | null | null | null |
scripts/pyqtgraph-develop/examples/MultiPlotWidget.py
|
kuldeepaman/tf-pose
|
8050912c52a7b4f3c8a2656f267d47ba21d093f6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
## Add path to library (just for examples; you do not need this)
import initExample
from scipy import random
from numpy import linspace
from pyqtgraph.Qt import QtGui, QtCore
import pyqtgraph as pg
from pyqtgraph import MultiPlotWidget
try:
from pyqtgraph.metaarray import *
except:
print("MultiPlot is only used with MetaArray for now (and you do not have the metaarray package)")
exit()
app = QtGui.QApplication([])
mw = QtGui.QMainWindow()
mw.resize(800,800)
pw = MultiPlotWidget()
mw.setCentralWidget(pw)
mw.show()
data = random.normal(size=(3, 1000)) * np.array([[0.1], [1e-5], [1]])
ma = MetaArray(data, info=[
{'name': 'Signal', 'cols': [
{'name': 'Col1', 'units': 'V'},
{'name': 'Col2', 'units': 'A'},
{'name': 'Col3'},
]},
{'name': 'Time', 'values': linspace(0., 1., 1000), 'units': 's'}
])
pw.plot(ma)
## Start Qt event loop unless running in interactive mode.
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
| 28.166667
| 103
| 0.618766
|
port linspace
from pyqtgraph.Qt import QtGui, QtCore
import pyqtgraph as pg
from pyqtgraph import MultiPlotWidget
try:
from pyqtgraph.metaarray import *
except:
print("MultiPlot is only used with MetaArray for now (and you do not have the metaarray package)")
exit()
app = QtGui.QApplication([])
mw = QtGui.QMainWindow()
mw.resize(800,800)
pw = MultiPlotWidget()
mw.setCentralWidget(pw)
mw.show()
data = random.normal(size=(3, 1000)) * np.array([[0.1], [1e-5], [1]])
ma = MetaArray(data, info=[
{'name': 'Signal', 'cols': [
{'name': 'Col1', 'units': 'V'},
{'name': 'Col2', 'units': 'A'},
{'name': 'Col3'},
]},
{'name': 'Time', 'values': linspace(0., 1., 1000), 'units': 's'}
])
pw.plot(ma)
lags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
| true
| true
|
f71786f5f39627c1fac7f1b3ad0367fe2a1feb53
| 1,888
|
py
|
Python
|
silk-src/src/pysilk/python/_netsa_silk.py
|
mjschultz/netsa-pkg
|
07bf4ff29a73ebc0f58e4aa27d3ad6b1dee7fc83
|
[
"Apache-2.0"
] | 3
|
2018-06-01T06:55:14.000Z
|
2021-11-14T22:51:04.000Z
|
silk-src/src/pysilk/python/_netsa_silk.py
|
mjschultz/netsa-pkg
|
07bf4ff29a73ebc0f58e4aa27d3ad6b1dee7fc83
|
[
"Apache-2.0"
] | 3
|
2017-07-02T17:03:34.000Z
|
2021-09-09T17:05:31.000Z
|
silk-src/src/pysilk/python/_netsa_silk.py
|
mjschultz/netsa-pkg
|
07bf4ff29a73ebc0f58e4aa27d3ad6b1dee7fc83
|
[
"Apache-2.0"
] | 4
|
2017-08-14T15:42:31.000Z
|
2022-01-24T16:24:27.000Z
|
#######################################################################
# Copyright (C) 2011-2020 by Carnegie Mellon University.
#
# @OPENSOURCE_LICENSE_START@
# See license information in ../../../LICENSE.txt
# @OPENSOURCE_LICENSE_END@
#
#######################################################################
#######################################################################
# $SiLK: _netsa_silk.py ef14e54179be 2020-04-14 21:57:45Z mthomas $
#######################################################################
"""
The netsa_silk module contains a shared API for working with common
Internet data in both netsa-python and PySiLK. If netsa-python is
installed but PySiLK is not, the less efficient but more portable
pure-Python version of this functionality that is included in
netsa-python is used. If PySiLK is installed, then the
high-performance C version of this functionality that is included in
PySiLK is used.
"""
# This module provides the symbols exported by PySiLK for the
# netsa_silk API. It exists to rename PySiLK symbols that have a
# different name from the netsa_silk symbols, and to constrain the set
# of PySiLK symbols that are exported. If a new symbol is added (to
# provide a new feature), it need only be added here and it will
# automatically be exported by netsa_silk.
from silk import (
ipv6_enabled as has_IPv6Addr,
IPAddr, IPv4Addr, IPv6Addr,
IPSet as ip_set,
IPWildcard,
TCPFlags,
TCP_FIN, TCP_SYN, TCP_RST, TCP_PSH, TCP_ACK, TCP_URG, TCP_ECE, TCP_CWR,
silk_version
)
# Version of the netsa_silk API implemented by this module.
__version__ = "1.0"

# Version of the underlying implementation (SiLK itself).
__impl_version__ = "SiLK " + silk_version()

# Names re-exported as the public netsa_silk API.
__all__ = [
    "has_IPv6Addr",
    "IPAddr",
    "IPv4Addr",
    "IPv6Addr",
    "ip_set",
    "IPWildcard",
    "TCPFlags",
    "TCP_FIN",
    "TCP_SYN",
    "TCP_RST",
    "TCP_PSH",
    "TCP_ACK",
    "TCP_URG",
    "TCP_ECE",
    "TCP_CWR",
    "__version__",
    "__impl_version__",
]
| 31.466667
| 75
| 0.632945
| true
| true
|
|
f717870bb59f4f607b6c5181895c66f109cd7bcf
| 523
|
py
|
Python
|
juriscraper/opinions/united_states/state/calctapp_1st.py
|
EvandoBlanco/juriscraper
|
3d16af258620d4ba1b4827f66ef69e8a2c5a0484
|
[
"BSD-2-Clause"
] | 228
|
2015-01-23T04:41:39.000Z
|
2022-03-30T09:52:20.000Z
|
juriscraper/opinions/united_states/state/calctapp_1st.py
|
EvandoBlanco/juriscraper
|
3d16af258620d4ba1b4827f66ef69e8a2c5a0484
|
[
"BSD-2-Clause"
] | 331
|
2015-01-05T18:53:40.000Z
|
2022-03-29T23:43:30.000Z
|
juriscraper/opinions/united_states/state/calctapp_1st.py
|
EvandoBlanco/juriscraper
|
3d16af258620d4ba1b4827f66ef69e8a2c5a0484
|
[
"BSD-2-Clause"
] | 84
|
2015-01-03T01:19:21.000Z
|
2022-03-01T08:09:32.000Z
|
# Scraper for California's First District Court of Appeal
# CourtID: calctapp_1st
# Court Short Name: Cal. Ct. App.
from juriscraper.opinions.united_states.state import cal
class Site(cal.Site):
    """Scraper site for the First District of the California Court of Appeal."""

    def __init__(self, *args, **kwargs):
        super(Site, self).__init__(*args, **kwargs)
        # Narrow the shared Cal. Ct. App. scraper down to the First
        # Appellate District ("A" court code) and build its URL.
        self.court_id = self.__module__
        self.court_code = "A"
        self.division = "1st App. Dist."
        self.url = self.build_url()

    def _get_divisions(self):
        # Every case scraped here belongs to the same, fixed division.
        return [self.division for _ in self.case_names]
| 29.055556
| 57
| 0.667304
|
# CourtID: calctapp_1st
# Court Short Name: Cal. Ct. App.
from juriscraper.opinions.united_states.state import cal
class Site(cal.Site):
def __init__(self, *args, **kwargs):
super(Site, self).__init__(*args, **kwargs)
self.court_id = self.__module__
self.court_code = "A"
self.division = "1st App. Dist."
self.url = self.build_url()
def _get_divisions(self):
return [self.division] * len(self.case_names)
| true
| true
|
f71787192e7a235fa2e02fbfdcda490a79b3300e
| 12,258
|
py
|
Python
|
source/deepsecurity/models/anti_malware_computer_extension.py
|
felipecosta09/cloudone-workload-controltower-lifecycle
|
7927c84d164058b034fc872701b5ee117641f4d1
|
[
"Apache-2.0"
] | 1
|
2021-10-30T16:40:09.000Z
|
2021-10-30T16:40:09.000Z
|
source/deepsecurity/models/anti_malware_computer_extension.py
|
felipecosta09/cloudone-workload-controltower-lifecycle
|
7927c84d164058b034fc872701b5ee117641f4d1
|
[
"Apache-2.0"
] | 1
|
2021-07-28T20:19:03.000Z
|
2021-07-28T20:19:03.000Z
|
source/deepsecurity/models/anti_malware_computer_extension.py
|
felipecosta09/cloudone-workload-controltower-lifecycle
|
7927c84d164058b034fc872701b5ee117641f4d1
|
[
"Apache-2.0"
] | 1
|
2021-10-30T16:40:02.000Z
|
2021-10-30T16:40:02.000Z
|
# coding: utf-8
"""
Trend Micro Deep Security API
Copyright 2018 - 2020 Trend Micro Incorporated.<br/>Get protected, stay secured, and keep informed with Trend Micro Deep Security's new RESTful API. Access system data and manage security configurations to automate your security workflows and integrate Deep Security into your CI/CD pipeline. # noqa: E501
OpenAPI spec version: 12.5.841
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from deepsecurity.models.computer_module_status import ComputerModuleStatus # noqa: F401,E501
class AntiMalwareComputerExtension(object):
    """Anti-Malware module settings attached to a computer record.

    Hand-restyled version of the swagger-generated model; the external
    interface (constructor keywords, properties, ``to_dict``/``to_str``,
    equality) is unchanged.
    """

    # Attribute name -> declared swagger type.
    swagger_types = {
        'state': 'str',
        'module_status': 'ComputerModuleStatus',
        'real_time_scan_configuration_id': 'int',
        'real_time_scan_schedule_id': 'int',
        'manual_scan_configuration_id': 'int',
        'scheduled_scan_configuration_id': 'int',
        'last_manual_scan': 'int',
        'last_scheduled_scan': 'int'
    }

    # Attribute name -> JSON key used on the wire.
    attribute_map = {
        'state': 'state',
        'module_status': 'moduleStatus',
        'real_time_scan_configuration_id': 'realTimeScanConfigurationID',
        'real_time_scan_schedule_id': 'realTimeScanScheduleID',
        'manual_scan_configuration_id': 'manualScanConfigurationID',
        'scheduled_scan_configuration_id': 'scheduledScanConfigurationID',
        'last_manual_scan': 'lastManualScan',
        'last_scheduled_scan': 'lastScheduledScan'
    }

    def __init__(self, state=None, module_status=None, real_time_scan_configuration_id=None, real_time_scan_schedule_id=None, manual_scan_configuration_id=None, scheduled_scan_configuration_id=None, last_manual_scan=None, last_scheduled_scan=None):
        """Create a model; every field is optional and defaults to unset."""
        # Start every backing attribute out as None (unset).
        for attr_name in self.swagger_types:
            setattr(self, '_' + attr_name, None)
        self.discriminator = None
        # Route each provided value through its property setter so that
        # validation (e.g. the `state` whitelist) is applied.
        provided = {
            'state': state,
            'module_status': module_status,
            'real_time_scan_configuration_id': real_time_scan_configuration_id,
            'real_time_scan_schedule_id': real_time_scan_schedule_id,
            'manual_scan_configuration_id': manual_scan_configuration_id,
            'scheduled_scan_configuration_id': scheduled_scan_configuration_id,
            'last_manual_scan': last_manual_scan,
            'last_scheduled_scan': last_scheduled_scan,
        }
        for attr_name, value in provided.items():
            if value is not None:
                setattr(self, attr_name, value)

    @property
    def state(self):
        """Module state: one of "inherited", "on" or "off"."""
        return self._state

    @state.setter
    def state(self, value):
        permitted = ["inherited", "on", "off"]
        if value not in permitted:
            raise ValueError(
                "Invalid value for `state` ({0}), must be one of {1}"
                .format(value, permitted)
            )
        self._state = value

    @property
    def module_status(self):
        """Module status details (ComputerModuleStatus)."""
        return self._module_status

    @module_status.setter
    def module_status(self, value):
        self._module_status = value

    @property
    def real_time_scan_configuration_id(self):
        """ID of the Real Time Scan Configuration."""
        return self._real_time_scan_configuration_id

    @real_time_scan_configuration_id.setter
    def real_time_scan_configuration_id(self, value):
        self._real_time_scan_configuration_id = value

    @property
    def real_time_scan_schedule_id(self):
        """ID of the Real Time Schedule."""
        return self._real_time_scan_schedule_id

    @real_time_scan_schedule_id.setter
    def real_time_scan_schedule_id(self, value):
        self._real_time_scan_schedule_id = value

    @property
    def manual_scan_configuration_id(self):
        """ID of the Manual Scan Configuration."""
        return self._manual_scan_configuration_id

    @manual_scan_configuration_id.setter
    def manual_scan_configuration_id(self, value):
        self._manual_scan_configuration_id = value

    @property
    def scheduled_scan_configuration_id(self):
        """ID of the Scheduled Scan Configuration."""
        return self._scheduled_scan_configuration_id

    @scheduled_scan_configuration_id.setter
    def scheduled_scan_configuration_id(self, value):
        self._scheduled_scan_configuration_id = value

    @property
    def last_manual_scan(self):
        """Timestamp of the last manual malware scan, in ms since epoch."""
        return self._last_manual_scan

    @last_manual_scan.setter
    def last_manual_scan(self, value):
        self._last_manual_scan = value

    @property
    def last_scheduled_scan(self):
        """Timestamp of the last scheduled malware scan, in ms since epoch."""
        return self._last_scheduled_scan

    @last_scheduled_scan.setter
    def last_scheduled_scan(self, value):
        self._last_scheduled_scan = value

    def to_dict(self):
        """Return the model properties as a dict (one level of nesting)."""
        result = {}
        for attr_name, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr_name)
            if isinstance(value, list):
                # Convert nested models inside lists; leave plain items as-is.
                result[attr_name] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr_name] = value.to_dict()
            elif isinstance(value, dict):
                # Convert nested models stored as dict values.
                result[attr_name] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr_name] = value
        if issubclass(AntiMalwareComputerExtension, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Two models are equal when all their attributes are equal."""
        if not isinstance(other, AntiMalwareComputerExtension):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
| 38.186916
| 311
| 0.660548
|
import pprint
import re
import six
from deepsecurity.models.computer_module_status import ComputerModuleStatus
class AntiMalwareComputerExtension(object):
swagger_types = {
'state': 'str',
'module_status': 'ComputerModuleStatus',
'real_time_scan_configuration_id': 'int',
'real_time_scan_schedule_id': 'int',
'manual_scan_configuration_id': 'int',
'scheduled_scan_configuration_id': 'int',
'last_manual_scan': 'int',
'last_scheduled_scan': 'int'
}
attribute_map = {
'state': 'state',
'module_status': 'moduleStatus',
'real_time_scan_configuration_id': 'realTimeScanConfigurationID',
'real_time_scan_schedule_id': 'realTimeScanScheduleID',
'manual_scan_configuration_id': 'manualScanConfigurationID',
'scheduled_scan_configuration_id': 'scheduledScanConfigurationID',
'last_manual_scan': 'lastManualScan',
'last_scheduled_scan': 'lastScheduledScan'
}
def __init__(self, state=None, module_status=None, real_time_scan_configuration_id=None, real_time_scan_schedule_id=None, manual_scan_configuration_id=None, scheduled_scan_configuration_id=None, last_manual_scan=None, last_scheduled_scan=None):
self._state = None
self._module_status = None
self._real_time_scan_configuration_id = None
self._real_time_scan_schedule_id = None
self._manual_scan_configuration_id = None
self._scheduled_scan_configuration_id = None
self._last_manual_scan = None
self._last_scheduled_scan = None
self.discriminator = None
if state is not None:
self.state = state
if module_status is not None:
self.module_status = module_status
if real_time_scan_configuration_id is not None:
self.real_time_scan_configuration_id = real_time_scan_configuration_id
if real_time_scan_schedule_id is not None:
self.real_time_scan_schedule_id = real_time_scan_schedule_id
if manual_scan_configuration_id is not None:
self.manual_scan_configuration_id = manual_scan_configuration_id
if scheduled_scan_configuration_id is not None:
self.scheduled_scan_configuration_id = scheduled_scan_configuration_id
if last_manual_scan is not None:
self.last_manual_scan = last_manual_scan
if last_scheduled_scan is not None:
self.last_scheduled_scan = last_scheduled_scan
@property
def state(self):
return self._state
@state.setter
def state(self, state):
allowed_values = ["inherited", "on", "off"]
if state not in allowed_values:
raise ValueError(
"Invalid value for `state` ({0}), must be one of {1}"
.format(state, allowed_values)
)
self._state = state
@property
def module_status(self):
return self._module_status
@module_status.setter
def module_status(self, module_status):
self._module_status = module_status
@property
def real_time_scan_configuration_id(self):
return self._real_time_scan_configuration_id
@real_time_scan_configuration_id.setter
def real_time_scan_configuration_id(self, real_time_scan_configuration_id):
self._real_time_scan_configuration_id = real_time_scan_configuration_id
@property
def real_time_scan_schedule_id(self):
return self._real_time_scan_schedule_id
@real_time_scan_schedule_id.setter
def real_time_scan_schedule_id(self, real_time_scan_schedule_id):
self._real_time_scan_schedule_id = real_time_scan_schedule_id
@property
def manual_scan_configuration_id(self):
return self._manual_scan_configuration_id
@manual_scan_configuration_id.setter
def manual_scan_configuration_id(self, manual_scan_configuration_id):
self._manual_scan_configuration_id = manual_scan_configuration_id
@property
def scheduled_scan_configuration_id(self):
return self._scheduled_scan_configuration_id
@scheduled_scan_configuration_id.setter
def scheduled_scan_configuration_id(self, scheduled_scan_configuration_id):
self._scheduled_scan_configuration_id = scheduled_scan_configuration_id
@property
def last_manual_scan(self):
return self._last_manual_scan
@last_manual_scan.setter
def last_manual_scan(self, last_manual_scan):
self._last_manual_scan = last_manual_scan
@property
def last_scheduled_scan(self):
return self._last_scheduled_scan
@last_scheduled_scan.setter
def last_scheduled_scan(self, last_scheduled_scan):
self._last_scheduled_scan = last_scheduled_scan
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(AntiMalwareComputerExtension, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, AntiMalwareComputerExtension):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
f71787b41422d7fb8b33c9454081d2bd60c5fe8f
| 1,150
|
py
|
Python
|
cirq/contrib/svg/svg_test.py
|
lilies/Cirq
|
519b8b70ba4d2d92d1c034c398161ebdbd23e2e7
|
[
"Apache-2.0"
] | 3
|
2020-09-26T03:56:28.000Z
|
2020-09-27T13:21:04.000Z
|
cirq/contrib/svg/svg_test.py
|
lilies/Cirq
|
519b8b70ba4d2d92d1c034c398161ebdbd23e2e7
|
[
"Apache-2.0"
] | 1
|
2020-08-11T15:45:17.000Z
|
2020-08-11T15:45:17.000Z
|
cirq/contrib/svg/svg_test.py
|
lilies/Cirq
|
519b8b70ba4d2d92d1c034c398161ebdbd23e2e7
|
[
"Apache-2.0"
] | 1
|
2020-04-14T15:29:29.000Z
|
2020-04-14T15:29:29.000Z
|
import pytest
import numpy as np
import cirq
from cirq.contrib.svg import circuit_to_svg
def test_svg():
    """A small three-qubit circuit renders to a well-formed SVG document."""
    qubits = cirq.LineQubit.range(3)
    a, b, c = qubits
    circuit = cirq.Circuit(
        cirq.CNOT(a, b),
        cirq.CZ(b, c),
        cirq.SWAP(a, c),
        cirq.PhasedXPowGate(exponent=0.123, phase_exponent=0.456).on(c),
        cirq.Z(a),
        cirq.measure(a, b, c, key='z'),
        cirq.MatrixGate(np.eye(2)).on(a),
    )
    rendered = circuit_to_svg(circuit)
    # Only sanity-check the envelope; exact layout is not pinned down here.
    assert '<svg' in rendered
    assert '</svg>' in rendered
def test_svg_noise():
    """Noise annotations (depolarizing channel) appear in the rendered SVG."""
    qubit = cirq.LineQubit(0)
    clean = cirq.Circuit(cirq.X(qubit))
    noise_model = cirq.ConstantQubitNoiseModel(cirq.DepolarizingChannel(p=1e-3))
    noisy = cirq.Circuit(noise_model.noisy_moments(clean.moments, [qubit]))
    assert '>D(0.001)</text>' in circuit_to_svg(noisy)
def test_validation():
    """Empty circuits and circuits containing empty moments are rejected."""
    with pytest.raises(ValueError):
        circuit_to_svg(cirq.Circuit())
    qubit = cirq.LineQubit(0)
    has_empty_moment = cirq.Circuit(
        [cirq.Moment([cirq.X(qubit)]), cirq.Moment([])])
    with pytest.raises(ValueError):
        circuit_to_svg(has_empty_moment)
| 26.744186
| 80
| 0.6
|
import pytest
import numpy as np
import cirq
from cirq.contrib.svg import circuit_to_svg
def test_svg():
a, b, c = cirq.LineQubit.range(3)
svg_text = circuit_to_svg(
cirq.Circuit(
cirq.CNOT(a, b),
cirq.CZ(b, c),
cirq.SWAP(a, c),
cirq.PhasedXPowGate(exponent=0.123, phase_exponent=0.456).on(c),
cirq.Z(a),
cirq.measure(a, b, c, key='z'),
cirq.MatrixGate(np.eye(2)).on(a),
))
assert '<svg' in svg_text
assert '</svg>' in svg_text
def test_svg_noise():
noise_model = cirq.ConstantQubitNoiseModel(cirq.DepolarizingChannel(p=1e-3))
q = cirq.LineQubit(0)
circuit = cirq.Circuit(cirq.X(q))
circuit = cirq.Circuit(noise_model.noisy_moments(circuit.moments, [q]))
svg = circuit_to_svg(circuit)
assert '>D(0.001)</text>' in svg
def test_validation():
with pytest.raises(ValueError):
circuit_to_svg(cirq.Circuit())
q0 = cirq.LineQubit(0)
with pytest.raises(ValueError):
circuit_to_svg(
cirq.Circuit([cirq.Moment([cirq.X(q0)]),
cirq.Moment([])]))
| true
| true
|
f71788ff490545bb580c7e52b9c71e363cbb8d15
| 6,165
|
py
|
Python
|
recipes/libtool/all/test_package/conanfile.py
|
dpronin/conan-center-index
|
5c6e41a618097d04e731c9831118a51dcb39ab3f
|
[
"MIT"
] | 1
|
2021-11-11T03:07:13.000Z
|
2021-11-11T03:07:13.000Z
|
recipes/libtool/all/test_package/conanfile.py
|
dpronin/conan-center-index
|
5c6e41a618097d04e731c9831118a51dcb39ab3f
|
[
"MIT"
] | 1
|
2021-11-22T13:54:48.000Z
|
2021-11-22T14:09:45.000Z
|
recipes/libtool/all/test_package/conanfile.py
|
dpronin/conan-center-index
|
5c6e41a618097d04e731c9831118a51dcb39ab3f
|
[
"MIT"
] | null | null | null |
from conans import AutoToolsBuildEnvironment, CMake, ConanFile, tools
from contextlib import contextmanager
import glob
import os
import shutil
class TestPackageConan(ConanFile):
    """Conan test package exercising libtool three ways: direct autotools
    integration, linking against ltdl, and wrapping a static lib in a
    shared one. NOTE(review): relies on Conan 1.x `tools` helpers."""
    settings = "os", "compiler", "build_type", "arch"
    generators = "cmake"
    test_type = "explicit"
    # Long autotools paths can exceed Windows limits; keep them short.
    short_paths = True
    @property
    def _settings_build(self):
        # Cross-building profiles expose `settings_build`; fall back to
        # the host settings otherwise.
        return getattr(self, "settings_build", self.settings)
    def requirements(self):
        self.requires(self.tested_reference_str)
    def build_requirements(self):
        self.build_requires(self.tested_reference_str)
        # autoreconf needs a POSIX shell on Windows unless one is provided.
        if self._settings_build.os == "Windows" and not tools.get_env("CONAN_BASH_PATH"):
            self.build_requires("msys2/cci.latest")
    @contextmanager
    def _build_context(self):
        # With MSVC, route the autotools toolchain variables through the
        # automake compile/ar-lib wrappers so `cl`/`lib` behave like cc/ar.
        if self.settings.compiler == "Visual Studio":
            with tools.vcvars(self.settings):
                with tools.environment_append({
                    "CC": "{} cl -nologo".format(tools.unix_path(self.deps_user_info["automake"].compile)),
                    "CXX": "{} cl -nologo".format(tools.unix_path(self.deps_user_info["automake"].compile)),
                    "AR": "{} lib".format(tools.unix_path(self.deps_user_info["automake"].ar_lib)),
                    "LD": "link",
                }):
                    yield
        else:
            yield
    @property
    def _package_folder(self):
        # Scratch install prefix for the autotools build below.
        return os.path.join(self.build_folder, "package")
    def _build_autotools(self):
        """ Test autotools integration """
        # Copy autotools directory to build folder
        shutil.copytree(os.path.join(self.source_folder, "autotools"), os.path.join(self.build_folder, "autotools"))
        with tools.chdir("autotools"):
            # Regenerate configure/Makefile.in with the packaged autoreconf.
            self.run("{} --install --verbose -Wall".format(os.environ["AUTORECONF"]), win_bash=tools.os_info.is_windows)
        tools.mkdir(self._package_folder)
        conf_args = [
            "--prefix={}".format(tools.unix_path(self._package_folder)),
            "--enable-shared", "--enable-static",
        ]
        # Out-of-source build directory.
        os.mkdir("bin_autotools")
        with tools.chdir("bin_autotools"):
            with self._build_context():
                autotools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)
                autotools.libs = []
                autotools.configure(args=conf_args, configure_dir=os.path.join(self.build_folder, "autotools"))
                autotools.make(args=["V=1"])
                autotools.install()
    def _test_autotools(self):
        # The install step above must have produced the expected layout.
        assert os.path.isdir(os.path.join(self._package_folder, "bin"))
        assert os.path.isfile(os.path.join(self._package_folder, "include", "lib.h"))
        assert os.path.isdir(os.path.join(self._package_folder, "lib"))
        if not tools.cross_building(self):
            self.run(os.path.join(self._package_folder, "bin", "test_package"), run_environment=True)
    def _build_ltdl(self):
        """ Build library using ltdl library """
        cmake = CMake(self)
        cmake.configure(source_folder="ltdl")
        cmake.build()
    def _test_ltdl(self):
        """ Test library using ltdl library"""
        # Platform-specific shared-library filename suffix.
        lib_suffix = {
            "Linux": "so",
            "FreeBSD": "so",
            "Macos": "dylib",
            "Windows": "dll",
        }[str(self.settings.os)]
        if not tools.cross_building(self):
            bin_path = os.path.join("bin", "test_package")
            # Windows puts runtime DLLs next to the executable.
            libdir = "bin" if self.settings.os == "Windows" else "lib"
            lib_path = os.path.join(libdir, "liba.{}".format(lib_suffix))
            # The test binary dlopens the library passed as its argument.
            self.run("{} {}".format(bin_path, lib_path), run_environment=True)
    def _build_static_lib_in_shared(self):
        """ Build shared library using libtool (while linking to a static library) """
        # Copy static-in-shared directory to build folder
        autotools_folder = os.path.join(self.build_folder, "sis")
        shutil.copytree(os.path.join(self.source_folder, "sis"), autotools_folder)
        install_prefix = os.path.join(autotools_folder, "prefix")
        # Build static library using CMake
        cmake = CMake(self)
        cmake.definitions["CMAKE_INSTALL_PREFIX"] = install_prefix
        cmake.configure(source_folder=autotools_folder, build_folder=os.path.join(autotools_folder, "cmake_build"))
        cmake.build()
        cmake.install()
        # Copy autotools directory to build folder
        with tools.chdir(autotools_folder):
            self.run("{} -ifv -Wall".format(os.environ["AUTORECONF"]), win_bash=tools.os_info.is_windows)
        with tools.chdir(autotools_folder):
            conf_args = [
                "--enable-shared",
                "--disable-static",
                "--prefix={}".format(tools.unix_path(os.path.join(install_prefix))),
            ]
            with self._build_context():
                autotools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)
                autotools.libs = []
                # Let libtool find the CMake-installed static library.
                autotools.link_flags.append("-L{}".format(tools.unix_path(os.path.join(install_prefix, "lib"))))
                autotools.configure(args=conf_args, configure_dir=autotools_folder)
                autotools.make(args=["V=1"])
                autotools.install()
    def _test_static_lib_in_shared(self):
        """ Test existence of shared library """
        install_prefix = os.path.join(self.build_folder, "sis", "prefix")
        with tools.chdir(install_prefix):
            if self.settings.os == "Windows":
                assert len(list(glob.glob(os.path.join("bin", "*.dll")))) > 0
            elif tools.is_apple_os(self.settings.os):
                assert len(list(glob.glob(os.path.join("lib", "*.dylib")))) > 0
            else:
                assert len(list(glob.glob(os.path.join("lib", "*.so")))) > 0
    def build(self):
        self._build_ltdl()
        # The autotools-driven builds need a native toolchain.
        if not tools.cross_building(self):
            self._build_autotools()
            self._build_static_lib_in_shared()
    def test(self):
        self._test_ltdl()
        if not tools.cross_building(self):
            self._test_autotools()
            self._test_static_lib_in_shared()
| 40.559211
| 120
| 0.610868
|
from conans import AutoToolsBuildEnvironment, CMake, ConanFile, tools
from contextlib import contextmanager
import glob
import os
import shutil
class TestPackageConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
generators = "cmake"
test_type = "explicit"
short_paths = True
@property
def _settings_build(self):
return getattr(self, "settings_build", self.settings)
def requirements(self):
self.requires(self.tested_reference_str)
def build_requirements(self):
self.build_requires(self.tested_reference_str)
if self._settings_build.os == "Windows" and not tools.get_env("CONAN_BASH_PATH"):
self.build_requires("msys2/cci.latest")
@contextmanager
def _build_context(self):
if self.settings.compiler == "Visual Studio":
with tools.vcvars(self.settings):
with tools.environment_append({
"CC": "{} cl -nologo".format(tools.unix_path(self.deps_user_info["automake"].compile)),
"CXX": "{} cl -nologo".format(tools.unix_path(self.deps_user_info["automake"].compile)),
"AR": "{} lib".format(tools.unix_path(self.deps_user_info["automake"].ar_lib)),
"LD": "link",
}):
yield
else:
yield
@property
def _package_folder(self):
return os.path.join(self.build_folder, "package")
def _build_autotools(self):
shutil.copytree(os.path.join(self.source_folder, "autotools"), os.path.join(self.build_folder, "autotools"))
with tools.chdir("autotools"):
self.run("{} --install --verbose -Wall".format(os.environ["AUTORECONF"]), win_bash=tools.os_info.is_windows)
tools.mkdir(self._package_folder)
conf_args = [
"--prefix={}".format(tools.unix_path(self._package_folder)),
"--enable-shared", "--enable-static",
]
os.mkdir("bin_autotools")
with tools.chdir("bin_autotools"):
with self._build_context():
autotools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)
autotools.libs = []
autotools.configure(args=conf_args, configure_dir=os.path.join(self.build_folder, "autotools"))
autotools.make(args=["V=1"])
autotools.install()
def _test_autotools(self):
assert os.path.isdir(os.path.join(self._package_folder, "bin"))
assert os.path.isfile(os.path.join(self._package_folder, "include", "lib.h"))
assert os.path.isdir(os.path.join(self._package_folder, "lib"))
if not tools.cross_building(self):
self.run(os.path.join(self._package_folder, "bin", "test_package"), run_environment=True)
def _build_ltdl(self):
cmake = CMake(self)
cmake.configure(source_folder="ltdl")
cmake.build()
def _test_ltdl(self):
lib_suffix = {
"Linux": "so",
"FreeBSD": "so",
"Macos": "dylib",
"Windows": "dll",
}[str(self.settings.os)]
if not tools.cross_building(self):
bin_path = os.path.join("bin", "test_package")
libdir = "bin" if self.settings.os == "Windows" else "lib"
lib_path = os.path.join(libdir, "liba.{}".format(lib_suffix))
self.run("{} {}".format(bin_path, lib_path), run_environment=True)
def _build_static_lib_in_shared(self):
autotools_folder = os.path.join(self.build_folder, "sis")
shutil.copytree(os.path.join(self.source_folder, "sis"), autotools_folder)
install_prefix = os.path.join(autotools_folder, "prefix")
cmake = CMake(self)
cmake.definitions["CMAKE_INSTALL_PREFIX"] = install_prefix
cmake.configure(source_folder=autotools_folder, build_folder=os.path.join(autotools_folder, "cmake_build"))
cmake.build()
cmake.install()
with tools.chdir(autotools_folder):
self.run("{} -ifv -Wall".format(os.environ["AUTORECONF"]), win_bash=tools.os_info.is_windows)
with tools.chdir(autotools_folder):
conf_args = [
"--enable-shared",
"--disable-static",
"--prefix={}".format(tools.unix_path(os.path.join(install_prefix))),
]
with self._build_context():
autotools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)
autotools.libs = []
autotools.link_flags.append("-L{}".format(tools.unix_path(os.path.join(install_prefix, "lib"))))
autotools.configure(args=conf_args, configure_dir=autotools_folder)
autotools.make(args=["V=1"])
autotools.install()
def _test_static_lib_in_shared(self):
install_prefix = os.path.join(self.build_folder, "sis", "prefix")
with tools.chdir(install_prefix):
if self.settings.os == "Windows":
assert len(list(glob.glob(os.path.join("bin", "*.dll")))) > 0
elif tools.is_apple_os(self.settings.os):
assert len(list(glob.glob(os.path.join("lib", "*.dylib")))) > 0
else:
assert len(list(glob.glob(os.path.join("lib", "*.so")))) > 0
def build(self):
self._build_ltdl()
if not tools.cross_building(self):
self._build_autotools()
self._build_static_lib_in_shared()
def test(self):
self._test_ltdl()
if not tools.cross_building(self):
self._test_autotools()
self._test_static_lib_in_shared()
| true
| true
|
f71789c0f44a5dca7010d4a66926c97673dff301
| 4,401
|
py
|
Python
|
models/backbone.py
|
playerkk/HoiTransformer
|
b710216d6b338863ebe9d40a96765ab52780cefa
|
[
"Apache-2.0"
] | 107
|
2021-03-03T13:31:32.000Z
|
2022-03-31T10:59:45.000Z
|
models/backbone.py
|
playerkk/HoiTransformer
|
b710216d6b338863ebe9d40a96765ab52780cefa
|
[
"Apache-2.0"
] | 37
|
2021-03-10T11:36:49.000Z
|
2022-02-22T03:58:12.000Z
|
models/backbone.py
|
playerkk/HoiTransformer
|
b710216d6b338863ebe9d40a96765ab52780cefa
|
[
"Apache-2.0"
] | 19
|
2021-03-17T13:21:03.000Z
|
2022-02-09T09:48:58.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Backbone modules.
"""
import torch
import torch.nn.functional as F
import torchvision
from torch import nn
from torchvision.models._utils import IntermediateLayerGetter
from typing import Dict, List
from util.misc import NestedTensor, is_main_process
from .position_encoding import build_position_encoding
class FrozenBatchNorm2d(torch.nn.Module):
    """BatchNorm2d whose running statistics and affine parameters are fixed.

    Same as torchvision.ops' frozen batch norm, except an epsilon is added
    before the reciprocal square root; without it, backbones other than
    torchvision's resnet18/34/50/101 can produce NaNs.
    """

    def __init__(self, n):
        super(FrozenBatchNorm2d, self).__init__()
        # Buffers (not parameters): never updated by the optimizer.
        self.register_buffer("weight", torch.ones(n))
        self.register_buffer("bias", torch.zeros(n))
        self.register_buffer("running_mean", torch.zeros(n))
        self.register_buffer("running_var", torch.ones(n))

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
                              missing_keys, unexpected_keys, error_msgs):
        # Regular BatchNorm checkpoints carry a `num_batches_tracked` entry
        # that this frozen variant has no buffer for; drop it before loading.
        state_dict.pop(prefix + 'num_batches_tracked', None)
        super(FrozenBatchNorm2d, self)._load_from_state_dict(
            state_dict, prefix, local_metadata, strict,
            missing_keys, unexpected_keys, error_msgs)

    def forward(self, x):
        # Reshape the per-channel buffers up-front so the computation below
        # is a pair of elementwise ops (fuser-friendly).
        eps = 1e-5
        weight = self.weight.reshape(1, -1, 1, 1)
        bias = self.bias.reshape(1, -1, 1, 1)
        var = self.running_var.reshape(1, -1, 1, 1)
        mean = self.running_mean.reshape(1, -1, 1, 1)
        scale = weight * (var + eps).rsqrt()
        shift = bias - mean * scale
        return x * scale + shift
class BackboneBase(nn.Module):
def __init__(self, backbone: nn.Module, train_backbone: bool, num_channels: int, return_interm_layers: bool):
super().__init__()
for name, parameter in backbone.named_parameters():
if not train_backbone or 'layer2' not in name and 'layer3' not in name and 'layer4' not in name:
parameter.requires_grad_(False)
if return_interm_layers:
return_layers = {"layer1": "0", "layer2": "1", "layer3": "2", "layer4": "3"}
else:
return_layers = {'layer4': "0"}
self.body = IntermediateLayerGetter(backbone, return_layers=return_layers)
self.num_channels = num_channels
def forward(self, tensor_list: NestedTensor):
xs = self.body(tensor_list.tensors)
out: Dict[str, NestedTensor] = {}
for name, x in xs.items():
m = tensor_list.mask
assert m is not None
mask = F.interpolate(m[None].float(), size=x.shape[-2:]).to(torch.bool)[0]
out[name] = NestedTensor(x, mask)
return out
class Backbone(BackboneBase):
"""ResNet backbone with frozen BatchNorm."""
def __init__(self, name: str,
train_backbone: bool,
return_interm_layers: bool,
dilation: bool):
backbone = getattr(torchvision.models, name)(
replace_stride_with_dilation=[False, False, dilation],
pretrained=is_main_process(), norm_layer=FrozenBatchNorm2d)
num_channels = 512 if name in ('resnet18', 'resnet34') else 2048
super().__init__(backbone, train_backbone, num_channels, return_interm_layers)
class Joiner(nn.Sequential):
def __init__(self, backbone, position_embedding):
super().__init__(backbone, position_embedding)
def forward(self, tensor_list: NestedTensor):
xs = self[0](tensor_list)
out: List[NestedTensor] = []
pos = []
for name, x in xs.items():
out.append(x)
# position encoding
pos.append(self[1](x).to(x.tensors.dtype))
return out, pos
def build_backbone(args):
position_embedding = build_position_encoding(args)
train_backbone = args.lr_backbone > 0
return_interm_layers = False # args.masks
backbone = Backbone(args.backbone, train_backbone, return_interm_layers, False)
model = Joiner(backbone, position_embedding)
model.num_channels = backbone.num_channels
return model
| 37.29661
| 113
| 0.656669
|
import torch
import torch.nn.functional as F
import torchvision
from torch import nn
from torchvision.models._utils import IntermediateLayerGetter
from typing import Dict, List
from util.misc import NestedTensor, is_main_process
from .position_encoding import build_position_encoding
class FrozenBatchNorm2d(torch.nn.Module):
def __init__(self, n):
super(FrozenBatchNorm2d, self).__init__()
self.register_buffer("weight", torch.ones(n))
self.register_buffer("bias", torch.zeros(n))
self.register_buffer("running_mean", torch.zeros(n))
self.register_buffer("running_var", torch.ones(n))
def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs):
num_batches_tracked_key = prefix + 'num_batches_tracked'
if num_batches_tracked_key in state_dict:
del state_dict[num_batches_tracked_key]
super(FrozenBatchNorm2d, self)._load_from_state_dict(
state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs)
def forward(self, x):
w = self.weight.reshape(1, -1, 1, 1)
b = self.bias.reshape(1, -1, 1, 1)
rv = self.running_var.reshape(1, -1, 1, 1)
rm = self.running_mean.reshape(1, -1, 1, 1)
eps = 1e-5
scale = w * (rv + eps).rsqrt()
bias = b - rm * scale
return x * scale + bias
class BackboneBase(nn.Module):
def __init__(self, backbone: nn.Module, train_backbone: bool, num_channels: int, return_interm_layers: bool):
super().__init__()
for name, parameter in backbone.named_parameters():
if not train_backbone or 'layer2' not in name and 'layer3' not in name and 'layer4' not in name:
parameter.requires_grad_(False)
if return_interm_layers:
return_layers = {"layer1": "0", "layer2": "1", "layer3": "2", "layer4": "3"}
else:
return_layers = {'layer4': "0"}
self.body = IntermediateLayerGetter(backbone, return_layers=return_layers)
self.num_channels = num_channels
def forward(self, tensor_list: NestedTensor):
xs = self.body(tensor_list.tensors)
out: Dict[str, NestedTensor] = {}
for name, x in xs.items():
m = tensor_list.mask
assert m is not None
mask = F.interpolate(m[None].float(), size=x.shape[-2:]).to(torch.bool)[0]
out[name] = NestedTensor(x, mask)
return out
class Backbone(BackboneBase):
def __init__(self, name: str,
train_backbone: bool,
return_interm_layers: bool,
dilation: bool):
backbone = getattr(torchvision.models, name)(
replace_stride_with_dilation=[False, False, dilation],
pretrained=is_main_process(), norm_layer=FrozenBatchNorm2d)
num_channels = 512 if name in ('resnet18', 'resnet34') else 2048
super().__init__(backbone, train_backbone, num_channels, return_interm_layers)
class Joiner(nn.Sequential):
def __init__(self, backbone, position_embedding):
super().__init__(backbone, position_embedding)
def forward(self, tensor_list: NestedTensor):
xs = self[0](tensor_list)
out: List[NestedTensor] = []
pos = []
for name, x in xs.items():
out.append(x)
pos.append(self[1](x).to(x.tensors.dtype))
return out, pos
def build_backbone(args):
position_embedding = build_position_encoding(args)
train_backbone = args.lr_backbone > 0
return_interm_layers = False
backbone = Backbone(args.backbone, train_backbone, return_interm_layers, False)
model = Joiner(backbone, position_embedding)
model.num_channels = backbone.num_channels
return model
| true
| true
|
f7178a5f0eaaff23bb2efef24ecd4ae204c5ee9e
| 638
|
py
|
Python
|
src/django_backend_api/manage.py
|
Adityaraj1711/django-backend-architecture
|
7f3c270af0cb5dd2ebc097c7436a4958cd48ff7c
|
[
"MIT"
] | 25
|
2020-04-28T19:25:28.000Z
|
2021-07-04T17:24:35.000Z
|
src/django_backend_api/manage.py
|
Adityaraj1711/django-backend-architecture
|
7f3c270af0cb5dd2ebc097c7436a4958cd48ff7c
|
[
"MIT"
] | 13
|
2020-08-05T22:40:37.000Z
|
2022-03-12T00:24:36.000Z
|
src/django_backend_api/manage.py
|
Adityaraj1711/django-backend-architecture
|
7f3c270af0cb5dd2ebc097c7436a4958cd48ff7c
|
[
"MIT"
] | 2
|
2020-10-29T13:10:01.000Z
|
2021-11-22T01:55:14.000Z
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_backend_api.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 29
| 82
| 0.688088
|
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_backend_api.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| true
| true
|
f7178a80bd5a4eb44808ea505c773373aa6ce545
| 102
|
py
|
Python
|
example.py
|
CJSmekens/code-refinery-bq-1
|
12cf67033e82176da665156037204eb405a0f6a7
|
[
"Apache-2.0"
] | null | null | null |
example.py
|
CJSmekens/code-refinery-bq-1
|
12cf67033e82176da665156037204eb405a0f6a7
|
[
"Apache-2.0"
] | 2
|
2021-06-07T10:01:32.000Z
|
2021-06-07T10:10:18.000Z
|
example.py
|
CJSmekens/code-refinery-bq-1
|
12cf67033e82176da665156037204eb405a0f6a7
|
[
"Apache-2.0"
] | 1
|
2021-06-07T09:53:19.000Z
|
2021-06-07T09:53:19.000Z
|
def add(a,b):
return a + b
def subtract(a,b):
return a - b
def product(a,b):
return a * b
| 11.333333
| 18
| 0.558824
|
def add(a,b):
return a + b
def subtract(a,b):
return a - b
def product(a,b):
return a * b
| true
| true
|
f7178af25c554b1c64de2dc78db065ca48b7edd5
| 10,521
|
py
|
Python
|
micro_app_sdk/model/ops_automation/job_tasks_pb2.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | 5
|
2019-07-31T04:11:05.000Z
|
2021-01-07T03:23:20.000Z
|
micro_app_sdk/model/ops_automation/job_tasks_pb2.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | null | null | null |
micro_app_sdk/model/ops_automation/job_tasks_pb2.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: job_tasks.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from micro_app_sdk.model.ops_automation import mail_info_pb2 as micro__app__sdk_dot_model_dot_ops__automation_dot_mail__info__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='job_tasks.proto',
package='ops_automation',
syntax='proto3',
serialized_options=_b('ZHgo.easyops.local/contracts/protorepo-models/easyops/model/ops_automation'),
serialized_pb=_b('\n\x0fjob_tasks.proto\x12\x0eops_automation\x1a\x32micro_app_sdk/model/ops_automation/mail_info.proto\"\x82\x03\n\x08JobTasks\x12\n\n\x02id\x18\x01 \x01(\t\x12\r\n\x05jobId\x18\x02 \x01(\t\x12\x0f\n\x07jobName\x18\x03 \x01(\t\x12\x10\n\x08menuName\x18\x04 \x01(\t\x12\x0e\n\x06\x65xecId\x18\x05 \x01(\t\x12\x14\n\x0cresourceType\x18\x06 \x01(\t\x12\x12\n\nresourceId\x18\x07 \x01(\t\x12\x13\n\x0bresourceVId\x18\x08 \x01(\t\x12\x15\n\rresourceVName\x18\t \x01(\t\x12\x0f\n\x07trigger\x18\n \x01(\t\x12\x10\n\x08\x65xecUser\x18\x0b \x01(\t\x12\r\n\x05hosts\x18\x0c \x03(\t\x12\x0e\n\x06status\x18\r \x01(\t\x12&\n\x04mail\x18\x0e \x01(\x0b\x32\x18.ops_automation.MailInfo\x12\x13\n\x0bsuccessRate\x18\x0f \x01(\x02\x12\r\n\x05\x65rror\x18\x10 \x01(\t\x12\x12\n\ncreateTime\x18\x11 \x01(\t\x12\x12\n\nupdateTime\x18\x12 \x01(\t\x12\x0f\n\x07\x63reator\x18\x13 \x01(\t\x12\x0b\n\x03org\x18\x14 \x01(\x05\x42JZHgo.easyops.local/contracts/protorepo-models/easyops/model/ops_automationb\x06proto3')
,
dependencies=[micro__app__sdk_dot_model_dot_ops__automation_dot_mail__info__pb2.DESCRIPTOR,])
_JOBTASKS = _descriptor.Descriptor(
name='JobTasks',
full_name='ops_automation.JobTasks',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='ops_automation.JobTasks.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='jobId', full_name='ops_automation.JobTasks.jobId', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='jobName', full_name='ops_automation.JobTasks.jobName', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='menuName', full_name='ops_automation.JobTasks.menuName', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='execId', full_name='ops_automation.JobTasks.execId', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='resourceType', full_name='ops_automation.JobTasks.resourceType', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='resourceId', full_name='ops_automation.JobTasks.resourceId', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='resourceVId', full_name='ops_automation.JobTasks.resourceVId', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='resourceVName', full_name='ops_automation.JobTasks.resourceVName', index=8,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='trigger', full_name='ops_automation.JobTasks.trigger', index=9,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='execUser', full_name='ops_automation.JobTasks.execUser', index=10,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hosts', full_name='ops_automation.JobTasks.hosts', index=11,
number=12, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='status', full_name='ops_automation.JobTasks.status', index=12,
number=13, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mail', full_name='ops_automation.JobTasks.mail', index=13,
number=14, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='successRate', full_name='ops_automation.JobTasks.successRate', index=14,
number=15, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='error', full_name='ops_automation.JobTasks.error', index=15,
number=16, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='createTime', full_name='ops_automation.JobTasks.createTime', index=16,
number=17, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='updateTime', full_name='ops_automation.JobTasks.updateTime', index=17,
number=18, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='creator', full_name='ops_automation.JobTasks.creator', index=18,
number=19, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='org', full_name='ops_automation.JobTasks.org', index=19,
number=20, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=88,
serialized_end=474,
)
_JOBTASKS.fields_by_name['mail'].message_type = micro__app__sdk_dot_model_dot_ops__automation_dot_mail__info__pb2._MAILINFO
DESCRIPTOR.message_types_by_name['JobTasks'] = _JOBTASKS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
JobTasks = _reflection.GeneratedProtocolMessageType('JobTasks', (_message.Message,), {
'DESCRIPTOR' : _JOBTASKS,
'__module__' : 'job_tasks_pb2'
# @@protoc_insertion_point(class_scope:ops_automation.JobTasks)
})
_sym_db.RegisterMessage(JobTasks)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 50.581731
| 1,012
| 0.737763
|
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
from micro_app_sdk.model.ops_automation import mail_info_pb2 as micro__app__sdk_dot_model_dot_ops__automation_dot_mail__info__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='job_tasks.proto',
package='ops_automation',
syntax='proto3',
serialized_options=_b('ZHgo.easyops.local/contracts/protorepo-models/easyops/model/ops_automation'),
serialized_pb=_b('\n\x0fjob_tasks.proto\x12\x0eops_automation\x1a\x32micro_app_sdk/model/ops_automation/mail_info.proto\"\x82\x03\n\x08JobTasks\x12\n\n\x02id\x18\x01 \x01(\t\x12\r\n\x05jobId\x18\x02 \x01(\t\x12\x0f\n\x07jobName\x18\x03 \x01(\t\x12\x10\n\x08menuName\x18\x04 \x01(\t\x12\x0e\n\x06\x65xecId\x18\x05 \x01(\t\x12\x14\n\x0cresourceType\x18\x06 \x01(\t\x12\x12\n\nresourceId\x18\x07 \x01(\t\x12\x13\n\x0bresourceVId\x18\x08 \x01(\t\x12\x15\n\rresourceVName\x18\t \x01(\t\x12\x0f\n\x07trigger\x18\n \x01(\t\x12\x10\n\x08\x65xecUser\x18\x0b \x01(\t\x12\r\n\x05hosts\x18\x0c \x03(\t\x12\x0e\n\x06status\x18\r \x01(\t\x12&\n\x04mail\x18\x0e \x01(\x0b\x32\x18.ops_automation.MailInfo\x12\x13\n\x0bsuccessRate\x18\x0f \x01(\x02\x12\r\n\x05\x65rror\x18\x10 \x01(\t\x12\x12\n\ncreateTime\x18\x11 \x01(\t\x12\x12\n\nupdateTime\x18\x12 \x01(\t\x12\x0f\n\x07\x63reator\x18\x13 \x01(\t\x12\x0b\n\x03org\x18\x14 \x01(\x05\x42JZHgo.easyops.local/contracts/protorepo-models/easyops/model/ops_automationb\x06proto3')
,
dependencies=[micro__app__sdk_dot_model_dot_ops__automation_dot_mail__info__pb2.DESCRIPTOR,])
_JOBTASKS = _descriptor.Descriptor(
name='JobTasks',
full_name='ops_automation.JobTasks',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='ops_automation.JobTasks.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='jobId', full_name='ops_automation.JobTasks.jobId', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='jobName', full_name='ops_automation.JobTasks.jobName', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='menuName', full_name='ops_automation.JobTasks.menuName', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='execId', full_name='ops_automation.JobTasks.execId', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='resourceType', full_name='ops_automation.JobTasks.resourceType', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='resourceId', full_name='ops_automation.JobTasks.resourceId', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='resourceVId', full_name='ops_automation.JobTasks.resourceVId', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='resourceVName', full_name='ops_automation.JobTasks.resourceVName', index=8,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='trigger', full_name='ops_automation.JobTasks.trigger', index=9,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='execUser', full_name='ops_automation.JobTasks.execUser', index=10,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hosts', full_name='ops_automation.JobTasks.hosts', index=11,
number=12, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='status', full_name='ops_automation.JobTasks.status', index=12,
number=13, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mail', full_name='ops_automation.JobTasks.mail', index=13,
number=14, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='successRate', full_name='ops_automation.JobTasks.successRate', index=14,
number=15, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='error', full_name='ops_automation.JobTasks.error', index=15,
number=16, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='createTime', full_name='ops_automation.JobTasks.createTime', index=16,
number=17, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='updateTime', full_name='ops_automation.JobTasks.updateTime', index=17,
number=18, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='creator', full_name='ops_automation.JobTasks.creator', index=18,
number=19, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='org', full_name='ops_automation.JobTasks.org', index=19,
number=20, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=88,
serialized_end=474,
)
_JOBTASKS.fields_by_name['mail'].message_type = micro__app__sdk_dot_model_dot_ops__automation_dot_mail__info__pb2._MAILINFO
DESCRIPTOR.message_types_by_name['JobTasks'] = _JOBTASKS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
JobTasks = _reflection.GeneratedProtocolMessageType('JobTasks', (_message.Message,), {
'DESCRIPTOR' : _JOBTASKS,
'__module__' : 'job_tasks_pb2'
# @@protoc_insertion_point(class_scope:ops_automation.JobTasks)
})
_sym_db.RegisterMessage(JobTasks)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| true
| true
|
f7178bb0eb777f6aea029f4307dd32ff4cc85674
| 2,500
|
py
|
Python
|
mobilecoind/strategies/balances.py
|
mccobr/mobilecoin
|
cd7753a0aed838097b456d230151fb34e8cff034
|
[
"Apache-2.0"
] | 2
|
2021-03-13T04:41:04.000Z
|
2021-03-13T04:42:37.000Z
|
mobilecoind/strategies/balances.py
|
mccobr/mobilecoin
|
cd7753a0aed838097b456d230151fb34e8cff034
|
[
"Apache-2.0"
] | 292
|
2020-10-22T00:34:35.000Z
|
2022-03-29T09:29:14.000Z
|
mobilecoind/strategies/balances.py
|
mccobr/mobilecoin
|
cd7753a0aed838097b456d230151fb34e8cff034
|
[
"Apache-2.0"
] | 1
|
2022-03-26T20:34:00.000Z
|
2022-03-26T20:34:00.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2018-2021 The MobileCoin Foundation
"""
The purpose of this script is to print the balances for all keys in
a given account directory.
Example setup and usage:
```
python3 balances.py --key-dir ../../../target/sample_data/master/keys/
```
"""
import argparse
import grpc
import mobilecoind_api_pb2
import mobilecoind_api_pb2_grpc
import os
from accounts import connect, load_key_and_register
from google.protobuf.empty_pb2 import Empty
def parse_args() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser()
parser.add_argument("--mobilecoind-host",
default="localhost",
type=str,
help="Mobilecoind host")
parser.add_argument("--mobilecoind-port",
default="4444",
type=str,
help="Mobilecoind port")
parser.add_argument("--key-dir",
required=True,
type=str,
help="Path to account key dir")
parser.add_argument("--prune",
action="store_true",
help="Prune key files for accounts with 0 balance")
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
print(args)
stub = connect(args.mobilecoind_host, args.mobilecoind_port)
block_count = stub.GetLedgerInfo(Empty()).block_count
total = 0
for keyfile in sorted(
filter(lambda x: x.endswith(".json"), os.listdir(args.key_dir))):
print(keyfile)
account_data = load_key_and_register(
os.path.join(args.key_dir, keyfile), stub)
# Get starting balance
request = mobilecoind_api_pb2.GetMonitorStatusRequest(monitor_id=account_data.monitor_id)
monitor_block = stub.GetMonitorStatus(request).status.next_block
if block_count != monitor_block:
print(f"\tAccount not synced.")
else:
resp = stub.GetBalance(
mobilecoind_api_pb2.GetBalanceRequest(monitor_id=account_data.monitor_id))
balance = resp.balance
total += balance
print(f"\tBalance: {resp.balance:,}")
# Remove balances of 0 FIXME: MC-367 also from mobilecoind wallet
if int(balance) == 0 and args.prune:
os.remove(os.path.join(args.key_dir, keyfile))
print(f"Total balance of key collection: {total:,} PicoMob")
| 34.246575
| 97
| 0.6156
|
import argparse
import grpc
import mobilecoind_api_pb2
import mobilecoind_api_pb2_grpc
import os
from accounts import connect, load_key_and_register
from google.protobuf.empty_pb2 import Empty
def parse_args() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser()
parser.add_argument("--mobilecoind-host",
default="localhost",
type=str,
help="Mobilecoind host")
parser.add_argument("--mobilecoind-port",
default="4444",
type=str,
help="Mobilecoind port")
parser.add_argument("--key-dir",
required=True,
type=str,
help="Path to account key dir")
parser.add_argument("--prune",
action="store_true",
help="Prune key files for accounts with 0 balance")
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
print(args)
stub = connect(args.mobilecoind_host, args.mobilecoind_port)
block_count = stub.GetLedgerInfo(Empty()).block_count
total = 0
for keyfile in sorted(
filter(lambda x: x.endswith(".json"), os.listdir(args.key_dir))):
print(keyfile)
account_data = load_key_and_register(
os.path.join(args.key_dir, keyfile), stub)
request = mobilecoind_api_pb2.GetMonitorStatusRequest(monitor_id=account_data.monitor_id)
monitor_block = stub.GetMonitorStatus(request).status.next_block
if block_count != monitor_block:
print(f"\tAccount not synced.")
else:
resp = stub.GetBalance(
mobilecoind_api_pb2.GetBalanceRequest(monitor_id=account_data.monitor_id))
balance = resp.balance
total += balance
print(f"\tBalance: {resp.balance:,}")
if int(balance) == 0 and args.prune:
os.remove(os.path.join(args.key_dir, keyfile))
print(f"Total balance of key collection: {total:,} PicoMob")
| true
| true
|
f7178bf35043b3dda373e2b1334ac17197d12419
| 52,295
|
py
|
Python
|
Lib/test/test_posix.py
|
sireliah/polish-python
|
605df4944c2d3bc25f8bf6964b274c0a0d297cc3
|
[
"PSF-2.0"
] | 1
|
2018-06-21T18:21:24.000Z
|
2018-06-21T18:21:24.000Z
|
Lib/test/test_posix.py
|
sireliah/polish-python
|
605df4944c2d3bc25f8bf6964b274c0a0d297cc3
|
[
"PSF-2.0"
] | null | null | null |
Lib/test/test_posix.py
|
sireliah/polish-python
|
605df4944c2d3bc25f8bf6964b274c0a0d297cc3
|
[
"PSF-2.0"
] | null | null | null |
"Test posix functions"
z test zaimportuj support
# Skip these tests jeżeli there jest no posix module.
posix = support.import_module('posix')
zaimportuj errno
zaimportuj sys
zaimportuj time
zaimportuj os
zaimportuj platform
zaimportuj pwd
zaimportuj shutil
zaimportuj stat
zaimportuj tempfile
zaimportuj unittest
zaimportuj warnings
_DUMMY_SYMLINK = os.path.join(tempfile.gettempdir(),
support.TESTFN + '-dummy-symlink')
klasa PosixTester(unittest.TestCase):
def setUp(self):
# create empty file
fp = open(support.TESTFN, 'w+')
fp.close()
self.teardown_files = [ support.TESTFN ]
self._warnings_manager = support.check_warnings()
self._warnings_manager.__enter__()
warnings.filterwarnings('ignore', '.* potential security risk .*',
RuntimeWarning)
def tearDown(self):
dla teardown_file w self.teardown_files:
support.unlink(teardown_file)
self._warnings_manager.__exit__(Nic, Nic, Nic)
def testNoArgFunctions(self):
# test posix functions which take no arguments oraz have
# no side-effects which we need to cleanup (e.g., fork, wait, abort)
NO_ARG_FUNCTIONS = [ "ctermid", "getcwd", "getcwdb", "uname",
"times", "getloadavg",
"getegid", "geteuid", "getgid", "getgroups",
"getpid", "getpgrp", "getppid", "getuid", "sync",
]
dla name w NO_ARG_FUNCTIONS:
posix_func = getattr(posix, name, Nic)
jeżeli posix_func jest nie Nic:
posix_func()
self.assertRaises(TypeError, posix_func, 1)
@unittest.skipUnless(hasattr(posix, 'getresuid'),
'test needs posix.getresuid()')
def test_getresuid(self):
user_ids = posix.getresuid()
self.assertEqual(len(user_ids), 3)
dla val w user_ids:
self.assertGreaterEqual(val, 0)
@unittest.skipUnless(hasattr(posix, 'getresgid'),
'test needs posix.getresgid()')
def test_getresgid(self):
group_ids = posix.getresgid()
self.assertEqual(len(group_ids), 3)
dla val w group_ids:
self.assertGreaterEqual(val, 0)
@unittest.skipUnless(hasattr(posix, 'setresuid'),
'test needs posix.setresuid()')
def test_setresuid(self):
current_user_ids = posix.getresuid()
self.assertIsNic(posix.setresuid(*current_user_ids))
# -1 means don't change that value.
self.assertIsNic(posix.setresuid(-1, -1, -1))
@unittest.skipUnless(hasattr(posix, 'setresuid'),
'test needs posix.setresuid()')
def test_setresuid_exception(self):
# Don't do this test jeżeli someone jest silly enough to run us jako root.
current_user_ids = posix.getresuid()
jeżeli 0 nie w current_user_ids:
new_user_ids = (current_user_ids[0]+1, -1, -1)
self.assertRaises(OSError, posix.setresuid, *new_user_ids)
@unittest.skipUnless(hasattr(posix, 'setresgid'),
'test needs posix.setresgid()')
def test_setresgid(self):
current_group_ids = posix.getresgid()
self.assertIsNic(posix.setresgid(*current_group_ids))
# -1 means don't change that value.
self.assertIsNic(posix.setresgid(-1, -1, -1))
@unittest.skipUnless(hasattr(posix, 'setresgid'),
'test needs posix.setresgid()')
def test_setresgid_exception(self):
# Don't do this test jeżeli someone jest silly enough to run us jako root.
current_group_ids = posix.getresgid()
jeżeli 0 nie w current_group_ids:
new_group_ids = (current_group_ids[0]+1, -1, -1)
self.assertRaises(OSError, posix.setresgid, *new_group_ids)
@unittest.skipUnless(hasattr(posix, 'initgroups'),
"test needs os.initgroups()")
def test_initgroups(self):
# It takes a string oraz an integer; check that it podnieśs a TypeError
# dla other argument lists.
self.assertRaises(TypeError, posix.initgroups)
self.assertRaises(TypeError, posix.initgroups, Nic)
self.assertRaises(TypeError, posix.initgroups, 3, "foo")
self.assertRaises(TypeError, posix.initgroups, "foo", 3, object())
# If a non-privileged user invokes it, it should fail przy OSError
# EPERM.
jeżeli os.getuid() != 0:
spróbuj:
name = pwd.getpwuid(posix.getuid()).pw_name
wyjąwszy KeyError:
# the current UID may nie have a pwd entry
podnieś unittest.SkipTest("need a pwd entry")
spróbuj:
posix.initgroups(name, 13)
wyjąwszy OSError jako e:
self.assertEqual(e.errno, errno.EPERM)
inaczej:
self.fail("Expected OSError to be podnieśd by initgroups")
@unittest.skipUnless(hasattr(posix, 'statvfs'),
'test needs posix.statvfs()')
def test_statvfs(self):
self.assertPrawda(posix.statvfs(os.curdir))
@unittest.skipUnless(hasattr(posix, 'fstatvfs'),
'test needs posix.fstatvfs()')
def test_fstatvfs(self):
fp = open(support.TESTFN)
spróbuj:
self.assertPrawda(posix.fstatvfs(fp.fileno()))
self.assertPrawda(posix.statvfs(fp.fileno()))
w_końcu:
fp.close()
@unittest.skipUnless(hasattr(posix, 'ftruncate'),
'test needs posix.ftruncate()')
def test_ftruncate(self):
fp = open(support.TESTFN, 'w+')
spróbuj:
# we need to have some data to truncate
fp.write('test')
fp.flush()
posix.ftruncate(fp.fileno(), 0)
w_końcu:
fp.close()
@unittest.skipUnless(hasattr(posix, 'truncate'), "test needs posix.truncate()")
def test_truncate(self):
przy open(support.TESTFN, 'w') jako fp:
fp.write('test')
fp.flush()
posix.truncate(support.TESTFN, 0)
@unittest.skipUnless(getattr(os, 'execve', Nic) w os.supports_fd, "test needs execve() to support the fd parameter")
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_fexecve(self):
fp = os.open(sys.executable, os.O_RDONLY)
spróbuj:
pid = os.fork()
jeżeli pid == 0:
os.chdir(os.path.split(sys.executable)[0])
posix.execve(fp, [sys.executable, '-c', 'pass'], os.environ)
inaczej:
self.assertEqual(os.waitpid(pid, 0), (pid, 0))
w_końcu:
os.close(fp)
@unittest.skipUnless(hasattr(posix, 'waitid'), "test needs posix.waitid()")
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
def test_waitid(self):
pid = os.fork()
jeżeli pid == 0:
os.chdir(os.path.split(sys.executable)[0])
posix.execve(sys.executable, [sys.executable, '-c', 'pass'], os.environ)
inaczej:
res = posix.waitid(posix.P_PID, pid, posix.WEXITED)
self.assertEqual(pid, res.si_pid)
@unittest.skipUnless(hasattr(posix, 'lockf'), "test needs posix.lockf()")
def test_lockf(self):
fd = os.open(support.TESTFN, os.O_WRONLY | os.O_CREAT)
spróbuj:
os.write(fd, b'test')
os.lseek(fd, 0, os.SEEK_SET)
posix.lockf(fd, posix.F_LOCK, 4)
# section jest locked
posix.lockf(fd, posix.F_ULOCK, 4)
w_końcu:
os.close(fd)
@unittest.skipUnless(hasattr(posix, 'pread'), "test needs posix.pread()")
def test_pread(self):
fd = os.open(support.TESTFN, os.O_RDWR | os.O_CREAT)
spróbuj:
os.write(fd, b'test')
os.lseek(fd, 0, os.SEEK_SET)
self.assertEqual(b'es', posix.pread(fd, 2, 1))
# the first pread() shouldn't disturb the file offset
self.assertEqual(b'te', posix.read(fd, 2))
w_końcu:
os.close(fd)
@unittest.skipUnless(hasattr(posix, 'pwrite'), "test needs posix.pwrite()")
def test_pwrite(self):
fd = os.open(support.TESTFN, os.O_RDWR | os.O_CREAT)
spróbuj:
os.write(fd, b'test')
os.lseek(fd, 0, os.SEEK_SET)
posix.pwrite(fd, b'xx', 1)
self.assertEqual(b'txxt', posix.read(fd, 4))
w_końcu:
os.close(fd)
@unittest.skipUnless(hasattr(posix, 'posix_fallocate'),
"test needs posix.posix_fallocate()")
def test_posix_fallocate(self):
fd = os.open(support.TESTFN, os.O_WRONLY | os.O_CREAT)
spróbuj:
posix.posix_fallocate(fd, 0, 10)
wyjąwszy OSError jako inst:
# issue10812, ZFS doesn't appear to support posix_fallocate,
# so skip Solaris-based since they are likely to have ZFS.
jeżeli inst.errno != errno.EINVAL albo nie sys.platform.startswith("sunos"):
podnieś
w_końcu:
os.close(fd)
@unittest.skipUnless(hasattr(posix, 'posix_fadvise'),
"test needs posix.posix_fadvise()")
def test_posix_fadvise(self):
fd = os.open(support.TESTFN, os.O_RDONLY)
spróbuj:
posix.posix_fadvise(fd, 0, 0, posix.POSIX_FADV_WILLNEED)
w_końcu:
os.close(fd)
@unittest.skipUnless(os.utime w os.supports_fd, "test needs fd support w os.utime")
def test_utime_with_fd(self):
now = time.time()
fd = os.open(support.TESTFN, os.O_RDONLY)
spróbuj:
posix.utime(fd)
posix.utime(fd, Nic)
self.assertRaises(TypeError, posix.utime, fd, (Nic, Nic))
self.assertRaises(TypeError, posix.utime, fd, (now, Nic))
self.assertRaises(TypeError, posix.utime, fd, (Nic, now))
posix.utime(fd, (int(now), int(now)))
posix.utime(fd, (now, now))
self.assertRaises(ValueError, posix.utime, fd, (now, now), ns=(now, now))
self.assertRaises(ValueError, posix.utime, fd, (now, 0), ns=(Nic, Nic))
self.assertRaises(ValueError, posix.utime, fd, (Nic, Nic), ns=(now, 0))
posix.utime(fd, (int(now), int((now - int(now)) * 1e9)))
posix.utime(fd, ns=(int(now), int((now - int(now)) * 1e9)))
w_końcu:
os.close(fd)
@unittest.skipUnless(os.utime w os.supports_follow_symlinks, "test needs follow_symlinks support w os.utime")
def test_utime_nofollow_symlinks(self):
now = time.time()
posix.utime(support.TESTFN, Nic, follow_symlinks=Nieprawda)
self.assertRaises(TypeError, posix.utime, support.TESTFN, (Nic, Nic), follow_symlinks=Nieprawda)
self.assertRaises(TypeError, posix.utime, support.TESTFN, (now, Nic), follow_symlinks=Nieprawda)
self.assertRaises(TypeError, posix.utime, support.TESTFN, (Nic, now), follow_symlinks=Nieprawda)
posix.utime(support.TESTFN, (int(now), int(now)), follow_symlinks=Nieprawda)
posix.utime(support.TESTFN, (now, now), follow_symlinks=Nieprawda)
posix.utime(support.TESTFN, follow_symlinks=Nieprawda)
@unittest.skipUnless(hasattr(posix, 'writev'), "test needs posix.writev()")
def test_writev(self):
fd = os.open(support.TESTFN, os.O_RDWR | os.O_CREAT)
spróbuj:
n = os.writev(fd, (b'test1', b'tt2', b't3'))
self.assertEqual(n, 10)
os.lseek(fd, 0, os.SEEK_SET)
self.assertEqual(b'test1tt2t3', posix.read(fd, 10))
# Issue #20113: empty list of buffers should nie crash
spróbuj:
size = posix.writev(fd, [])
wyjąwszy OSError:
# writev(fd, []) podnieśs OSError(22, "Invalid argument")
# on OpenIndiana
dalej
inaczej:
self.assertEqual(size, 0)
w_końcu:
os.close(fd)
@unittest.skipUnless(hasattr(posix, 'readv'), "test needs posix.readv()")
def test_readv(self):
fd = os.open(support.TESTFN, os.O_RDWR | os.O_CREAT)
spróbuj:
os.write(fd, b'test1tt2t3')
os.lseek(fd, 0, os.SEEK_SET)
buf = [bytearray(i) dla i w [5, 3, 2]]
self.assertEqual(posix.readv(fd, buf), 10)
self.assertEqual([b'test1', b'tt2', b't3'], [bytes(i) dla i w buf])
# Issue #20113: empty list of buffers should nie crash
spróbuj:
size = posix.readv(fd, [])
wyjąwszy OSError:
# readv(fd, []) podnieśs OSError(22, "Invalid argument")
# on OpenIndiana
dalej
inaczej:
self.assertEqual(size, 0)
w_końcu:
os.close(fd)
@unittest.skipUnless(hasattr(posix, 'dup'),
'test needs posix.dup()')
def test_dup(self):
fp = open(support.TESTFN)
spróbuj:
fd = posix.dup(fp.fileno())
self.assertIsInstance(fd, int)
os.close(fd)
w_końcu:
fp.close()
@unittest.skipUnless(hasattr(posix, 'confstr'),
'test needs posix.confstr()')
def test_confstr(self):
self.assertRaises(ValueError, posix.confstr, "CS_garbage")
self.assertEqual(len(posix.confstr("CS_PATH")) > 0, Prawda)
@unittest.skipUnless(hasattr(posix, 'dup2'),
'test needs posix.dup2()')
def test_dup2(self):
fp1 = open(support.TESTFN)
fp2 = open(support.TESTFN)
spróbuj:
posix.dup2(fp1.fileno(), fp2.fileno())
w_końcu:
fp1.close()
fp2.close()
@unittest.skipUnless(hasattr(os, 'O_CLOEXEC'), "needs os.O_CLOEXEC")
@support.requires_linux_version(2, 6, 23)
def test_oscloexec(self):
fd = os.open(support.TESTFN, os.O_RDONLY|os.O_CLOEXEC)
self.addCleanup(os.close, fd)
self.assertNieprawda(os.get_inheritable(fd))
@unittest.skipUnless(hasattr(posix, 'O_EXLOCK'),
'test needs posix.O_EXLOCK')
def test_osexlock(self):
fd = os.open(support.TESTFN,
os.O_WRONLY|os.O_EXLOCK|os.O_CREAT)
self.assertRaises(OSError, os.open, support.TESTFN,
os.O_WRONLY|os.O_EXLOCK|os.O_NONBLOCK)
os.close(fd)
jeżeli hasattr(posix, "O_SHLOCK"):
fd = os.open(support.TESTFN,
os.O_WRONLY|os.O_SHLOCK|os.O_CREAT)
self.assertRaises(OSError, os.open, support.TESTFN,
os.O_WRONLY|os.O_EXLOCK|os.O_NONBLOCK)
os.close(fd)
@unittest.skipUnless(hasattr(posix, 'O_SHLOCK'),
'test needs posix.O_SHLOCK')
def test_osshlock(self):
fd1 = os.open(support.TESTFN,
os.O_WRONLY|os.O_SHLOCK|os.O_CREAT)
fd2 = os.open(support.TESTFN,
os.O_WRONLY|os.O_SHLOCK|os.O_CREAT)
os.close(fd2)
os.close(fd1)
jeżeli hasattr(posix, "O_EXLOCK"):
fd = os.open(support.TESTFN,
os.O_WRONLY|os.O_SHLOCK|os.O_CREAT)
self.assertRaises(OSError, os.open, support.TESTFN,
os.O_RDONLY|os.O_EXLOCK|os.O_NONBLOCK)
os.close(fd)
@unittest.skipUnless(hasattr(posix, 'fstat'),
'test needs posix.fstat()')
def test_fstat(self):
fp = open(support.TESTFN)
spróbuj:
self.assertPrawda(posix.fstat(fp.fileno()))
self.assertPrawda(posix.stat(fp.fileno()))
self.assertRaisesRegex(TypeError,
'should be string, bytes albo integer, not',
posix.stat, float(fp.fileno()))
w_końcu:
fp.close()
@unittest.skipUnless(hasattr(posix, 'stat'),
'test needs posix.stat()')
def test_stat(self):
self.assertPrawda(posix.stat(support.TESTFN))
self.assertPrawda(posix.stat(os.fsencode(support.TESTFN)))
self.assertPrawda(posix.stat(bytearray(os.fsencode(support.TESTFN))))
self.assertRaisesRegex(TypeError,
'can\'t specify Nic dla path argument',
posix.stat, Nic)
self.assertRaisesRegex(TypeError,
'should be string, bytes albo integer, not',
posix.stat, list(support.TESTFN))
self.assertRaisesRegex(TypeError,
'should be string, bytes albo integer, not',
posix.stat, list(os.fsencode(support.TESTFN)))
@unittest.skipUnless(hasattr(posix, 'mkfifo'), "don't have mkfifo()")
def test_mkfifo(self):
support.unlink(support.TESTFN)
posix.mkfifo(support.TESTFN, stat.S_IRUSR | stat.S_IWUSR)
self.assertPrawda(stat.S_ISFIFO(posix.stat(support.TESTFN).st_mode))
@unittest.skipUnless(hasattr(posix, 'mknod') oraz hasattr(stat, 'S_IFIFO'),
"don't have mknod()/S_IFIFO")
def test_mknod(self):
# Test using mknod() to create a FIFO (the only use specified
# by POSIX).
support.unlink(support.TESTFN)
mode = stat.S_IFIFO | stat.S_IRUSR | stat.S_IWUSR
spróbuj:
posix.mknod(support.TESTFN, mode, 0)
wyjąwszy OSError jako e:
# Some old systems don't allow unprivileged users to use
# mknod(), albo only support creating device nodes.
self.assertIn(e.errno, (errno.EPERM, errno.EINVAL))
inaczej:
self.assertPrawda(stat.S_ISFIFO(posix.stat(support.TESTFN).st_mode))
@unittest.skipUnless(hasattr(posix, 'stat'), 'test needs posix.stat()')
@unittest.skipUnless(hasattr(posix, 'makedev'), 'test needs posix.makedev()')
def test_makedev(self):
st = posix.stat(support.TESTFN)
dev = st.st_dev
self.assertIsInstance(dev, int)
self.assertGreaterEqual(dev, 0)
major = posix.major(dev)
self.assertIsInstance(major, int)
self.assertGreaterEqual(major, 0)
self.assertEqual(posix.major(dev), major)
self.assertRaises(TypeError, posix.major, float(dev))
self.assertRaises(TypeError, posix.major)
self.assertRaises((ValueError, OverflowError), posix.major, -1)
minor = posix.minor(dev)
self.assertIsInstance(minor, int)
self.assertGreaterEqual(minor, 0)
self.assertEqual(posix.minor(dev), minor)
self.assertRaises(TypeError, posix.minor, float(dev))
self.assertRaises(TypeError, posix.minor)
self.assertRaises((ValueError, OverflowError), posix.minor, -1)
self.assertEqual(posix.makedev(major, minor), dev)
self.assertRaises(TypeError, posix.makedev, float(major), minor)
self.assertRaises(TypeError, posix.makedev, major, float(minor))
self.assertRaises(TypeError, posix.makedev, major)
self.assertRaises(TypeError, posix.makedev)
def _test_all_chown_common(self, chown_func, first_param, stat_func):
"""Common code dla chown, fchown oraz lchown tests."""
def check_stat(uid, gid):
jeżeli stat_func jest nie Nic:
stat = stat_func(first_param)
self.assertEqual(stat.st_uid, uid)
self.assertEqual(stat.st_gid, gid)
uid = os.getuid()
gid = os.getgid()
# test a successful chown call
chown_func(first_param, uid, gid)
check_stat(uid, gid)
chown_func(first_param, -1, gid)
check_stat(uid, gid)
chown_func(first_param, uid, -1)
check_stat(uid, gid)
jeżeli uid == 0:
# Try an amusingly large uid/gid to make sure we handle
# large unsigned values. (chown lets you use any
# uid/gid you like, even jeżeli they aren't defined.)
#
# This problem keeps coming up:
# http://bugs.python.org/issue1747858
# http://bugs.python.org/issue4591
# http://bugs.python.org/issue15301
# Hopefully the fix w 4591 fixes it dla good!
#
# This part of the test only runs when run jako root.
# Only scary people run their tests jako root.
big_value = 2**31
chown_func(first_param, big_value, big_value)
check_stat(big_value, big_value)
chown_func(first_param, -1, -1)
check_stat(big_value, big_value)
chown_func(first_param, uid, gid)
check_stat(uid, gid)
albo_inaczej platform.system() w ('HP-UX', 'SunOS'):
# HP-UX oraz Solaris can allow a non-root user to chown() to root
# (issue #5113)
podnieś unittest.SkipTest("Skipping because of non-standard chown() "
"behavior")
inaczej:
# non-root cannot chown to root, podnieśs OSError
self.assertRaises(OSError, chown_func, first_param, 0, 0)
check_stat(uid, gid)
self.assertRaises(OSError, chown_func, first_param, 0, -1)
check_stat(uid, gid)
jeżeli 0 nie w os.getgroups():
self.assertRaises(OSError, chown_func, first_param, -1, 0)
check_stat(uid, gid)
# test illegal types
dla t w str, float:
self.assertRaises(TypeError, chown_func, first_param, t(uid), gid)
check_stat(uid, gid)
self.assertRaises(TypeError, chown_func, first_param, uid, t(gid))
check_stat(uid, gid)
@unittest.skipUnless(hasattr(posix, 'chown'), "test needs os.chown()")
def test_chown(self):
# podnieś an OSError jeżeli the file does nie exist
os.unlink(support.TESTFN)
self.assertRaises(OSError, posix.chown, support.TESTFN, -1, -1)
# re-create the file
support.create_empty_file(support.TESTFN)
self._test_all_chown_common(posix.chown, support.TESTFN,
getattr(posix, 'stat', Nic))
@unittest.skipUnless(hasattr(posix, 'fchown'), "test needs os.fchown()")
def test_fchown(self):
os.unlink(support.TESTFN)
# re-create the file
test_file = open(support.TESTFN, 'w')
spróbuj:
fd = test_file.fileno()
self._test_all_chown_common(posix.fchown, fd,
getattr(posix, 'fstat', Nic))
w_końcu:
test_file.close()
@unittest.skipUnless(hasattr(posix, 'lchown'), "test needs os.lchown()")
def test_lchown(self):
os.unlink(support.TESTFN)
# create a symlink
os.symlink(_DUMMY_SYMLINK, support.TESTFN)
self._test_all_chown_common(posix.lchown, support.TESTFN,
getattr(posix, 'lstat', Nic))
@unittest.skipUnless(hasattr(posix, 'chdir'), 'test needs posix.chdir()')
def test_chdir(self):
posix.chdir(os.curdir)
self.assertRaises(OSError, posix.chdir, support.TESTFN)
def test_listdir(self):
self.assertPrawda(support.TESTFN w posix.listdir(os.curdir))
def test_listdir_default(self):
# When listdir jest called without argument,
# it's the same jako listdir(os.curdir).
self.assertPrawda(support.TESTFN w posix.listdir())
def test_listdir_bytes(self):
# When listdir jest called przy a bytes object,
# the returned strings are of type bytes.
self.assertPrawda(os.fsencode(support.TESTFN) w posix.listdir(b'.'))
@unittest.skipUnless(posix.listdir w os.supports_fd,
"test needs fd support dla posix.listdir()")
def test_listdir_fd(self):
f = posix.open(posix.getcwd(), posix.O_RDONLY)
self.addCleanup(posix.close, f)
self.assertEqual(
sorted(posix.listdir('.')),
sorted(posix.listdir(f))
)
# Check that the fd offset was reset (issue #13739)
self.assertEqual(
sorted(posix.listdir('.')),
sorted(posix.listdir(f))
)
@unittest.skipUnless(hasattr(posix, 'access'), 'test needs posix.access()')
def test_access(self):
self.assertPrawda(posix.access(support.TESTFN, os.R_OK))
@unittest.skipUnless(hasattr(posix, 'umask'), 'test needs posix.umask()')
def test_umask(self):
old_mask = posix.umask(0)
self.assertIsInstance(old_mask, int)
posix.umask(old_mask)
@unittest.skipUnless(hasattr(posix, 'strerror'),
'test needs posix.strerror()')
def test_strerror(self):
self.assertPrawda(posix.strerror(0))
@unittest.skipUnless(hasattr(posix, 'pipe'), 'test needs posix.pipe()')
def test_pipe(self):
reader, writer = posix.pipe()
os.close(reader)
os.close(writer)
@unittest.skipUnless(hasattr(os, 'pipe2'), "test needs os.pipe2()")
@support.requires_linux_version(2, 6, 27)
def test_pipe2(self):
self.assertRaises(TypeError, os.pipe2, 'DEADBEEF')
self.assertRaises(TypeError, os.pipe2, 0, 0)
# try calling przy flags = 0, like os.pipe()
r, w = os.pipe2(0)
os.close(r)
os.close(w)
# test flags
r, w = os.pipe2(os.O_CLOEXEC|os.O_NONBLOCK)
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
self.assertNieprawda(os.get_inheritable(r))
self.assertNieprawda(os.get_inheritable(w))
self.assertNieprawda(os.get_blocking(r))
self.assertNieprawda(os.get_blocking(w))
# try reading z an empty pipe: this should fail, nie block
self.assertRaises(OSError, os.read, r, 1)
# try a write big enough to fill-up the pipe: this should either
# fail albo perform a partial write, nie block
spróbuj:
os.write(w, b'x' * support.PIPE_MAX_SIZE)
wyjąwszy OSError:
dalej
@support.cpython_only
@unittest.skipUnless(hasattr(os, 'pipe2'), "test needs os.pipe2()")
@support.requires_linux_version(2, 6, 27)
def test_pipe2_c_limits(self):
# Issue 15989
zaimportuj _testcapi
self.assertRaises(OverflowError, os.pipe2, _testcapi.INT_MAX + 1)
self.assertRaises(OverflowError, os.pipe2, _testcapi.UINT_MAX + 1)
@unittest.skipUnless(hasattr(posix, 'utime'), 'test needs posix.utime()')
def test_utime(self):
now = time.time()
posix.utime(support.TESTFN, Nic)
self.assertRaises(TypeError, posix.utime, support.TESTFN, (Nic, Nic))
self.assertRaises(TypeError, posix.utime, support.TESTFN, (now, Nic))
self.assertRaises(TypeError, posix.utime, support.TESTFN, (Nic, now))
posix.utime(support.TESTFN, (int(now), int(now)))
posix.utime(support.TESTFN, (now, now))
def _test_chflags_regular_file(self, chflags_func, target_file, **kwargs):
st = os.stat(target_file)
self.assertPrawda(hasattr(st, 'st_flags'))
# ZFS returns EOPNOTSUPP when attempting to set flag UF_IMMUTABLE.
flags = st.st_flags | stat.UF_IMMUTABLE
spróbuj:
chflags_func(target_file, flags, **kwargs)
wyjąwszy OSError jako err:
jeżeli err.errno != errno.EOPNOTSUPP:
podnieś
msg = 'chflag UF_IMMUTABLE nie supported by underlying fs'
self.skipTest(msg)
spróbuj:
new_st = os.stat(target_file)
self.assertEqual(st.st_flags | stat.UF_IMMUTABLE, new_st.st_flags)
spróbuj:
fd = open(target_file, 'w+')
wyjąwszy OSError jako e:
self.assertEqual(e.errno, errno.EPERM)
w_końcu:
posix.chflags(target_file, st.st_flags)
@unittest.skipUnless(hasattr(posix, 'chflags'), 'test needs os.chflags()')
def test_chflags(self):
self._test_chflags_regular_file(posix.chflags, support.TESTFN)
@unittest.skipUnless(hasattr(posix, 'lchflags'), 'test needs os.lchflags()')
def test_lchflags_regular_file(self):
self._test_chflags_regular_file(posix.lchflags, support.TESTFN)
self._test_chflags_regular_file(posix.chflags, support.TESTFN, follow_symlinks=Nieprawda)
@unittest.skipUnless(hasattr(posix, 'lchflags'), 'test needs os.lchflags()')
def test_lchflags_symlink(self):
testfn_st = os.stat(support.TESTFN)
self.assertPrawda(hasattr(testfn_st, 'st_flags'))
os.symlink(support.TESTFN, _DUMMY_SYMLINK)
self.teardown_files.append(_DUMMY_SYMLINK)
dummy_symlink_st = os.lstat(_DUMMY_SYMLINK)
def chflags_nofollow(path, flags):
zwróć posix.chflags(path, flags, follow_symlinks=Nieprawda)
dla fn w (posix.lchflags, chflags_nofollow):
# ZFS returns EOPNOTSUPP when attempting to set flag UF_IMMUTABLE.
flags = dummy_symlink_st.st_flags | stat.UF_IMMUTABLE
spróbuj:
fn(_DUMMY_SYMLINK, flags)
wyjąwszy OSError jako err:
jeżeli err.errno != errno.EOPNOTSUPP:
podnieś
msg = 'chflag UF_IMMUTABLE nie supported by underlying fs'
self.skipTest(msg)
spróbuj:
new_testfn_st = os.stat(support.TESTFN)
new_dummy_symlink_st = os.lstat(_DUMMY_SYMLINK)
self.assertEqual(testfn_st.st_flags, new_testfn_st.st_flags)
self.assertEqual(dummy_symlink_st.st_flags | stat.UF_IMMUTABLE,
new_dummy_symlink_st.st_flags)
w_końcu:
fn(_DUMMY_SYMLINK, dummy_symlink_st.st_flags)
def test_environ(self):
jeżeli os.name == "nt":
item_type = str
inaczej:
item_type = bytes
dla k, v w posix.environ.items():
self.assertEqual(type(k), item_type)
self.assertEqual(type(v), item_type)
@unittest.skipUnless(hasattr(posix, 'getcwd'), 'test needs posix.getcwd()')
def test_getcwd_long_pathnames(self):
dirname = 'getcwd-test-directory-0123456789abcdef-01234567890abcdef'
curdir = os.getcwd()
base_path = os.path.abspath(support.TESTFN) + '.getcwd'
spróbuj:
os.mkdir(base_path)
os.chdir(base_path)
wyjąwszy:
# Just returning nothing instead of the SkipTest exception, because
# the test results w Error w that case. Is that ok?
# podnieś unittest.SkipTest("cannot create directory dla testing")
zwróć
def _create_and_do_getcwd(dirname, current_path_length = 0):
spróbuj:
os.mkdir(dirname)
wyjąwszy:
podnieś unittest.SkipTest("mkdir cannot create directory sufficiently deep dla getcwd test")
os.chdir(dirname)
spróbuj:
os.getcwd()
jeżeli current_path_length < 1027:
_create_and_do_getcwd(dirname, current_path_length + len(dirname) + 1)
w_końcu:
os.chdir('..')
os.rmdir(dirname)
_create_and_do_getcwd(dirname)
w_końcu:
os.chdir(curdir)
support.rmtree(base_path)
@unittest.skipUnless(hasattr(posix, 'getgrouplist'), "test needs posix.getgrouplist()")
@unittest.skipUnless(hasattr(pwd, 'getpwuid'), "test needs pwd.getpwuid()")
@unittest.skipUnless(hasattr(os, 'getuid'), "test needs os.getuid()")
def test_getgrouplist(self):
user = pwd.getpwuid(os.getuid())[0]
group = pwd.getpwuid(os.getuid())[3]
self.assertIn(group, posix.getgrouplist(user, group))
@unittest.skipUnless(hasattr(os, 'getegid'), "test needs os.getegid()")
def test_getgroups(self):
przy os.popen('id -G 2>/dev/null') jako idg:
groups = idg.read().strip()
ret = idg.close()
jeżeli ret jest nie Nic albo nie groups:
podnieś unittest.SkipTest("need working 'id -G'")
# Issues 16698: OS X ABIs prior to 10.6 have limits on getgroups()
jeżeli sys.platform == 'darwin':
zaimportuj sysconfig
dt = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET') albo '10.0'
jeżeli tuple(int(n) dla n w dt.split('.')[0:2]) < (10, 6):
podnieś unittest.SkipTest("getgroups(2) jest broken prior to 10.6")
# 'id -G' oraz 'os.getgroups()' should zwróć the same
# groups, ignoring order oraz duplicates.
# #10822 - it jest implementation defined whether posix.getgroups()
# includes the effective gid so we include it anyway, since id -G does
self.assertEqual(
set([int(x) dla x w groups.split()]),
set(posix.getgroups() + [posix.getegid()]))
# tests dla the posix *at functions follow
@unittest.skipUnless(os.access w os.supports_dir_fd, "test needs dir_fd support dla os.access()")
def test_access_dir_fd(self):
f = posix.open(posix.getcwd(), posix.O_RDONLY)
spróbuj:
self.assertPrawda(posix.access(support.TESTFN, os.R_OK, dir_fd=f))
w_końcu:
posix.close(f)
@unittest.skipUnless(os.chmod w os.supports_dir_fd, "test needs dir_fd support w os.chmod()")
def test_chmod_dir_fd(self):
os.chmod(support.TESTFN, stat.S_IRUSR)
f = posix.open(posix.getcwd(), posix.O_RDONLY)
spróbuj:
posix.chmod(support.TESTFN, stat.S_IRUSR | stat.S_IWUSR, dir_fd=f)
s = posix.stat(support.TESTFN)
self.assertEqual(s[0] & stat.S_IRWXU, stat.S_IRUSR | stat.S_IWUSR)
w_końcu:
posix.close(f)
@unittest.skipUnless(os.chown w os.supports_dir_fd, "test needs dir_fd support w os.chown()")
def test_chown_dir_fd(self):
support.unlink(support.TESTFN)
support.create_empty_file(support.TESTFN)
f = posix.open(posix.getcwd(), posix.O_RDONLY)
spróbuj:
posix.chown(support.TESTFN, os.getuid(), os.getgid(), dir_fd=f)
w_końcu:
posix.close(f)
@unittest.skipUnless(os.stat w os.supports_dir_fd, "test needs dir_fd support w os.stat()")
def test_stat_dir_fd(self):
support.unlink(support.TESTFN)
przy open(support.TESTFN, 'w') jako outfile:
outfile.write("testline\n")
f = posix.open(posix.getcwd(), posix.O_RDONLY)
spróbuj:
s1 = posix.stat(support.TESTFN)
s2 = posix.stat(support.TESTFN, dir_fd=f)
self.assertEqual(s1, s2)
s2 = posix.stat(support.TESTFN, dir_fd=Nic)
self.assertEqual(s1, s2)
self.assertRaisesRegex(TypeError, 'should be integer, not',
posix.stat, support.TESTFN, dir_fd=posix.getcwd())
self.assertRaisesRegex(TypeError, 'should be integer, not',
posix.stat, support.TESTFN, dir_fd=float(f))
self.assertRaises(OverflowError,
posix.stat, support.TESTFN, dir_fd=10**20)
w_końcu:
posix.close(f)
@unittest.skipUnless(os.utime w os.supports_dir_fd, "test needs dir_fd support w os.utime()")
def test_utime_dir_fd(self):
f = posix.open(posix.getcwd(), posix.O_RDONLY)
spróbuj:
now = time.time()
posix.utime(support.TESTFN, Nic, dir_fd=f)
posix.utime(support.TESTFN, dir_fd=f)
self.assertRaises(TypeError, posix.utime, support.TESTFN, now, dir_fd=f)
self.assertRaises(TypeError, posix.utime, support.TESTFN, (Nic, Nic), dir_fd=f)
self.assertRaises(TypeError, posix.utime, support.TESTFN, (now, Nic), dir_fd=f)
self.assertRaises(TypeError, posix.utime, support.TESTFN, (Nic, now), dir_fd=f)
self.assertRaises(TypeError, posix.utime, support.TESTFN, (now, "x"), dir_fd=f)
posix.utime(support.TESTFN, (int(now), int(now)), dir_fd=f)
posix.utime(support.TESTFN, (now, now), dir_fd=f)
posix.utime(support.TESTFN,
(int(now), int((now - int(now)) * 1e9)), dir_fd=f)
posix.utime(support.TESTFN, dir_fd=f,
times=(int(now), int((now - int(now)) * 1e9)))
# try dir_fd oraz follow_symlinks together
jeżeli os.utime w os.supports_follow_symlinks:
spróbuj:
posix.utime(support.TESTFN, follow_symlinks=Nieprawda, dir_fd=f)
wyjąwszy ValueError:
# whoops! using both together nie supported on this platform.
dalej
w_końcu:
posix.close(f)
@unittest.skipUnless(os.link w os.supports_dir_fd, "test needs dir_fd support w os.link()")
def test_link_dir_fd(self):
f = posix.open(posix.getcwd(), posix.O_RDONLY)
spróbuj:
posix.link(support.TESTFN, support.TESTFN + 'link', src_dir_fd=f, dst_dir_fd=f)
# should have same inodes
self.assertEqual(posix.stat(support.TESTFN)[1],
posix.stat(support.TESTFN + 'link')[1])
w_końcu:
posix.close(f)
support.unlink(support.TESTFN + 'link')
@unittest.skipUnless(os.mkdir w os.supports_dir_fd, "test needs dir_fd support w os.mkdir()")
def test_mkdir_dir_fd(self):
f = posix.open(posix.getcwd(), posix.O_RDONLY)
spróbuj:
posix.mkdir(support.TESTFN + 'dir', dir_fd=f)
posix.stat(support.TESTFN + 'dir') # should nie podnieś exception
w_końcu:
posix.close(f)
support.rmtree(support.TESTFN + 'dir')
@unittest.skipUnless((os.mknod w os.supports_dir_fd) oraz hasattr(stat, 'S_IFIFO'),
"test requires both stat.S_IFIFO oraz dir_fd support dla os.mknod()")
def test_mknod_dir_fd(self):
# Test using mknodat() to create a FIFO (the only use specified
# by POSIX).
support.unlink(support.TESTFN)
mode = stat.S_IFIFO | stat.S_IRUSR | stat.S_IWUSR
f = posix.open(posix.getcwd(), posix.O_RDONLY)
spróbuj:
posix.mknod(support.TESTFN, mode, 0, dir_fd=f)
wyjąwszy OSError jako e:
# Some old systems don't allow unprivileged users to use
# mknod(), albo only support creating device nodes.
self.assertIn(e.errno, (errno.EPERM, errno.EINVAL))
inaczej:
self.assertPrawda(stat.S_ISFIFO(posix.stat(support.TESTFN).st_mode))
w_końcu:
posix.close(f)
@unittest.skipUnless(os.open w os.supports_dir_fd, "test needs dir_fd support w os.open()")
def test_open_dir_fd(self):
support.unlink(support.TESTFN)
przy open(support.TESTFN, 'w') jako outfile:
outfile.write("testline\n")
a = posix.open(posix.getcwd(), posix.O_RDONLY)
b = posix.open(support.TESTFN, posix.O_RDONLY, dir_fd=a)
spróbuj:
res = posix.read(b, 9).decode(encoding="utf-8")
self.assertEqual("testline\n", res)
w_końcu:
posix.close(a)
posix.close(b)
@unittest.skipUnless(os.readlink w os.supports_dir_fd, "test needs dir_fd support w os.readlink()")
def test_readlink_dir_fd(self):
os.symlink(support.TESTFN, support.TESTFN + 'link')
f = posix.open(posix.getcwd(), posix.O_RDONLY)
spróbuj:
self.assertEqual(posix.readlink(support.TESTFN + 'link'),
posix.readlink(support.TESTFN + 'link', dir_fd=f))
w_końcu:
support.unlink(support.TESTFN + 'link')
posix.close(f)
@unittest.skipUnless(os.rename w os.supports_dir_fd, "test needs dir_fd support w os.rename()")
def test_rename_dir_fd(self):
support.unlink(support.TESTFN)
support.create_empty_file(support.TESTFN + 'ren')
f = posix.open(posix.getcwd(), posix.O_RDONLY)
spróbuj:
posix.rename(support.TESTFN + 'ren', support.TESTFN, src_dir_fd=f, dst_dir_fd=f)
wyjąwszy:
posix.rename(support.TESTFN + 'ren', support.TESTFN)
podnieś
inaczej:
posix.stat(support.TESTFN) # should nie podnieś exception
w_końcu:
posix.close(f)
@unittest.skipUnless(os.symlink w os.supports_dir_fd, "test needs dir_fd support w os.symlink()")
def test_symlink_dir_fd(self):
f = posix.open(posix.getcwd(), posix.O_RDONLY)
spróbuj:
posix.symlink(support.TESTFN, support.TESTFN + 'link', dir_fd=f)
self.assertEqual(posix.readlink(support.TESTFN + 'link'), support.TESTFN)
w_końcu:
posix.close(f)
support.unlink(support.TESTFN + 'link')
@unittest.skipUnless(os.unlink w os.supports_dir_fd, "test needs dir_fd support w os.unlink()")
def test_unlink_dir_fd(self):
f = posix.open(posix.getcwd(), posix.O_RDONLY)
support.create_empty_file(support.TESTFN + 'del')
posix.stat(support.TESTFN + 'del') # should nie podnieś exception
spróbuj:
posix.unlink(support.TESTFN + 'del', dir_fd=f)
wyjąwszy:
support.unlink(support.TESTFN + 'del')
podnieś
inaczej:
self.assertRaises(OSError, posix.stat, support.TESTFN + 'link')
w_końcu:
posix.close(f)
@unittest.skipUnless(os.mkfifo w os.supports_dir_fd, "test needs dir_fd support w os.mkfifo()")
def test_mkfifo_dir_fd(self):
support.unlink(support.TESTFN)
f = posix.open(posix.getcwd(), posix.O_RDONLY)
spróbuj:
posix.mkfifo(support.TESTFN, stat.S_IRUSR | stat.S_IWUSR, dir_fd=f)
self.assertPrawda(stat.S_ISFIFO(posix.stat(support.TESTFN).st_mode))
w_końcu:
posix.close(f)
requires_sched_h = unittest.skipUnless(hasattr(posix, 'sched_uzyskaj'),
"don't have scheduling support")
requires_sched_affinity = unittest.skipUnless(hasattr(posix, 'sched_setaffinity'),
"don't have sched affinity support")
@requires_sched_h
def test_sched_uzyskaj(self):
# This has no error conditions (at least on Linux).
posix.sched_uzyskaj()
@requires_sched_h
@unittest.skipUnless(hasattr(posix, 'sched_get_priority_max'),
"requires sched_get_priority_max()")
def test_sched_priority(self):
# Round-robin usually has interesting priorities.
pol = posix.SCHED_RR
lo = posix.sched_get_priority_min(pol)
hi = posix.sched_get_priority_max(pol)
self.assertIsInstance(lo, int)
self.assertIsInstance(hi, int)
self.assertGreaterEqual(hi, lo)
# OSX evidently just returns 15 without checking the argument.
jeżeli sys.platform != "darwin":
self.assertRaises(OSError, posix.sched_get_priority_min, -23)
self.assertRaises(OSError, posix.sched_get_priority_max, -23)
@unittest.skipUnless(hasattr(posix, 'sched_setscheduler'), "can't change scheduler")
def test_get_and_set_scheduler_and_param(self):
possible_schedulers = [sched dla name, sched w posix.__dict__.items()
jeżeli name.startswith("SCHED_")]
mine = posix.sched_getscheduler(0)
self.assertIn(mine, possible_schedulers)
spróbuj:
parent = posix.sched_getscheduler(os.getppid())
wyjąwszy OSError jako e:
jeżeli e.errno != errno.EPERM:
podnieś
inaczej:
self.assertIn(parent, possible_schedulers)
self.assertRaises(OSError, posix.sched_getscheduler, -1)
self.assertRaises(OSError, posix.sched_getparam, -1)
param = posix.sched_getparam(0)
self.assertIsInstance(param.sched_priority, int)
# POSIX states that calling sched_setparam() albo sched_setscheduler() on
# a process przy a scheduling policy other than SCHED_FIFO albo SCHED_RR
# jest implementation-defined: NetBSD oraz FreeBSD can zwróć EINVAL.
jeżeli nie sys.platform.startswith(('freebsd', 'netbsd')):
spróbuj:
posix.sched_setscheduler(0, mine, param)
posix.sched_setparam(0, param)
wyjąwszy OSError jako e:
jeżeli e.errno != errno.EPERM:
podnieś
self.assertRaises(OSError, posix.sched_setparam, -1, param)
self.assertRaises(OSError, posix.sched_setscheduler, -1, mine, param)
self.assertRaises(TypeError, posix.sched_setscheduler, 0, mine, Nic)
self.assertRaises(TypeError, posix.sched_setparam, 0, 43)
param = posix.sched_param(Nic)
self.assertRaises(TypeError, posix.sched_setparam, 0, param)
large = 214748364700
param = posix.sched_param(large)
self.assertRaises(OverflowError, posix.sched_setparam, 0, param)
param = posix.sched_param(sched_priority=-large)
self.assertRaises(OverflowError, posix.sched_setparam, 0, param)
@unittest.skipUnless(hasattr(posix, "sched_rr_get_interval"), "no function")
def test_sched_rr_get_interval(self):
spróbuj:
interval = posix.sched_rr_get_interval(0)
wyjąwszy OSError jako e:
# This likely means that sched_rr_get_interval jest only valid for
# processes przy the SCHED_RR scheduler w effect.
jeżeli e.errno != errno.EINVAL:
podnieś
self.skipTest("only works on SCHED_RR processes")
self.assertIsInstance(interval, float)
# Reasonable constraints, I think.
self.assertGreaterEqual(interval, 0.)
self.assertLess(interval, 1.)
@requires_sched_affinity
def test_sched_getaffinity(self):
mask = posix.sched_getaffinity(0)
self.assertIsInstance(mask, set)
self.assertGreaterEqual(len(mask), 1)
self.assertRaises(OSError, posix.sched_getaffinity, -1)
dla cpu w mask:
self.assertIsInstance(cpu, int)
self.assertGreaterEqual(cpu, 0)
self.assertLess(cpu, 1 << 32)
@requires_sched_affinity
def test_sched_setaffinity(self):
mask = posix.sched_getaffinity(0)
jeżeli len(mask) > 1:
# Empty masks are forbidden
mask.pop()
posix.sched_setaffinity(0, mask)
self.assertEqual(posix.sched_getaffinity(0), mask)
self.assertRaises(OSError, posix.sched_setaffinity, 0, [])
self.assertRaises(ValueError, posix.sched_setaffinity, 0, [-10])
self.assertRaises(OverflowError, posix.sched_setaffinity, 0, [1<<128])
self.assertRaises(OSError, posix.sched_setaffinity, -1, mask)
def test_rtld_constants(self):
# check presence of major RTLD_* constants
posix.RTLD_LAZY
posix.RTLD_NOW
posix.RTLD_GLOBAL
posix.RTLD_LOCAL
@unittest.skipUnless(hasattr(os, 'SEEK_HOLE'),
"test needs an OS that reports file holes")
def test_fs_holes(self):
# Even jeżeli the filesystem doesn't report holes,
# jeżeli the OS supports it the SEEK_* constants
# will be defined oraz will have a consistent
# behaviour:
# os.SEEK_DATA = current position
# os.SEEK_HOLE = end of file position
przy open(support.TESTFN, 'r+b') jako fp:
fp.write(b"hello")
fp.flush()
size = fp.tell()
fno = fp.fileno()
try :
dla i w range(size):
self.assertEqual(i, os.lseek(fno, i, os.SEEK_DATA))
self.assertLessEqual(size, os.lseek(fno, i, os.SEEK_HOLE))
self.assertRaises(OSError, os.lseek, fno, size, os.SEEK_DATA)
self.assertRaises(OSError, os.lseek, fno, size, os.SEEK_HOLE)
wyjąwszy OSError :
# Some OSs claim to support SEEK_HOLE/SEEK_DATA
# but it jest nie true.
# For instance:
# http://lists.freebsd.org/pipermail/freebsd-amd64/2012-January/014332.html
podnieś unittest.SkipTest("OSError podnieśd!")
def test_path_error2(self):
"""
Test functions that call path_error2(), providing two filenames w their exceptions.
"""
dla name w ("rename", "replace", "link"):
function = getattr(os, name, Nic)
jeżeli function jest Nic:
kontynuuj
dla dst w ("noodly2", support.TESTFN):
spróbuj:
function('doesnotexistfilename', dst)
wyjąwszy OSError jako e:
self.assertIn("'doesnotexistfilename' -> '{}'".format(dst), str(e))
przerwij
inaczej:
self.fail("No valid path_error2() test dla os." + name)
def test_path_with_null_character(self):
fn = support.TESTFN
fn_with_NUL = fn + '\0'
self.addCleanup(support.unlink, fn)
support.unlink(fn)
fd = Nic
spróbuj:
przy self.assertRaises(ValueError):
fd = os.open(fn_with_NUL, os.O_WRONLY | os.O_CREAT) # podnieśs
w_końcu:
jeżeli fd jest nie Nic:
os.close(fd)
self.assertNieprawda(os.path.exists(fn))
self.assertRaises(ValueError, os.mkdir, fn_with_NUL)
self.assertNieprawda(os.path.exists(fn))
open(fn, 'wb').close()
self.assertRaises(ValueError, os.stat, fn_with_NUL)
def test_path_with_null_byte(self):
fn = os.fsencode(support.TESTFN)
fn_with_NUL = fn + b'\0'
self.addCleanup(support.unlink, fn)
support.unlink(fn)
fd = Nic
spróbuj:
przy self.assertRaises(ValueError):
fd = os.open(fn_with_NUL, os.O_WRONLY | os.O_CREAT) # podnieśs
w_końcu:
jeżeli fd jest nie Nic:
os.close(fd)
self.assertNieprawda(os.path.exists(fn))
self.assertRaises(ValueError, os.mkdir, fn_with_NUL)
self.assertNieprawda(os.path.exists(fn))
open(fn, 'wb').close()
self.assertRaises(ValueError, os.stat, fn_with_NUL)
klasa PosixGroupsTester(unittest.TestCase):
    # Tests for posix.getgroups()/setgroups()/initgroups().
    # Only runs as root (setUp skips otherwise) and restores the original
    # supplementary group list in tearDown.

    def setUp(self):
        # Require root, a getgroups() implementation, and a non-OSX
        # platform (the OSX getgroups(2) behaviour breaks this test).
        jeżeli posix.getuid() != 0:
            podnieś unittest.SkipTest("not enough privileges")
        jeżeli nie hasattr(posix, 'getgroups'):
            podnieś unittest.SkipTest("need posix.getgroups")
        jeżeli sys.platform == 'darwin':
            podnieś unittest.SkipTest("getgroups(2) jest broken on OSX")
        # Remember the current supplementary groups so tearDown can
        # restore them after the tests mutate the group list.
        self.saved_groups = posix.getgroups()

    def tearDown(self):
        # Restore the saved group list, preferring setgroups(); fall back
        # to initgroups() with the first saved group when unavailable.
        jeżeli hasattr(posix, 'setgroups'):
            posix.setgroups(self.saved_groups)
        albo_inaczej hasattr(posix, 'initgroups'):
            name = pwd.getpwuid(posix.getuid()).pw_name
            posix.initgroups(name, self.saved_groups[0])

    @unittest.skipUnless(hasattr(posix, 'initgroups'),
                         "test needs posix.initgroups()")
    def test_initgroups(self):
        # find missing group
        # Pick a group id guaranteed not to be in the saved list, then
        # verify initgroups() adds it to the process's group set.
        g = max(self.saved_groups albo [0]) + 1
        name = pwd.getpwuid(posix.getuid()).pw_name
        posix.initgroups(name, g)
        self.assertIn(g, posix.getgroups())

    @unittest.skipUnless(hasattr(posix, 'setgroups'),
                         "test needs posix.setgroups()")
    def test_setgroups(self):
        # setgroups() followed by getgroups() must round-trip exactly.
        dla groups w [[0], list(range(16))]:
            posix.setgroups(groups)
            self.assertListEqual(groups, posix.getgroups())
def test_main():
    # Run both test cases; reap any forked children even if the run fails,
    # so no zombies are left behind by the fork-based tests.
    spróbuj:
        support.run_unittest(PosixTester, PosixGroupsTester)
    w_końcu:
        support.reap_children()

# Standard script entry point.
jeżeli __name__ == '__main__':
    test_main()
| 41.836
| 120
| 0.604207
|
"Test posix functions"
z test zaimportuj support
posix = support.import_module('posix')
zaimportuj errno
zaimportuj sys
zaimportuj time
zaimportuj os
zaimportuj platform
zaimportuj pwd
zaimportuj shutil
zaimportuj stat
zaimportuj tempfile
zaimportuj unittest
zaimportuj warnings
_DUMMY_SYMLINK = os.path.join(tempfile.gettempdir(),
support.TESTFN + '-dummy-symlink')
klasa PosixTester(unittest.TestCase):
def setUp(self):
fp = open(support.TESTFN, 'w+')
fp.close()
self.teardown_files = [ support.TESTFN ]
self._warnings_manager = support.check_warnings()
self._warnings_manager.__enter__()
warnings.filterwarnings('ignore', '.* potential security risk .*',
RuntimeWarning)
def tearDown(self):
dla teardown_file w self.teardown_files:
support.unlink(teardown_file)
self._warnings_manager.__exit__(Nic, Nic, Nic)
def testNoArgFunctions(self):
NO_ARG_FUNCTIONS = [ "ctermid", "getcwd", "getcwdb", "uname",
"times", "getloadavg",
"getegid", "geteuid", "getgid", "getgroups",
"getpid", "getpgrp", "getppid", "getuid", "sync",
]
dla name w NO_ARG_FUNCTIONS:
posix_func = getattr(posix, name, Nic)
jeżeli posix_func jest nie Nic:
posix_func()
self.assertRaises(TypeError, posix_func, 1)
@unittest.skipUnless(hasattr(posix, 'getresuid'),
'test needs posix.getresuid()')
def test_getresuid(self):
user_ids = posix.getresuid()
self.assertEqual(len(user_ids), 3)
dla val w user_ids:
self.assertGreaterEqual(val, 0)
@unittest.skipUnless(hasattr(posix, 'getresgid'),
'test needs posix.getresgid()')
def test_getresgid(self):
group_ids = posix.getresgid()
self.assertEqual(len(group_ids), 3)
dla val w group_ids:
self.assertGreaterEqual(val, 0)
@unittest.skipUnless(hasattr(posix, 'setresuid'),
'test needs posix.setresuid()')
def test_setresuid(self):
current_user_ids = posix.getresuid()
self.assertIsNic(posix.setresuid(*current_user_ids))
self.assertIsNic(posix.setresuid(-1, -1, -1))
@unittest.skipUnless(hasattr(posix, 'setresuid'),
'test needs posix.setresuid()')
def test_setresuid_exception(self):
# Don't do this test jeżeli someone jest silly enough to run us jako root.
current_user_ids = posix.getresuid()
jeżeli 0 nie w current_user_ids:
new_user_ids = (current_user_ids[0]+1, -1, -1)
self.assertRaises(OSError, posix.setresuid, *new_user_ids)
@unittest.skipUnless(hasattr(posix, 'setresgid'),
'test needs posix.setresgid()')
def test_setresgid(self):
current_group_ids = posix.getresgid()
self.assertIsNic(posix.setresgid(*current_group_ids))
self.assertIsNic(posix.setresgid(-1, -1, -1))
@unittest.skipUnless(hasattr(posix, 'setresgid'),
'test needs posix.setresgid()')
def test_setresgid_exception(self):
# Don't do this test jeżeli someone jest silly enough to run us jako root.
current_group_ids = posix.getresgid()
jeżeli 0 nie w current_group_ids:
new_group_ids = (current_group_ids[0]+1, -1, -1)
self.assertRaises(OSError, posix.setresgid, *new_group_ids)
@unittest.skipUnless(hasattr(posix, 'initgroups'),
"test needs os.initgroups()")
def test_initgroups(self):
self.assertRaises(TypeError, posix.initgroups)
self.assertRaises(TypeError, posix.initgroups, Nic)
self.assertRaises(TypeError, posix.initgroups, 3, "foo")
self.assertRaises(TypeError, posix.initgroups, "foo", 3, object())
jeżeli os.getuid() != 0:
spróbuj:
name = pwd.getpwuid(posix.getuid()).pw_name
wyjąwszy KeyError:
podnieś unittest.SkipTest("need a pwd entry")
spróbuj:
posix.initgroups(name, 13)
wyjąwszy OSError jako e:
self.assertEqual(e.errno, errno.EPERM)
inaczej:
self.fail("Expected OSError to be podnieśd by initgroups")
@unittest.skipUnless(hasattr(posix, 'statvfs'),
'test needs posix.statvfs()')
def test_statvfs(self):
self.assertPrawda(posix.statvfs(os.curdir))
@unittest.skipUnless(hasattr(posix, 'fstatvfs'),
'test needs posix.fstatvfs()')
def test_fstatvfs(self):
fp = open(support.TESTFN)
spróbuj:
self.assertPrawda(posix.fstatvfs(fp.fileno()))
self.assertPrawda(posix.statvfs(fp.fileno()))
w_końcu:
fp.close()
@unittest.skipUnless(hasattr(posix, 'ftruncate'),
'test needs posix.ftruncate()')
def test_ftruncate(self):
fp = open(support.TESTFN, 'w+')
spróbuj:
fp.write('test')
fp.flush()
posix.ftruncate(fp.fileno(), 0)
w_końcu:
fp.close()
@unittest.skipUnless(hasattr(posix, 'truncate'), "test needs posix.truncate()")
def test_truncate(self):
przy open(support.TESTFN, 'w') jako fp:
fp.write('test')
fp.flush()
posix.truncate(support.TESTFN, 0)
@unittest.skipUnless(getattr(os, 'execve', Nic) w os.supports_fd, "test needs execve() to support the fd parameter")
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_fexecve(self):
fp = os.open(sys.executable, os.O_RDONLY)
spróbuj:
pid = os.fork()
jeżeli pid == 0:
os.chdir(os.path.split(sys.executable)[0])
posix.execve(fp, [sys.executable, '-c', 'pass'], os.environ)
inaczej:
self.assertEqual(os.waitpid(pid, 0), (pid, 0))
w_końcu:
os.close(fp)
@unittest.skipUnless(hasattr(posix, 'waitid'), "test needs posix.waitid()")
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
def test_waitid(self):
pid = os.fork()
jeżeli pid == 0:
os.chdir(os.path.split(sys.executable)[0])
posix.execve(sys.executable, [sys.executable, '-c', 'pass'], os.environ)
inaczej:
res = posix.waitid(posix.P_PID, pid, posix.WEXITED)
self.assertEqual(pid, res.si_pid)
@unittest.skipUnless(hasattr(posix, 'lockf'), "test needs posix.lockf()")
def test_lockf(self):
fd = os.open(support.TESTFN, os.O_WRONLY | os.O_CREAT)
spróbuj:
os.write(fd, b'test')
os.lseek(fd, 0, os.SEEK_SET)
posix.lockf(fd, posix.F_LOCK, 4)
posix.lockf(fd, posix.F_ULOCK, 4)
w_końcu:
os.close(fd)
@unittest.skipUnless(hasattr(posix, 'pread'), "test needs posix.pread()")
def test_pread(self):
fd = os.open(support.TESTFN, os.O_RDWR | os.O_CREAT)
spróbuj:
os.write(fd, b'test')
os.lseek(fd, 0, os.SEEK_SET)
self.assertEqual(b'es', posix.pread(fd, 2, 1))
self.assertEqual(b'te', posix.read(fd, 2))
w_końcu:
os.close(fd)
@unittest.skipUnless(hasattr(posix, 'pwrite'), "test needs posix.pwrite()")
def test_pwrite(self):
fd = os.open(support.TESTFN, os.O_RDWR | os.O_CREAT)
spróbuj:
os.write(fd, b'test')
os.lseek(fd, 0, os.SEEK_SET)
posix.pwrite(fd, b'xx', 1)
self.assertEqual(b'txxt', posix.read(fd, 4))
w_końcu:
os.close(fd)
@unittest.skipUnless(hasattr(posix, 'posix_fallocate'),
"test needs posix.posix_fallocate()")
def test_posix_fallocate(self):
fd = os.open(support.TESTFN, os.O_WRONLY | os.O_CREAT)
spróbuj:
posix.posix_fallocate(fd, 0, 10)
wyjąwszy OSError jako inst:
# issue10812, ZFS doesn't appear to support posix_fallocate,
jeżeli inst.errno != errno.EINVAL albo nie sys.platform.startswith("sunos"):
podnieś
w_końcu:
os.close(fd)
@unittest.skipUnless(hasattr(posix, 'posix_fadvise'),
"test needs posix.posix_fadvise()")
def test_posix_fadvise(self):
fd = os.open(support.TESTFN, os.O_RDONLY)
spróbuj:
posix.posix_fadvise(fd, 0, 0, posix.POSIX_FADV_WILLNEED)
w_końcu:
os.close(fd)
@unittest.skipUnless(os.utime w os.supports_fd, "test needs fd support w os.utime")
def test_utime_with_fd(self):
now = time.time()
fd = os.open(support.TESTFN, os.O_RDONLY)
spróbuj:
posix.utime(fd)
posix.utime(fd, Nic)
self.assertRaises(TypeError, posix.utime, fd, (Nic, Nic))
self.assertRaises(TypeError, posix.utime, fd, (now, Nic))
self.assertRaises(TypeError, posix.utime, fd, (Nic, now))
posix.utime(fd, (int(now), int(now)))
posix.utime(fd, (now, now))
self.assertRaises(ValueError, posix.utime, fd, (now, now), ns=(now, now))
self.assertRaises(ValueError, posix.utime, fd, (now, 0), ns=(Nic, Nic))
self.assertRaises(ValueError, posix.utime, fd, (Nic, Nic), ns=(now, 0))
posix.utime(fd, (int(now), int((now - int(now)) * 1e9)))
posix.utime(fd, ns=(int(now), int((now - int(now)) * 1e9)))
w_końcu:
os.close(fd)
@unittest.skipUnless(os.utime w os.supports_follow_symlinks, "test needs follow_symlinks support w os.utime")
def test_utime_nofollow_symlinks(self):
now = time.time()
posix.utime(support.TESTFN, Nic, follow_symlinks=Nieprawda)
self.assertRaises(TypeError, posix.utime, support.TESTFN, (Nic, Nic), follow_symlinks=Nieprawda)
self.assertRaises(TypeError, posix.utime, support.TESTFN, (now, Nic), follow_symlinks=Nieprawda)
self.assertRaises(TypeError, posix.utime, support.TESTFN, (Nic, now), follow_symlinks=Nieprawda)
posix.utime(support.TESTFN, (int(now), int(now)), follow_symlinks=Nieprawda)
posix.utime(support.TESTFN, (now, now), follow_symlinks=Nieprawda)
posix.utime(support.TESTFN, follow_symlinks=Nieprawda)
@unittest.skipUnless(hasattr(posix, 'writev'), "test needs posix.writev()")
def test_writev(self):
fd = os.open(support.TESTFN, os.O_RDWR | os.O_CREAT)
spróbuj:
n = os.writev(fd, (b'test1', b'tt2', b't3'))
self.assertEqual(n, 10)
os.lseek(fd, 0, os.SEEK_SET)
self.assertEqual(b'test1tt2t3', posix.read(fd, 10))
osix.writev(fd, [])
wyjąwszy OSError:
dalej
inaczej:
self.assertEqual(size, 0)
w_końcu:
os.close(fd)
@unittest.skipUnless(hasattr(posix, 'readv'), "test needs posix.readv()")
def test_readv(self):
fd = os.open(support.TESTFN, os.O_RDWR | os.O_CREAT)
spróbuj:
os.write(fd, b'test1tt2t3')
os.lseek(fd, 0, os.SEEK_SET)
buf = [bytearray(i) dla i w [5, 3, 2]]
self.assertEqual(posix.readv(fd, buf), 10)
self.assertEqual([b'test1', b'tt2', b't3'], [bytes(i) dla i w buf])
osix.readv(fd, [])
wyjąwszy OSError:
dalej
inaczej:
self.assertEqual(size, 0)
w_końcu:
os.close(fd)
@unittest.skipUnless(hasattr(posix, 'dup'),
'test needs posix.dup()')
def test_dup(self):
fp = open(support.TESTFN)
spróbuj:
fd = posix.dup(fp.fileno())
self.assertIsInstance(fd, int)
os.close(fd)
w_końcu:
fp.close()
@unittest.skipUnless(hasattr(posix, 'confstr'),
'test needs posix.confstr()')
def test_confstr(self):
self.assertRaises(ValueError, posix.confstr, "CS_garbage")
self.assertEqual(len(posix.confstr("CS_PATH")) > 0, Prawda)
@unittest.skipUnless(hasattr(posix, 'dup2'),
'test needs posix.dup2()')
def test_dup2(self):
fp1 = open(support.TESTFN)
fp2 = open(support.TESTFN)
spróbuj:
posix.dup2(fp1.fileno(), fp2.fileno())
w_końcu:
fp1.close()
fp2.close()
@unittest.skipUnless(hasattr(os, 'O_CLOEXEC'), "needs os.O_CLOEXEC")
@support.requires_linux_version(2, 6, 23)
def test_oscloexec(self):
fd = os.open(support.TESTFN, os.O_RDONLY|os.O_CLOEXEC)
self.addCleanup(os.close, fd)
self.assertNieprawda(os.get_inheritable(fd))
@unittest.skipUnless(hasattr(posix, 'O_EXLOCK'),
'test needs posix.O_EXLOCK')
def test_osexlock(self):
fd = os.open(support.TESTFN,
os.O_WRONLY|os.O_EXLOCK|os.O_CREAT)
self.assertRaises(OSError, os.open, support.TESTFN,
os.O_WRONLY|os.O_EXLOCK|os.O_NONBLOCK)
os.close(fd)
jeżeli hasattr(posix, "O_SHLOCK"):
fd = os.open(support.TESTFN,
os.O_WRONLY|os.O_SHLOCK|os.O_CREAT)
self.assertRaises(OSError, os.open, support.TESTFN,
os.O_WRONLY|os.O_EXLOCK|os.O_NONBLOCK)
os.close(fd)
@unittest.skipUnless(hasattr(posix, 'O_SHLOCK'),
'test needs posix.O_SHLOCK')
def test_osshlock(self):
fd1 = os.open(support.TESTFN,
os.O_WRONLY|os.O_SHLOCK|os.O_CREAT)
fd2 = os.open(support.TESTFN,
os.O_WRONLY|os.O_SHLOCK|os.O_CREAT)
os.close(fd2)
os.close(fd1)
jeżeli hasattr(posix, "O_EXLOCK"):
fd = os.open(support.TESTFN,
os.O_WRONLY|os.O_SHLOCK|os.O_CREAT)
self.assertRaises(OSError, os.open, support.TESTFN,
os.O_RDONLY|os.O_EXLOCK|os.O_NONBLOCK)
os.close(fd)
@unittest.skipUnless(hasattr(posix, 'fstat'),
'test needs posix.fstat()')
def test_fstat(self):
fp = open(support.TESTFN)
spróbuj:
self.assertPrawda(posix.fstat(fp.fileno()))
self.assertPrawda(posix.stat(fp.fileno()))
self.assertRaisesRegex(TypeError,
'should be string, bytes albo integer, not',
posix.stat, float(fp.fileno()))
w_końcu:
fp.close()
@unittest.skipUnless(hasattr(posix, 'stat'),
'test needs posix.stat()')
def test_stat(self):
self.assertPrawda(posix.stat(support.TESTFN))
self.assertPrawda(posix.stat(os.fsencode(support.TESTFN)))
self.assertPrawda(posix.stat(bytearray(os.fsencode(support.TESTFN))))
self.assertRaisesRegex(TypeError,
'can\'t specify Nic dla path argument',
posix.stat, Nic)
self.assertRaisesRegex(TypeError,
'should be string, bytes albo integer, not',
posix.stat, list(support.TESTFN))
self.assertRaisesRegex(TypeError,
'should be string, bytes albo integer, not',
posix.stat, list(os.fsencode(support.TESTFN)))
@unittest.skipUnless(hasattr(posix, 'mkfifo'), "don't have mkfifo()")
def test_mkfifo(self):
support.unlink(support.TESTFN)
posix.mkfifo(support.TESTFN, stat.S_IRUSR | stat.S_IWUSR)
self.assertPrawda(stat.S_ISFIFO(posix.stat(support.TESTFN).st_mode))
@unittest.skipUnless(hasattr(posix, 'mknod') oraz hasattr(stat, 'S_IFIFO'),
"don't have mknod()/S_IFIFO")
def test_mknod(self):
# Test using mknod() to create a FIFO (the only use specified
# by POSIX).
support.unlink(support.TESTFN)
mode = stat.S_IFIFO | stat.S_IRUSR | stat.S_IWUSR
spróbuj:
posix.mknod(support.TESTFN, mode, 0)
wyjąwszy OSError jako e:
# Some old systems don't allow unprivileged users to use
self.assertIn(e.errno, (errno.EPERM, errno.EINVAL))
inaczej:
self.assertPrawda(stat.S_ISFIFO(posix.stat(support.TESTFN).st_mode))
@unittest.skipUnless(hasattr(posix, 'stat'), 'test needs posix.stat()')
@unittest.skipUnless(hasattr(posix, 'makedev'), 'test needs posix.makedev()')
def test_makedev(self):
st = posix.stat(support.TESTFN)
dev = st.st_dev
self.assertIsInstance(dev, int)
self.assertGreaterEqual(dev, 0)
major = posix.major(dev)
self.assertIsInstance(major, int)
self.assertGreaterEqual(major, 0)
self.assertEqual(posix.major(dev), major)
self.assertRaises(TypeError, posix.major, float(dev))
self.assertRaises(TypeError, posix.major)
self.assertRaises((ValueError, OverflowError), posix.major, -1)
minor = posix.minor(dev)
self.assertIsInstance(minor, int)
self.assertGreaterEqual(minor, 0)
self.assertEqual(posix.minor(dev), minor)
self.assertRaises(TypeError, posix.minor, float(dev))
self.assertRaises(TypeError, posix.minor)
self.assertRaises((ValueError, OverflowError), posix.minor, -1)
self.assertEqual(posix.makedev(major, minor), dev)
self.assertRaises(TypeError, posix.makedev, float(major), minor)
self.assertRaises(TypeError, posix.makedev, major, float(minor))
self.assertRaises(TypeError, posix.makedev, major)
self.assertRaises(TypeError, posix.makedev)
def _test_all_chown_common(self, chown_func, first_param, stat_func):
"""Common code dla chown, fchown oraz lchown tests."""
def check_stat(uid, gid):
jeżeli stat_func jest nie Nic:
stat = stat_func(first_param)
self.assertEqual(stat.st_uid, uid)
self.assertEqual(stat.st_gid, gid)
uid = os.getuid()
gid = os.getgid()
chown_func(first_param, uid, gid)
check_stat(uid, gid)
chown_func(first_param, -1, gid)
check_stat(uid, gid)
chown_func(first_param, uid, -1)
check_stat(uid, gid)
jeżeli uid == 0:
#
# This problem keeps coming up:
# http://bugs.python.org/issue1747858
# http://bugs.python.org/issue4591
# http://bugs.python.org/issue15301
# Hopefully the fix w 4591 fixes it dla good!
#
# This part of the test only runs when run jako root.
# Only scary people run their tests jako root.
big_value = 2**31
chown_func(first_param, big_value, big_value)
check_stat(big_value, big_value)
chown_func(first_param, -1, -1)
check_stat(big_value, big_value)
chown_func(first_param, uid, gid)
check_stat(uid, gid)
albo_inaczej platform.system() w ('HP-UX', 'SunOS'):
# HP-UX oraz Solaris can allow a non-root user to chown() to root
# (issue #5113)
podnieś unittest.SkipTest("Skipping because of non-standard chown() "
"behavior")
inaczej:
# non-root cannot chown to root, podnieśs OSError
self.assertRaises(OSError, chown_func, first_param, 0, 0)
check_stat(uid, gid)
self.assertRaises(OSError, chown_func, first_param, 0, -1)
check_stat(uid, gid)
jeżeli 0 nie w os.getgroups():
self.assertRaises(OSError, chown_func, first_param, -1, 0)
check_stat(uid, gid)
# test illegal types
dla t w str, float:
self.assertRaises(TypeError, chown_func, first_param, t(uid), gid)
check_stat(uid, gid)
self.assertRaises(TypeError, chown_func, first_param, uid, t(gid))
check_stat(uid, gid)
@unittest.skipUnless(hasattr(posix, 'chown'), "test needs os.chown()")
def test_chown(self):
# podnieś an OSError jeżeli the file does nie exist
os.unlink(support.TESTFN)
self.assertRaises(OSError, posix.chown, support.TESTFN, -1, -1)
# re-create the file
support.create_empty_file(support.TESTFN)
self._test_all_chown_common(posix.chown, support.TESTFN,
getattr(posix, 'stat', Nic))
@unittest.skipUnless(hasattr(posix, 'fchown'), "test needs os.fchown()")
def test_fchown(self):
os.unlink(support.TESTFN)
# re-create the file
test_file = open(support.TESTFN, 'w')
spróbuj:
fd = test_file.fileno()
self._test_all_chown_common(posix.fchown, fd,
getattr(posix, 'fstat', Nic))
w_końcu:
test_file.close()
@unittest.skipUnless(hasattr(posix, 'lchown'), "test needs os.lchown()")
def test_lchown(self):
os.unlink(support.TESTFN)
# create a symlink
os.symlink(_DUMMY_SYMLINK, support.TESTFN)
self._test_all_chown_common(posix.lchown, support.TESTFN,
getattr(posix, 'lstat', Nic))
@unittest.skipUnless(hasattr(posix, 'chdir'), 'test needs posix.chdir()')
def test_chdir(self):
posix.chdir(os.curdir)
self.assertRaises(OSError, posix.chdir, support.TESTFN)
def test_listdir(self):
self.assertPrawda(support.TESTFN w posix.listdir(os.curdir))
def test_listdir_default(self):
# When listdir jest called without argument,
# it's the same jako listdir(os.curdir).
self.assertPrawda(support.TESTFN w posix.listdir())
def test_listdir_bytes(self):
self.assertPrawda(os.fsencode(support.TESTFN) w posix.listdir(b'.'))
@unittest.skipUnless(posix.listdir w os.supports_fd,
"test needs fd support dla posix.listdir()")
def test_listdir_fd(self):
f = posix.open(posix.getcwd(), posix.O_RDONLY)
self.addCleanup(posix.close, f)
self.assertEqual(
sorted(posix.listdir('.')),
sorted(posix.listdir(f))
)
self.assertEqual(
sorted(posix.listdir('.')),
sorted(posix.listdir(f))
)
@unittest.skipUnless(hasattr(posix, 'access'), 'test needs posix.access()')
def test_access(self):
self.assertPrawda(posix.access(support.TESTFN, os.R_OK))
@unittest.skipUnless(hasattr(posix, 'umask'), 'test needs posix.umask()')
def test_umask(self):
old_mask = posix.umask(0)
self.assertIsInstance(old_mask, int)
posix.umask(old_mask)
@unittest.skipUnless(hasattr(posix, 'strerror'),
'test needs posix.strerror()')
def test_strerror(self):
self.assertPrawda(posix.strerror(0))
@unittest.skipUnless(hasattr(posix, 'pipe'), 'test needs posix.pipe()')
def test_pipe(self):
reader, writer = posix.pipe()
os.close(reader)
os.close(writer)
@unittest.skipUnless(hasattr(os, 'pipe2'), "test needs os.pipe2()")
@support.requires_linux_version(2, 6, 27)
def test_pipe2(self):
self.assertRaises(TypeError, os.pipe2, 'DEADBEEF')
self.assertRaises(TypeError, os.pipe2, 0, 0)
r, w = os.pipe2(0)
os.close(r)
os.close(w)
r, w = os.pipe2(os.O_CLOEXEC|os.O_NONBLOCK)
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
self.assertNieprawda(os.get_inheritable(r))
self.assertNieprawda(os.get_inheritable(w))
self.assertNieprawda(os.get_blocking(r))
self.assertNieprawda(os.get_blocking(w))
self.assertRaises(OSError, os.read, r, 1)
spróbuj:
os.write(w, b'x' * support.PIPE_MAX_SIZE)
wyjąwszy OSError:
dalej
@support.cpython_only
@unittest.skipUnless(hasattr(os, 'pipe2'), "test needs os.pipe2()")
@support.requires_linux_version(2, 6, 27)
def test_pipe2_c_limits(self):
zaimportuj _testcapi
self.assertRaises(OverflowError, os.pipe2, _testcapi.INT_MAX + 1)
self.assertRaises(OverflowError, os.pipe2, _testcapi.UINT_MAX + 1)
@unittest.skipUnless(hasattr(posix, 'utime'), 'test needs posix.utime()')
def test_utime(self):
now = time.time()
posix.utime(support.TESTFN, Nic)
self.assertRaises(TypeError, posix.utime, support.TESTFN, (Nic, Nic))
self.assertRaises(TypeError, posix.utime, support.TESTFN, (now, Nic))
self.assertRaises(TypeError, posix.utime, support.TESTFN, (Nic, now))
posix.utime(support.TESTFN, (int(now), int(now)))
posix.utime(support.TESTFN, (now, now))
def _test_chflags_regular_file(self, chflags_func, target_file, **kwargs):
st = os.stat(target_file)
self.assertPrawda(hasattr(st, 'st_flags'))
flags = st.st_flags | stat.UF_IMMUTABLE
spróbuj:
chflags_func(target_file, flags, **kwargs)
wyjąwszy OSError jako err:
jeżeli err.errno != errno.EOPNOTSUPP:
podnieś
msg = 'chflag UF_IMMUTABLE nie supported by underlying fs'
self.skipTest(msg)
spróbuj:
new_st = os.stat(target_file)
self.assertEqual(st.st_flags | stat.UF_IMMUTABLE, new_st.st_flags)
spróbuj:
fd = open(target_file, 'w+')
wyjąwszy OSError jako e:
self.assertEqual(e.errno, errno.EPERM)
w_końcu:
posix.chflags(target_file, st.st_flags)
@unittest.skipUnless(hasattr(posix, 'chflags'), 'test needs os.chflags()')
def test_chflags(self):
self._test_chflags_regular_file(posix.chflags, support.TESTFN)
@unittest.skipUnless(hasattr(posix, 'lchflags'), 'test needs os.lchflags()')
def test_lchflags_regular_file(self):
self._test_chflags_regular_file(posix.lchflags, support.TESTFN)
self._test_chflags_regular_file(posix.chflags, support.TESTFN, follow_symlinks=Nieprawda)
@unittest.skipUnless(hasattr(posix, 'lchflags'), 'test needs os.lchflags()')
def test_lchflags_symlink(self):
testfn_st = os.stat(support.TESTFN)
self.assertPrawda(hasattr(testfn_st, 'st_flags'))
os.symlink(support.TESTFN, _DUMMY_SYMLINK)
self.teardown_files.append(_DUMMY_SYMLINK)
dummy_symlink_st = os.lstat(_DUMMY_SYMLINK)
def chflags_nofollow(path, flags):
zwróć posix.chflags(path, flags, follow_symlinks=Nieprawda)
dla fn w (posix.lchflags, chflags_nofollow):
flags = dummy_symlink_st.st_flags | stat.UF_IMMUTABLE
spróbuj:
fn(_DUMMY_SYMLINK, flags)
wyjąwszy OSError jako err:
jeżeli err.errno != errno.EOPNOTSUPP:
podnieś
msg = 'chflag UF_IMMUTABLE nie supported by underlying fs'
self.skipTest(msg)
spróbuj:
new_testfn_st = os.stat(support.TESTFN)
new_dummy_symlink_st = os.lstat(_DUMMY_SYMLINK)
self.assertEqual(testfn_st.st_flags, new_testfn_st.st_flags)
self.assertEqual(dummy_symlink_st.st_flags | stat.UF_IMMUTABLE,
new_dummy_symlink_st.st_flags)
w_końcu:
fn(_DUMMY_SYMLINK, dummy_symlink_st.st_flags)
def test_environ(self):
jeżeli os.name == "nt":
item_type = str
inaczej:
item_type = bytes
dla k, v w posix.environ.items():
self.assertEqual(type(k), item_type)
self.assertEqual(type(v), item_type)
@unittest.skipUnless(hasattr(posix, 'getcwd'), 'test needs posix.getcwd()')
def test_getcwd_long_pathnames(self):
dirname = 'getcwd-test-directory-0123456789abcdef-01234567890abcdef'
curdir = os.getcwd()
base_path = os.path.abspath(support.TESTFN) + '.getcwd'
spróbuj:
os.mkdir(base_path)
os.chdir(base_path)
wyjąwszy:
zwróć
def _create_and_do_getcwd(dirname, current_path_length = 0):
spróbuj:
os.mkdir(dirname)
wyjąwszy:
podnieś unittest.SkipTest("mkdir cannot create directory sufficiently deep dla getcwd test")
os.chdir(dirname)
spróbuj:
os.getcwd()
jeżeli current_path_length < 1027:
_create_and_do_getcwd(dirname, current_path_length + len(dirname) + 1)
w_końcu:
os.chdir('..')
os.rmdir(dirname)
_create_and_do_getcwd(dirname)
w_końcu:
os.chdir(curdir)
support.rmtree(base_path)
@unittest.skipUnless(hasattr(posix, 'getgrouplist'), "test needs posix.getgrouplist()")
@unittest.skipUnless(hasattr(pwd, 'getpwuid'), "test needs pwd.getpwuid()")
@unittest.skipUnless(hasattr(os, 'getuid'), "test needs os.getuid()")
def test_getgrouplist(self):
user = pwd.getpwuid(os.getuid())[0]
group = pwd.getpwuid(os.getuid())[3]
self.assertIn(group, posix.getgrouplist(user, group))
@unittest.skipUnless(hasattr(os, 'getegid'), "test needs os.getegid()")
def test_getgroups(self):
przy os.popen('id -G 2>/dev/null') jako idg:
groups = idg.read().strip()
ret = idg.close()
jeżeli ret jest nie Nic albo nie groups:
podnieś unittest.SkipTest("need working 'id -G'")
jeżeli sys.platform == 'darwin':
zaimportuj sysconfig
dt = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET') albo '10.0'
jeżeli tuple(int(n) dla n w dt.split('.')[0:2]) < (10, 6):
podnieś unittest.SkipTest("getgroups(2) jest broken prior to 10.6")
la x w groups.split()]),
set(posix.getgroups() + [posix.getegid()]))
@unittest.skipUnless(os.access w os.supports_dir_fd, "test needs dir_fd support dla os.access()")
def test_access_dir_fd(self):
f = posix.open(posix.getcwd(), posix.O_RDONLY)
spróbuj:
self.assertPrawda(posix.access(support.TESTFN, os.R_OK, dir_fd=f))
w_końcu:
posix.close(f)
@unittest.skipUnless(os.chmod w os.supports_dir_fd, "test needs dir_fd support w os.chmod()")
def test_chmod_dir_fd(self):
os.chmod(support.TESTFN, stat.S_IRUSR)
f = posix.open(posix.getcwd(), posix.O_RDONLY)
spróbuj:
posix.chmod(support.TESTFN, stat.S_IRUSR | stat.S_IWUSR, dir_fd=f)
s = posix.stat(support.TESTFN)
self.assertEqual(s[0] & stat.S_IRWXU, stat.S_IRUSR | stat.S_IWUSR)
w_końcu:
posix.close(f)
@unittest.skipUnless(os.chown w os.supports_dir_fd, "test needs dir_fd support w os.chown()")
def test_chown_dir_fd(self):
support.unlink(support.TESTFN)
support.create_empty_file(support.TESTFN)
f = posix.open(posix.getcwd(), posix.O_RDONLY)
spróbuj:
posix.chown(support.TESTFN, os.getuid(), os.getgid(), dir_fd=f)
w_końcu:
posix.close(f)
@unittest.skipUnless(os.stat w os.supports_dir_fd, "test needs dir_fd support w os.stat()")
def test_stat_dir_fd(self):
support.unlink(support.TESTFN)
przy open(support.TESTFN, 'w') jako outfile:
outfile.write("testline\n")
f = posix.open(posix.getcwd(), posix.O_RDONLY)
spróbuj:
s1 = posix.stat(support.TESTFN)
s2 = posix.stat(support.TESTFN, dir_fd=f)
self.assertEqual(s1, s2)
s2 = posix.stat(support.TESTFN, dir_fd=Nic)
self.assertEqual(s1, s2)
self.assertRaisesRegex(TypeError, 'should be integer, not',
posix.stat, support.TESTFN, dir_fd=posix.getcwd())
self.assertRaisesRegex(TypeError, 'should be integer, not',
posix.stat, support.TESTFN, dir_fd=float(f))
self.assertRaises(OverflowError,
posix.stat, support.TESTFN, dir_fd=10**20)
w_końcu:
posix.close(f)
@unittest.skipUnless(os.utime w os.supports_dir_fd, "test needs dir_fd support w os.utime()")
def test_utime_dir_fd(self):
f = posix.open(posix.getcwd(), posix.O_RDONLY)
spróbuj:
now = time.time()
posix.utime(support.TESTFN, Nic, dir_fd=f)
posix.utime(support.TESTFN, dir_fd=f)
self.assertRaises(TypeError, posix.utime, support.TESTFN, now, dir_fd=f)
self.assertRaises(TypeError, posix.utime, support.TESTFN, (Nic, Nic), dir_fd=f)
self.assertRaises(TypeError, posix.utime, support.TESTFN, (now, Nic), dir_fd=f)
self.assertRaises(TypeError, posix.utime, support.TESTFN, (Nic, now), dir_fd=f)
self.assertRaises(TypeError, posix.utime, support.TESTFN, (now, "x"), dir_fd=f)
posix.utime(support.TESTFN, (int(now), int(now)), dir_fd=f)
posix.utime(support.TESTFN, (now, now), dir_fd=f)
posix.utime(support.TESTFN,
(int(now), int((now - int(now)) * 1e9)), dir_fd=f)
posix.utime(support.TESTFN, dir_fd=f,
times=(int(now), int((now - int(now)) * 1e9)))
jeżeli os.utime w os.supports_follow_symlinks:
spróbuj:
posix.utime(support.TESTFN, follow_symlinks=Nieprawda, dir_fd=f)
wyjąwszy ValueError:
dalej
w_końcu:
posix.close(f)
@unittest.skipUnless(os.link w os.supports_dir_fd, "test needs dir_fd support w os.link()")
def test_link_dir_fd(self):
f = posix.open(posix.getcwd(), posix.O_RDONLY)
spróbuj:
posix.link(support.TESTFN, support.TESTFN + 'link', src_dir_fd=f, dst_dir_fd=f)
self.assertEqual(posix.stat(support.TESTFN)[1],
posix.stat(support.TESTFN + 'link')[1])
w_końcu:
posix.close(f)
support.unlink(support.TESTFN + 'link')
@unittest.skipUnless(os.mkdir w os.supports_dir_fd, "test needs dir_fd support w os.mkdir()")
def test_mkdir_dir_fd(self):
f = posix.open(posix.getcwd(), posix.O_RDONLY)
spróbuj:
posix.mkdir(support.TESTFN + 'dir', dir_fd=f)
posix.stat(support.TESTFN + 'dir')
w_końcu:
posix.close(f)
support.rmtree(support.TESTFN + 'dir')
@unittest.skipUnless((os.mknod w os.supports_dir_fd) oraz hasattr(stat, 'S_IFIFO'),
"test requires both stat.S_IFIFO oraz dir_fd support dla os.mknod()")
def test_mknod_dir_fd(self):
support.unlink(support.TESTFN)
mode = stat.S_IFIFO | stat.S_IRUSR | stat.S_IWUSR
f = posix.open(posix.getcwd(), posix.O_RDONLY)
spróbuj:
posix.mknod(support.TESTFN, mode, 0, dir_fd=f)
wyjąwszy OSError jako e:
# mknod(), albo only support creating device nodes.
self.assertIn(e.errno, (errno.EPERM, errno.EINVAL))
inaczej:
self.assertPrawda(stat.S_ISFIFO(posix.stat(support.TESTFN).st_mode))
w_końcu:
posix.close(f)
@unittest.skipUnless(os.open w os.supports_dir_fd, "test needs dir_fd support w os.open()")
def test_open_dir_fd(self):
support.unlink(support.TESTFN)
przy open(support.TESTFN, 'w') jako outfile:
outfile.write("testline\n")
a = posix.open(posix.getcwd(), posix.O_RDONLY)
b = posix.open(support.TESTFN, posix.O_RDONLY, dir_fd=a)
spróbuj:
res = posix.read(b, 9).decode(encoding="utf-8")
self.assertEqual("testline\n", res)
w_końcu:
posix.close(a)
posix.close(b)
@unittest.skipUnless(os.readlink w os.supports_dir_fd, "test needs dir_fd support w os.readlink()")
def test_readlink_dir_fd(self):
os.symlink(support.TESTFN, support.TESTFN + 'link')
f = posix.open(posix.getcwd(), posix.O_RDONLY)
spróbuj:
self.assertEqual(posix.readlink(support.TESTFN + 'link'),
posix.readlink(support.TESTFN + 'link', dir_fd=f))
w_końcu:
support.unlink(support.TESTFN + 'link')
posix.close(f)
@unittest.skipUnless(os.rename w os.supports_dir_fd, "test needs dir_fd support w os.rename()")
def test_rename_dir_fd(self):
support.unlink(support.TESTFN)
support.create_empty_file(support.TESTFN + 'ren')
f = posix.open(posix.getcwd(), posix.O_RDONLY)
spróbuj:
posix.rename(support.TESTFN + 'ren', support.TESTFN, src_dir_fd=f, dst_dir_fd=f)
wyjąwszy:
posix.rename(support.TESTFN + 'ren', support.TESTFN)
podnieś
inaczej:
posix.stat(support.TESTFN) # should nie podnieś exception
w_końcu:
posix.close(f)
@unittest.skipUnless(os.symlink w os.supports_dir_fd, "test needs dir_fd support w os.symlink()")
def test_symlink_dir_fd(self):
f = posix.open(posix.getcwd(), posix.O_RDONLY)
spróbuj:
posix.symlink(support.TESTFN, support.TESTFN + 'link', dir_fd=f)
self.assertEqual(posix.readlink(support.TESTFN + 'link'), support.TESTFN)
w_końcu:
posix.close(f)
support.unlink(support.TESTFN + 'link')
@unittest.skipUnless(os.unlink w os.supports_dir_fd, "test needs dir_fd support w os.unlink()")
def test_unlink_dir_fd(self):
f = posix.open(posix.getcwd(), posix.O_RDONLY)
support.create_empty_file(support.TESTFN + 'del')
posix.stat(support.TESTFN + 'del') # should nie podnieś exception
spróbuj:
posix.unlink(support.TESTFN + 'del', dir_fd=f)
wyjąwszy:
support.unlink(support.TESTFN + 'del')
podnieś
inaczej:
self.assertRaises(OSError, posix.stat, support.TESTFN + 'link')
w_końcu:
posix.close(f)
@unittest.skipUnless(os.mkfifo w os.supports_dir_fd, "test needs dir_fd support w os.mkfifo()")
def test_mkfifo_dir_fd(self):
support.unlink(support.TESTFN)
f = posix.open(posix.getcwd(), posix.O_RDONLY)
spróbuj:
posix.mkfifo(support.TESTFN, stat.S_IRUSR | stat.S_IWUSR, dir_fd=f)
self.assertPrawda(stat.S_ISFIFO(posix.stat(support.TESTFN).st_mode))
w_końcu:
posix.close(f)
requires_sched_h = unittest.skipUnless(hasattr(posix, 'sched_uzyskaj'),
"don't have scheduling support")
requires_sched_affinity = unittest.skipUnless(hasattr(posix, 'sched_setaffinity'),
"don't have sched affinity support")
@requires_sched_h
def test_sched_uzyskaj(self):
# This has no error conditions (at least on Linux).
posix.sched_uzyskaj()
@requires_sched_h
@unittest.skipUnless(hasattr(posix, 'sched_get_priority_max'),
"requires sched_get_priority_max()")
def test_sched_priority(self):
# Round-robin usually has interesting priorities.
pol = posix.SCHED_RR
lo = posix.sched_get_priority_min(pol)
hi = posix.sched_get_priority_max(pol)
self.assertIsInstance(lo, int)
self.assertIsInstance(hi, int)
self.assertGreaterEqual(hi, lo)
# OSX evidently just returns 15 without checking the argument.
jeżeli sys.platform != "darwin":
self.assertRaises(OSError, posix.sched_get_priority_min, -23)
self.assertRaises(OSError, posix.sched_get_priority_max, -23)
@unittest.skipUnless(hasattr(posix, 'sched_setscheduler'), "can't change scheduler")
def test_get_and_set_scheduler_and_param(self):
possible_schedulers = [sched dla name, sched w posix.__dict__.items()
jeżeli name.startswith("SCHED_")]
mine = posix.sched_getscheduler(0)
self.assertIn(mine, possible_schedulers)
spróbuj:
parent = posix.sched_getscheduler(os.getppid())
wyjąwszy OSError jako e:
jeżeli e.errno != errno.EPERM:
podnieś
inaczej:
self.assertIn(parent, possible_schedulers)
self.assertRaises(OSError, posix.sched_getscheduler, -1)
self.assertRaises(OSError, posix.sched_getparam, -1)
param = posix.sched_getparam(0)
self.assertIsInstance(param.sched_priority, int)
jeżeli nie sys.platform.startswith(('freebsd', 'netbsd')):
spróbuj:
posix.sched_setscheduler(0, mine, param)
posix.sched_setparam(0, param)
wyjąwszy OSError jako e:
jeżeli e.errno != errno.EPERM:
podnieś
self.assertRaises(OSError, posix.sched_setparam, -1, param)
self.assertRaises(OSError, posix.sched_setscheduler, -1, mine, param)
self.assertRaises(TypeError, posix.sched_setscheduler, 0, mine, Nic)
self.assertRaises(TypeError, posix.sched_setparam, 0, 43)
param = posix.sched_param(Nic)
self.assertRaises(TypeError, posix.sched_setparam, 0, param)
large = 214748364700
param = posix.sched_param(large)
self.assertRaises(OverflowError, posix.sched_setparam, 0, param)
param = posix.sched_param(sched_priority=-large)
self.assertRaises(OverflowError, posix.sched_setparam, 0, param)
@unittest.skipUnless(hasattr(posix, "sched_rr_get_interval"), "no function")
def test_sched_rr_get_interval(self):
spróbuj:
interval = posix.sched_rr_get_interval(0)
wyjąwszy OSError jako e:
jeżeli e.errno != errno.EINVAL:
podnieś
self.skipTest("only works on SCHED_RR processes")
self.assertIsInstance(interval, float)
self.assertGreaterEqual(interval, 0.)
self.assertLess(interval, 1.)
@requires_sched_affinity
def test_sched_getaffinity(self):
mask = posix.sched_getaffinity(0)
self.assertIsInstance(mask, set)
self.assertGreaterEqual(len(mask), 1)
self.assertRaises(OSError, posix.sched_getaffinity, -1)
dla cpu w mask:
self.assertIsInstance(cpu, int)
self.assertGreaterEqual(cpu, 0)
self.assertLess(cpu, 1 << 32)
@requires_sched_affinity
def test_sched_setaffinity(self):
mask = posix.sched_getaffinity(0)
jeżeli len(mask) > 1:
mask.pop()
posix.sched_setaffinity(0, mask)
self.assertEqual(posix.sched_getaffinity(0), mask)
self.assertRaises(OSError, posix.sched_setaffinity, 0, [])
self.assertRaises(ValueError, posix.sched_setaffinity, 0, [-10])
self.assertRaises(OverflowError, posix.sched_setaffinity, 0, [1<<128])
self.assertRaises(OSError, posix.sched_setaffinity, -1, mask)
def test_rtld_constants(self):
posix.RTLD_LAZY
posix.RTLD_NOW
posix.RTLD_GLOBAL
posix.RTLD_LOCAL
@unittest.skipUnless(hasattr(os, 'SEEK_HOLE'),
"test needs an OS that reports file holes")
def test_fs_holes(self):
# jeżeli the OS supports it the SEEK_* constants
# will be defined oraz will have a consistent
# behaviour:
# os.SEEK_DATA = current position
# os.SEEK_HOLE = end of file position
przy open(support.TESTFN, 'r+b') jako fp:
fp.write(b"hello")
fp.flush()
size = fp.tell()
fno = fp.fileno()
try :
dla i w range(size):
self.assertEqual(i, os.lseek(fno, i, os.SEEK_DATA))
self.assertLessEqual(size, os.lseek(fno, i, os.SEEK_HOLE))
self.assertRaises(OSError, os.lseek, fno, size, os.SEEK_DATA)
self.assertRaises(OSError, os.lseek, fno, size, os.SEEK_HOLE)
wyjąwszy OSError :
# Some OSs claim to support SEEK_HOLE/SEEK_DATA
# but it jest nie true.
# For instance:
# http://lists.freebsd.org/pipermail/freebsd-amd64/2012-January/014332.html
podnieś unittest.SkipTest("OSError podnieśd!")
def test_path_error2(self):
"""
Test functions that call path_error2(), providing two filenames w their exceptions.
"""
dla name w ("rename", "replace", "link"):
function = getattr(os, name, Nic)
jeżeli function jest Nic:
kontynuuj
dla dst w ("noodly2", support.TESTFN):
spróbuj:
function('doesnotexistfilename', dst)
wyjąwszy OSError jako e:
self.assertIn("'doesnotexistfilename' -> '{}'".format(dst), str(e))
przerwij
inaczej:
self.fail("No valid path_error2() test dla os." + name)
def test_path_with_null_character(self):
fn = support.TESTFN
fn_with_NUL = fn + '\0'
self.addCleanup(support.unlink, fn)
support.unlink(fn)
fd = Nic
spróbuj:
przy self.assertRaises(ValueError):
fd = os.open(fn_with_NUL, os.O_WRONLY | os.O_CREAT) # podnieśs
w_końcu:
jeżeli fd jest nie Nic:
os.close(fd)
self.assertNieprawda(os.path.exists(fn))
self.assertRaises(ValueError, os.mkdir, fn_with_NUL)
self.assertNieprawda(os.path.exists(fn))
open(fn, 'wb').close()
self.assertRaises(ValueError, os.stat, fn_with_NUL)
def test_path_with_null_byte(self):
fn = os.fsencode(support.TESTFN)
fn_with_NUL = fn + b'\0'
self.addCleanup(support.unlink, fn)
support.unlink(fn)
fd = Nic
spróbuj:
przy self.assertRaises(ValueError):
fd = os.open(fn_with_NUL, os.O_WRONLY | os.O_CREAT) # podnieśs
w_końcu:
jeżeli fd jest nie Nic:
os.close(fd)
self.assertNieprawda(os.path.exists(fn))
self.assertRaises(ValueError, os.mkdir, fn_with_NUL)
self.assertNieprawda(os.path.exists(fn))
open(fn, 'wb').close()
self.assertRaises(ValueError, os.stat, fn_with_NUL)
klasa PosixGroupsTester(unittest.TestCase):
def setUp(self):
jeżeli posix.getuid() != 0:
podnieś unittest.SkipTest("not enough privileges")
jeżeli nie hasattr(posix, 'getgroups'):
podnieś unittest.SkipTest("need posix.getgroups")
jeżeli sys.platform == 'darwin':
podnieś unittest.SkipTest("getgroups(2) jest broken on OSX")
self.saved_groups = posix.getgroups()
def tearDown(self):
jeżeli hasattr(posix, 'setgroups'):
posix.setgroups(self.saved_groups)
albo_inaczej hasattr(posix, 'initgroups'):
name = pwd.getpwuid(posix.getuid()).pw_name
posix.initgroups(name, self.saved_groups[0])
@unittest.skipUnless(hasattr(posix, 'initgroups'),
"test needs posix.initgroups()")
def test_initgroups(self):
# find missing group
g = max(self.saved_groups albo [0]) + 1
name = pwd.getpwuid(posix.getuid()).pw_name
posix.initgroups(name, g)
self.assertIn(g, posix.getgroups())
@unittest.skipUnless(hasattr(posix, 'setgroups'),
"test needs posix.setgroups()")
def test_setgroups(self):
dla groups w [[0], list(range(16))]:
posix.setgroups(groups)
self.assertListEqual(groups, posix.getgroups())
def test_main():
spróbuj:
support.run_unittest(PosixTester, PosixGroupsTester)
w_końcu:
support.reap_children()
jeżeli __name__ == '__main__':
test_main()
| false
| true
|
f7178ca40551efa85c6e00fccc36031532a0273f
| 2,864
|
py
|
Python
|
DataManagement.py
|
DonCammne/OpenSeesPyAssistant
|
f380f0f2a2f3d1336320bd8d26fa5efe00a12134
|
[
"MIT"
] | null | null | null |
DataManagement.py
|
DonCammne/OpenSeesPyAssistant
|
f380f0f2a2f3d1336320bd8d26fa5efe00a12134
|
[
"MIT"
] | null | null | null |
DataManagement.py
|
DonCammne/OpenSeesPyAssistant
|
f380f0f2a2f3d1336320bd8d26fa5efe00a12134
|
[
"MIT"
] | null | null | null |
"""
Module with the parent abstract class DataManagement. \n
Carmine Schipani, 2021
"""
from abc import ABC, abstractmethod
from OpenSeesPyAssistant.ErrorHandling import *
import numpy as np
class DataManagement(ABC):
    """
    Abstract parent class for data management.
    Using the associated MATLAB class \n
    LOAD_CLASS.m \n
    for the postprocessing in MATLAB, allowing for simpler and more reliable data management because the parameters
    from the OpenSeesPy analysis are imported automatically.
    """

    def SaveData(self, f):
        """
        Function that lists in the command window and saves in a opened file text "f" the data from the "self" class that calls it.
        Example: call this function after this line: \n
        with open(FileName, 'w') as f:

        @param f (io.TextIOWrapper): Opened file to write into

        @exception WrongDimension: Every entry of self.data needs to be a list with exactly 2 elements (info_name, info_value)
        """
        # self.data is a list of [info_name, info_value] pairs (see
        # UpdateStoredData). Validate every row, not only the first one:
        # the original check (len(self.data[0]) != 2) let malformed later
        # rows slip through and crashed with IndexError on empty data.
        if any(len(data_line) != 2 for data_line in self.data): raise WrongDimension()

        delimiter = "##############################" # 30 times #
        col_delimiter = "\t" # tab
        for data_line in self.data:
            f.write('\n')
            for col in data_line:
                # numpy arrays are written on a single line (no wrapping)
                # so the MATLAB loader can parse them back reliably
                if type(col) == np.ndarray:
                    tmp_str = np.array_str(col, max_line_width = np.inf)
                else:
                    tmp_str = str(col)
                f.write(tmp_str)
                f.write(col_delimiter)
        f.write('\n')
        f.write('NEW INFO SECTION DELIMITER \t')
        f.write(delimiter)

    @abstractmethod
    def ShowInfo(self):
        """
        Abstract method that shows the data stored in the class in the command window.
        In some cases, it's possible to plot some information (for example the curve of the material model).
        """
        pass

    @abstractmethod
    def ReInit(self):
        """
        Abstract method that computes the value of the parameters with respect of the arguments. \n
        Use after changing the value of argument inside the class (to update the values accordingly). \n
        This function can be very useful in combination with the function "deepcopy()" from the module "copy". \n
        Be careful that the parameter self.Initialized is also copied, thus it is safer to copy the class before the method that calls the actual OpenSees commands (and initialise the object).
        """
        pass

    @abstractmethod
    def UpdateStoredData(self):
        """
        Abstract method used to define and update the self.data member variable. \n
        This member variable (self.data) is a list of lists with 2 entries (info_name and info_value)
        and for each list is stored a different member variable of the class. \n
        Useful to debug the model, export data, copy object.
        """
        pass
| 39.232877
| 192
| 0.631634
|
from abc import ABC, abstractmethod
from OpenSeesPyAssistant.ErrorHandling import *
import numpy as np
class DataManagement(ABC):
def SaveData(self, f):
if len(self.data[0]) != 2: raise WrongDimension()
delimiter = "##############################" col_delimiter = "\t"
for data_line in self.data:
f.write('\n')
for col in data_line:
if type(col) == np.ndarray:
tmp_str = np.array_str(col, max_line_width = np.inf)
else:
tmp_str = str(col)
f.write(tmp_str)
f.write(col_delimiter)
f.write('\n')
f.write('NEW INFO SECTION DELIMITER \t')
f.write(delimiter)
@abstractmethod
def ShowInfo(self):
pass
@abstractmethod
def ReInit(self):
pass
@abstractmethod
def UpdateStoredData(self):
pass
| true
| true
|
f7178e22c6a7b86147f3ad4e697f29a6c67dce4f
| 826
|
py
|
Python
|
final exam 2/World Tour.py
|
DiyanKalaydzhiev23/fundamentals---python
|
7fa032d9a3270648ffa383bb00dad8e51613189d
|
[
"MIT"
] | null | null | null |
final exam 2/World Tour.py
|
DiyanKalaydzhiev23/fundamentals---python
|
7fa032d9a3270648ffa383bb00dad8e51613189d
|
[
"MIT"
] | null | null | null |
final exam 2/World Tour.py
|
DiyanKalaydzhiev23/fundamentals---python
|
7fa032d9a3270648ffa383bb00dad8e51613189d
|
[
"MIT"
] | null | null | null |
# World tour planner: the route (a string of stops) is edited character by
# character by commands until "Travel" is entered.
stops = list(input())
command = input().split(":")

while command[0] != "Travel":
    if command[0] == "Add Stop":
        # Insert the new text at the given index (only if the index is valid).
        position = int(command[1])
        if 0 <= position < len(stops):
            stops[position:position] = command[2]
    elif command[0] == "Remove Stop":
        # Delete the characters between the two indices, inclusive.
        start = int(command[1])
        if 0 <= start < len(stops):
            end = int(command[2])
            if 0 <= end < len(stops):
                del stops[start:end + 1]
    elif command[0] == "Switch":
        # Replace every occurrence of the old text with the new one.
        route = ''.join(stops)
        if command[1] in route:
            route = route.replace(command[1], command[2])
        stops = list(route)
    print(''.join(stops))
    command = input().split(":")

print(f"Ready for world tour! Planned stops: {''.join(stops)}")
| 35.913043
| 91
| 0.53632
|
stops = list(input())
command = input().split(":")
while command[0] != "Travel":
if command[0] == "Add Stop":
if 0 <= int(command[1]) < len(stops):
index = int(command[1])
for letter in command[2]:
stops.insert(index, letter)
index += 1
elif command[0] == "Remove Stop":
if 0 <= int(command[1]) < len(stops) and 0 <= int(command[2]) < len(stops):
[stops.pop(int(command[1])) for i in range(int(command[1]), int(command[2])+1)]
elif command[0] == "Switch":
stops = ''.join(stops)
if command[1] in stops:
stops = stops.replace(command[1], command[2])
stops = list(stops)
print(''.join(stops))
command = input().split(":")
print(f"Ready for world tour! Planned stops: {''.join(stops)}")
| true
| true
|
f7178e9db3fd873fbe00c6932d476c7f06e20608
| 3,456
|
py
|
Python
|
hydrus/core/HydrusExceptions.py
|
ReAnzu/hydrus
|
069f77e1941d13b3bdd969aeeffd7ae003fcb71e
|
[
"WTFPL"
] | 1
|
2021-02-24T22:12:30.000Z
|
2021-02-24T22:12:30.000Z
|
hydrus/core/HydrusExceptions.py
|
ReAnzu/hydrus
|
069f77e1941d13b3bdd969aeeffd7ae003fcb71e
|
[
"WTFPL"
] | null | null | null |
hydrus/core/HydrusExceptions.py
|
ReAnzu/hydrus
|
069f77e1941d13b3bdd969aeeffd7ae003fcb71e
|
[
"WTFPL"
] | null | null | null |
import collections.abc
import os
class HydrusException( Exception ):
    def __str__( self ):
        # Render one line per exception argument, falling back to repr()
        # for arguments that cannot be stringified.
        def render( arg ):
            try:
                return str( arg )
            except BaseException:
                return repr( arg )
        if isinstance( self.args, collections.abc.Iterable ):
            lines = [ render( arg ) for arg in self.args ]
        else:
            lines = [ repr( self.args ) ]
        return os.linesep.join( lines )
# Rendering / generic data problems.
class CantRenderWithCVException( HydrusException ): pass
class DataMissing( HydrusException ): pass
class DBException( HydrusException ):
    def __init__( self, e, first_line, db_traceback ):
        # Keep the original database exception around so callers can
        # inspect it; the summary line and db traceback go to the base.
        self.db_e = e
        super().__init__( first_line, db_traceback )
# Database and filesystem problems.
class DBAccessException( HydrusException ): pass
class DBCredentialsException( HydrusException ): pass
class FileMissingException( HydrusException ): pass
class DirectoryMissingException( HydrusException ): pass
class SerialisationException( HydrusException ): pass
class NameException( HydrusException ): pass
class ShutdownException( HydrusException ): pass
class QtDeadWindowException(HydrusException): pass
# A veto signals a deliberate rejection of an operation (by a rule or the
# user), as opposed to an unexpected failure.
class VetoException( HydrusException ): pass
class CancelledException( VetoException ): pass
class UnsupportedFileException( VetoException ): pass
class DamagedOrUnusualFileException( UnsupportedFileException ): pass
class FileSizeException( UnsupportedFileException ): pass
class DecompressionBombException( FileSizeException ): pass
class TagSizeException( VetoException ): pass
# Content-parsing failures (string processing, URL classes, gallery URL
# generators).
class ParseException( HydrusException ): pass
class StringConvertException( ParseException ): pass
class StringMatchException( ParseException ): pass
class StringSplitterException( ParseException ): pass
class URLClassException( ParseException ): pass
class GUGException( ParseException ): pass
# Network-layer failures.
class NetworkException( HydrusException ): pass
# Problems with the infrastructure between client and server (connection,
# firewall, router, CDN, bandwidth, server availability).
class NetworkInfrastructureException( NetworkException ): pass
class ConnectionException( NetworkInfrastructureException ): pass
class FirewallException( NetworkInfrastructureException ): pass
class RouterException( NetworkInfrastructureException ): pass
class CloudFlareException( NetworkInfrastructureException ): pass
class BandwidthException( NetworkInfrastructureException ): pass
class ServerException( NetworkInfrastructureException ): pass
class ServerBusyException( NetworkInfrastructureException ): pass
# Protocol / response-level failures.
class StreamTimeoutException( NetworkException ): pass
class NetworkVersionException( NetworkException ): pass
class NoContentException( NetworkException ): pass
class NotFoundException( NetworkException ): pass
class NotModifiedException( NetworkException ): pass
class BadRequestException( NetworkException ): pass
class ConflictException( NetworkException ): pass
class MissingCredentialsException( NetworkException ): pass
class DoesNotSupportCORSException( NetworkException ): pass
class InsufficientCredentialsException( NetworkException ): pass
class RedirectionException( NetworkException ): pass
class SessionException( NetworkException ): pass
class WrongServiceTypeException( NetworkException ): pass
class ValidationException( NetworkException ): pass
class ShouldReattemptNetworkException( NetworkException ): pass
| 35.265306
| 69
| 0.732928
|
import collections.abc
import os
class HydrusException( Exception ):
def __str__( self ):
if isinstance( self.args, collections.abc.Iterable ):
s = []
for arg in self.args:
try:
s.append( str( arg ) )
except:
s.append( repr( arg ) )
else:
s = [ repr( self.args ) ]
return os.linesep.join( s )
class CantRenderWithCVException( HydrusException ): pass
class DataMissing( HydrusException ): pass
class DBException( HydrusException ):
def __init__( self, e, first_line, db_traceback ):
self.db_e = e
HydrusException.__init__( self, first_line, db_traceback )
class DBAccessException( HydrusException ): pass
class DBCredentialsException( HydrusException ): pass
class FileMissingException( HydrusException ): pass
class DirectoryMissingException( HydrusException ): pass
class SerialisationException( HydrusException ): pass
class NameException( HydrusException ): pass
class ShutdownException( HydrusException ): pass
class QtDeadWindowException(HydrusException): pass
class VetoException( HydrusException ): pass
class CancelledException( VetoException ): pass
class UnsupportedFileException( VetoException ): pass
class DamagedOrUnusualFileException( UnsupportedFileException ): pass
class FileSizeException( UnsupportedFileException ): pass
class DecompressionBombException( FileSizeException ): pass
class TagSizeException( VetoException ): pass
class ParseException( HydrusException ): pass
class StringConvertException( ParseException ): pass
class StringMatchException( ParseException ): pass
class StringSplitterException( ParseException ): pass
class URLClassException( ParseException ): pass
class GUGException( ParseException ): pass
class NetworkException( HydrusException ): pass
class NetworkInfrastructureException( NetworkException ): pass
class ConnectionException( NetworkInfrastructureException ): pass
class FirewallException( NetworkInfrastructureException ): pass
class RouterException( NetworkInfrastructureException ): pass
class CloudFlareException( NetworkInfrastructureException ): pass
class BandwidthException( NetworkInfrastructureException ): pass
class ServerException( NetworkInfrastructureException ): pass
class ServerBusyException( NetworkInfrastructureException ): pass
class StreamTimeoutException( NetworkException ): pass
class NetworkVersionException( NetworkException ): pass
class NoContentException( NetworkException ): pass
class NotFoundException( NetworkException ): pass
class NotModifiedException( NetworkException ): pass
class BadRequestException( NetworkException ): pass
class ConflictException( NetworkException ): pass
class MissingCredentialsException( NetworkException ): pass
class DoesNotSupportCORSException( NetworkException ): pass
class InsufficientCredentialsException( NetworkException ): pass
class RedirectionException( NetworkException ): pass
class SessionException( NetworkException ): pass
class WrongServiceTypeException( NetworkException ): pass
class ValidationException( NetworkException ): pass
class ShouldReattemptNetworkException( NetworkException ): pass
| true
| true
|
f7178ea0d3c97e41fedd16e6e263162427023912
| 8,139
|
py
|
Python
|
synapse/appservice/__init__.py
|
mweinelt/synapse
|
42a9ea37e4c6ff9d91b530c40d366446b9fc2234
|
[
"Apache-2.0"
] | null | null | null |
synapse/appservice/__init__.py
|
mweinelt/synapse
|
42a9ea37e4c6ff9d91b530c40d366446b9fc2234
|
[
"Apache-2.0"
] | null | null | null |
synapse/appservice/__init__.py
|
mweinelt/synapse
|
42a9ea37e4c6ff9d91b530c40d366446b9fc2234
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.api.constants import EventTypes
import logging
import re
logger = logging.getLogger(__name__)
class ApplicationServiceState(object):
    # String constants recording whether an application service is
    # reachable for event pushing ("up") or not ("down").
    DOWN = "down"
    UP = "up"
class AppServiceTransaction(object):
    """A batch of events queued for delivery to one application service."""

    def __init__(self, service, id, events):
        self.service = service
        self.id = id
        self.events = events

    def send(self, as_api):
        """Push this transaction's events via the given AS API client.

        Args:
            as_api(ApplicationServiceApi): The API to use to send.
        Returns:
            A Deferred which resolves to True if the transaction was sent.
        """
        return as_api.push_bulk(
            service=self.service,
            events=self.events,
            txn_id=self.id,
        )

    def complete(self, store):
        """Mark this transaction as successfully delivered.

        Records this transaction ID against the application service and
        removes the stored transaction contents.

        Args:
            store: The database store to operate on.
        Returns:
            A Deferred which resolves to True if the transaction was completed.
        """
        return store.complete_appservice_txn(
            service=self.service,
            txn_id=self.id,
        )
class ApplicationService(object):
"""Defines an application service. This definition is mostly what is
provided to the /register AS API.
Provides methods to check if this service is "interested" in events.
"""
NS_USERS = "users"
NS_ALIASES = "aliases"
NS_ROOMS = "rooms"
# The ordering here is important as it is used to map database values (which
# are stored as ints representing the position in this list) to namespace
# values.
NS_LIST = [NS_USERS, NS_ALIASES, NS_ROOMS]
    def __init__(self, token, url=None, namespaces=None, hs_token=None,
                 sender=None, id=None):
        """
        Args:
            token: presumably the token used to authenticate this AS —
                TODO confirm against the AS registration docs.
            url: base URL the AS is reachable at (may be None).
            namespaces: dict of users/aliases/rooms regex lists; validated
                and normalised by _check_namespaces before being stored.
            hs_token: NOTE(review): looks like the homeserver-side token;
                confirm with the AS API spec.
            sender: user id string this AS acts as (not validated here).
            id: unique identifier for this application service.
        """
        self.token = token
        self.url = url
        self.hs_token = hs_token
        self.sender = sender
        # Stored namespaces are always a dict with all three NS_LIST keys.
        self.namespaces = self._check_namespaces(namespaces)
        self.id = id
def _check_namespaces(self, namespaces):
# Sanity check that it is of the form:
# {
# users: [ {regex: "[A-z]+.*", exclusive: true}, ...],
# aliases: [ {regex: "[A-z]+.*", exclusive: true}, ...],
# rooms: [ {regex: "[A-z]+.*", exclusive: true}, ...],
# }
if not namespaces:
namespaces = {}
for ns in ApplicationService.NS_LIST:
if ns not in namespaces:
namespaces[ns] = []
continue
if type(namespaces[ns]) != list:
raise ValueError("Bad namespace value for '%s'" % ns)
for regex_obj in namespaces[ns]:
if not isinstance(regex_obj, dict):
raise ValueError("Expected dict regex for ns '%s'" % ns)
if not isinstance(regex_obj.get("exclusive"), bool):
raise ValueError(
"Expected bool for 'exclusive' in ns '%s'" % ns
)
if not isinstance(regex_obj.get("regex"), basestring):
raise ValueError(
"Expected string for 'regex' in ns '%s'" % ns
)
return namespaces
def _matches_regex(self, test_string, namespace_key, return_obj=False):
if not isinstance(test_string, basestring):
logger.error(
"Expected a string to test regex against, but got %s",
test_string
)
return False
for regex_obj in self.namespaces[namespace_key]:
if re.match(regex_obj["regex"], test_string):
if return_obj:
return regex_obj
return True
return False
def _is_exclusive(self, ns_key, test_string):
regex_obj = self._matches_regex(test_string, ns_key, return_obj=True)
if regex_obj:
return regex_obj["exclusive"]
return False
def _matches_user(self, event, member_list):
if (hasattr(event, "sender") and
self.is_interested_in_user(event.sender)):
return True
# also check m.room.member state key
if (hasattr(event, "type") and event.type == EventTypes.Member
and hasattr(event, "state_key")
and self.is_interested_in_user(event.state_key)):
return True
# check joined member events
for user_id in member_list:
if self.is_interested_in_user(user_id):
return True
return False
def _matches_room_id(self, event):
if hasattr(event, "room_id"):
return self.is_interested_in_room(event.room_id)
return False
def _matches_aliases(self, event, alias_list):
for alias in alias_list:
if self.is_interested_in_alias(alias):
return True
return False
def is_interested(self, event, restrict_to=None, aliases_for_event=None,
member_list=None):
"""Check if this service is interested in this event.
Args:
event(Event): The event to check.
restrict_to(str): The namespace to restrict regex tests to.
aliases_for_event(list): A list of all the known room aliases for
this event.
member_list(list): A list of all joined user_ids in this room.
Returns:
bool: True if this service would like to know about this event.
"""
if aliases_for_event is None:
aliases_for_event = []
if member_list is None:
member_list = []
if restrict_to and restrict_to not in ApplicationService.NS_LIST:
# this is a programming error, so fail early and raise a general
# exception
raise Exception("Unexpected restrict_to value: %s". restrict_to)
if not restrict_to:
return (self._matches_user(event, member_list)
or self._matches_aliases(event, aliases_for_event)
or self._matches_room_id(event))
elif restrict_to == ApplicationService.NS_ALIASES:
return self._matches_aliases(event, aliases_for_event)
elif restrict_to == ApplicationService.NS_ROOMS:
return self._matches_room_id(event)
elif restrict_to == ApplicationService.NS_USERS:
return self._matches_user(event, member_list)
def is_interested_in_user(self, user_id):
return (
self._matches_regex(user_id, ApplicationService.NS_USERS)
or user_id == self.sender
)
def is_interested_in_alias(self, alias):
return self._matches_regex(alias, ApplicationService.NS_ALIASES)
def is_interested_in_room(self, room_id):
return self._matches_regex(room_id, ApplicationService.NS_ROOMS)
def is_exclusive_user(self, user_id):
return (
self._is_exclusive(ApplicationService.NS_USERS, user_id)
or user_id == self.sender
)
def is_exclusive_alias(self, alias):
return self._is_exclusive(ApplicationService.NS_ALIASES, alias)
def is_exclusive_room(self, room_id):
return self._is_exclusive(ApplicationService.NS_ROOMS, room_id)
def __str__(self):
return "ApplicationService: %s" % (self.__dict__,)
| 35.854626
| 80
| 0.6158
|
from synapse.api.constants import EventTypes
import logging
import re
logger = logging.getLogger(__name__)
class ApplicationServiceState(object):
DOWN = "down"
UP = "up"
class AppServiceTransaction(object):
def __init__(self, service, id, events):
self.service = service
self.id = id
self.events = events
def send(self, as_api):
return as_api.push_bulk(
service=self.service,
events=self.events,
txn_id=self.id
)
def complete(self, store):
return store.complete_appservice_txn(
service=self.service,
txn_id=self.id
)
class ApplicationService(object):
NS_USERS = "users"
NS_ALIASES = "aliases"
NS_ROOMS = "rooms"
NS_LIST = [NS_USERS, NS_ALIASES, NS_ROOMS]
def __init__(self, token, url=None, namespaces=None, hs_token=None,
sender=None, id=None):
self.token = token
self.url = url
self.hs_token = hs_token
self.sender = sender
self.namespaces = self._check_namespaces(namespaces)
self.id = id
def _check_namespaces(self, namespaces):
if not namespaces:
namespaces = {}
for ns in ApplicationService.NS_LIST:
if ns not in namespaces:
namespaces[ns] = []
continue
if type(namespaces[ns]) != list:
raise ValueError("Bad namespace value for '%s'" % ns)
for regex_obj in namespaces[ns]:
if not isinstance(regex_obj, dict):
raise ValueError("Expected dict regex for ns '%s'" % ns)
if not isinstance(regex_obj.get("exclusive"), bool):
raise ValueError(
"Expected bool for 'exclusive' in ns '%s'" % ns
)
if not isinstance(regex_obj.get("regex"), basestring):
raise ValueError(
"Expected string for 'regex' in ns '%s'" % ns
)
return namespaces
def _matches_regex(self, test_string, namespace_key, return_obj=False):
if not isinstance(test_string, basestring):
logger.error(
"Expected a string to test regex against, but got %s",
test_string
)
return False
for regex_obj in self.namespaces[namespace_key]:
if re.match(regex_obj["regex"], test_string):
if return_obj:
return regex_obj
return True
return False
def _is_exclusive(self, ns_key, test_string):
regex_obj = self._matches_regex(test_string, ns_key, return_obj=True)
if regex_obj:
return regex_obj["exclusive"]
return False
def _matches_user(self, event, member_list):
if (hasattr(event, "sender") and
self.is_interested_in_user(event.sender)):
return True
if (hasattr(event, "type") and event.type == EventTypes.Member
and hasattr(event, "state_key")
and self.is_interested_in_user(event.state_key)):
return True
for user_id in member_list:
if self.is_interested_in_user(user_id):
return True
return False
def _matches_room_id(self, event):
if hasattr(event, "room_id"):
return self.is_interested_in_room(event.room_id)
return False
def _matches_aliases(self, event, alias_list):
for alias in alias_list:
if self.is_interested_in_alias(alias):
return True
return False
def is_interested(self, event, restrict_to=None, aliases_for_event=None,
member_list=None):
if aliases_for_event is None:
aliases_for_event = []
if member_list is None:
member_list = []
if restrict_to and restrict_to not in ApplicationService.NS_LIST:
raise Exception("Unexpected restrict_to value: %s". restrict_to)
if not restrict_to:
return (self._matches_user(event, member_list)
or self._matches_aliases(event, aliases_for_event)
or self._matches_room_id(event))
elif restrict_to == ApplicationService.NS_ALIASES:
return self._matches_aliases(event, aliases_for_event)
elif restrict_to == ApplicationService.NS_ROOMS:
return self._matches_room_id(event)
elif restrict_to == ApplicationService.NS_USERS:
return self._matches_user(event, member_list)
def is_interested_in_user(self, user_id):
return (
self._matches_regex(user_id, ApplicationService.NS_USERS)
or user_id == self.sender
)
def is_interested_in_alias(self, alias):
return self._matches_regex(alias, ApplicationService.NS_ALIASES)
def is_interested_in_room(self, room_id):
return self._matches_regex(room_id, ApplicationService.NS_ROOMS)
def is_exclusive_user(self, user_id):
return (
self._is_exclusive(ApplicationService.NS_USERS, user_id)
or user_id == self.sender
)
def is_exclusive_alias(self, alias):
return self._is_exclusive(ApplicationService.NS_ALIASES, alias)
def is_exclusive_room(self, room_id):
return self._is_exclusive(ApplicationService.NS_ROOMS, room_id)
def __str__(self):
return "ApplicationService: %s" % (self.__dict__,)
| true
| true
|
f717904cee8dca9e6d25f6ea498b85acd973ab00
| 8,645
|
py
|
Python
|
src/extractor/make_bb_info_mats.py
|
lonelu/Metalprot_learning
|
8edb2c3e4f6ba129a409d75fd4d15ceb3a9e307b
|
[
"MIT"
] | null | null | null |
src/extractor/make_bb_info_mats.py
|
lonelu/Metalprot_learning
|
8edb2c3e4f6ba129a409d75fd4d15ceb3a9e307b
|
[
"MIT"
] | null | null | null |
src/extractor/make_bb_info_mats.py
|
lonelu/Metalprot_learning
|
8edb2c3e4f6ba129a409d75fd4d15ceb3a9e307b
|
[
"MIT"
] | null | null | null |
from numpy.core.numeric import full
from numpy.lib.function_base import append
import prody as pr
import os
import numpy
import matplotlib as mpl
import pylab
from itertools import combinations, combinations_with_replacement
from docopt import docopt
import itertools
import pickle
import sys
from scipy.linalg.basic import matrix_balance
from scipy.spatial.distance import cdist
from . import ligand_database as ld
from . import features_pdb2dihe as fpdh
metal_sel = 'ion or name NI MN ZN CO CU MG FE'
#TO DO: create artificial aa in the 4th aa.
def get_atg(full_pdb):
    '''
    Build a single prody AtomGroup holding the metal-contacting residues of
    `full_pdb`, each extended by one neighbouring residue on either side.
    The AtomGroup is later used to calc backbone info matrices.
    If a contact aa is at a chain terminus, the dist matrix would come out
    smaller than 12, so the terminal residue is duplicated to pad the shape.

    Args:
        full_pdb: prody structure containing one metal ion (see metal_sel).
    Returns:
        prody AtomGroup titled '0-1-2-3' with renumbered residues.
    '''
    # First atom matching the module-level metal selection string.
    metal = full_pdb.select(metal_sel)[0]
    # Heavy, non-carbon protein atoms within 2.83 A of the metal residue.
    contact_aas = full_pdb.select('protein and not carbon and not hydrogen and within 2.83 of resindex ' + str(metal.getResindex()))
    contact_aa_resinds = numpy.unique(contact_aas.getResindices())
    extention = 1
    coords = []
    resnames = []
    names = []
    resnums = []
    resn = 1  # residues are renumbered 1..N in the output AtomGroup
    for resind in contact_aa_resinds:
        ext_inds = ld.extend_res_indices([resind], full_pdb, extend =extention)
        #In some cases, the contact aa is at terminal. We can add more aa to match the shape.
        if len(ext_inds) == 2:
            # One neighbour missing: duplicate the contact residue on the
            # side that was truncated.
            if ext_inds[0] == resind:
                ext_inds.insert(0, resind)
            else:
                ext_inds.append(resind)
        if len(ext_inds) == 1:
            # Both neighbours missing: triplicate the contact residue.
            ext_inds.append(resind)
            ext_inds.append(resind)
        for ind in ext_inds:
            aa = full_pdb.select('resindex ' + str(ind))
            coords.extend(aa.getCoords())
            resnames.extend(aa.getResnames())
            names.extend(aa.getNames())
            resnums.extend([resn for _i in range(len(aa))])
            resn += 1
    # NOTE(review): extending with empty lists is a no-op; per the TODO at the
    # top of the file this branch was presumably meant to append an artificial
    # 4th residue when only 3 contacts exist — confirm intent.
    if len(contact_aa_resinds) == 3:
        coords.extend([])
        resnames.extend([])
        names.extend([])
        resnums.extend([])
    #ag = pr.AtomGroup('-'.join([str(p) for p in per]))
    ag = pr.AtomGroup('0-1-2-3')
    ag.setCoords(coords)
    ag.setResnums(resnums)
    ag.setResnames(resnames)
    ag.setNames(names)
    return ag
def get_atgs(full_pdb, contain_metal = True):
    '''
    Build prody AtomGroups of (extended) metal-contacting residues; they are
    later used to calc backbone info matrices.
    If a contact aa is at a chain terminus, the dist matrix would come out
    smaller than 12, so the terminal residue is duplicated to pad the shape.

    Args:
        full_pdb: prody structure.
        contain_metal: when True, contacts are found around the metal ion;
            otherwise all HIS/CYS/GLU/ASP residues are taken.
    Returns:
        list of prody AtomGroups (currently at most one — permutations are
        commented out below).
    '''
    if contain_metal:
        metal = full_pdb.select(metal_sel)[0]
        contact_aas = full_pdb.select('protein and not carbon and not hydrogen and within 2.83 of resindex ' + str(metal.getResindex()))
    else:
        #TO DO: it is not quite right here if the pdb happened to have more HIS-CYS-GLU-ASP. Skip now.
        contact_aas = full_pdb.select('resname HIS CYS GLU ASP')
    # NOTE(review): with `and`, this guard can never be True without crashing —
    # if contact_aas is None, `len(contact_aas.getResindices())` raises before
    # returning. The intent looks like `if contact_aas is None or len(...) > 4`;
    # confirm before changing.
    if not contact_aas and len(numpy.unique(contact_aas.getResindices())) > 4:
        return []
    contact_aa_resinds = numpy.unique(contact_aas.getResindices())
    extention = 1
    # TO DO: If the len of contact_ass is not 4...
    ags = []
    #for per in itertools.permutations(range(len(contact_aa_resinds))):
    for per in [range(len(contact_aa_resinds))]:
        print(per)
        coords = []
        resnames = []
        names = []
        resnums = []
        resn = 1  # residues are renumbered 1..N in the output AtomGroup
        for idx in per:
            resind = contact_aa_resinds[idx]
            ext_inds = ld.extend_res_indices([resind], full_pdb, extend =extention)
            #In some cases, the contact aa is at terminal. We can add more aa to match the shape.
            if len(ext_inds) == 2:
                # One neighbour missing: duplicate the contact residue on the
                # truncated side.
                if ext_inds[0] == resind:
                    ext_inds.insert(0, resind)
                else:
                    ext_inds.append(resind)
            if len(ext_inds) == 1:
                # Both neighbours missing: triplicate the contact residue.
                ext_inds.append(resind)
                ext_inds.append(resind)
            for ind in ext_inds:
                aa = full_pdb.select('resindex ' + str(ind))
                coords.extend(aa.getCoords())
                resnames.extend(aa.getResnames())
                names.extend(aa.getNames())
                resnums.extend([resn for _i in range(len(aa))])
                resn += 1
        ag = pr.AtomGroup('-'.join([str(p) for p in per]))
        ag.setCoords(coords)
        ag.setResnums(resnums)
        ag.setResnames(resnames)
        ag.setNames(names)
        ags.append(ag)
    return ags
def get_bb_dist_seq(core):
    '''
    Compute pairwise backbone distance matrices (N-N, C-C, CA-CA, CB-CB)
    for every residue in `core`.
    If we know N CA C, the coords of CB can be calculated, so actual CB
    coords may not be needed (important for GLY, which has no CB).
    '''
    n_coords = core.select('name N').getCoords()
    c_coords = core.select('name C').getCoords()
    ca_coords = core.select('name CA').getCoords()
    n_n = cdist(n_coords, n_coords)
    c_c = cdist(c_coords, c_coords)
    ca_ca = cdist(ca_coords, ca_coords)
    # Reconstruct an idealized CB position from the N/CA/C geometry
    # (standard tetrahedral construction with fixed coefficients).
    cb_coords = []
    for i in range(len(n_coords)):
        Ca = ca_coords[i]
        C = c_coords[i]
        N = n_coords[i]
        b = Ca - N
        c = C - Ca
        a = numpy.cross(b, c)
        Cb = -0.58273431*a + 0.56802827*b - 0.54067466*c + Ca
        cb_coords.append(Cb)
    # NOTE(review): the computed pseudo-CB list above is immediately
    # overwritten by the actual CB atoms here, making the loop dead code —
    # and this line will fail for GLY-containing cores (no CB atom).
    # Confirm which source of CB coords is intended.
    cb_coords = core.select('name CB').getCoords()
    cb_cb = cdist(cb_coords, cb_coords)
    return n_n, c_c, ca_ca, cb_cb
def get_dihe(ag):
    '''
    Compute inter-residue dihedral maps (omega, theta_asym, phi_asym) via
    features_pdb2dihe.get_neighbors, then keep only entries at every third
    residue index starting from 1 — presumably the central (contact) residue
    of each 3-residue window built by get_atg(s); confirm against those
    helpers.
    Please check features_pdb2dihe.py for the angle definitions.
    '''
    nres = len(ag.select('name CA'))
    print(nres)
    # dist is computed but unused here; 20.0 is the neighbor cutoff passed
    # through to get_neighbors.
    dist, _omega, _theta_asym, _phi_asym = fpdh.get_neighbors(ag, nres, 20.0)
    #TO DO: extract info, only the contact aa matters?!
    omega = numpy.zeros((nres, nres))
    theta_asym = numpy.zeros((nres, nres))
    phi_asym = numpy.zeros((nres, nres))
    # Copy only the rows/columns of the central residues (indices 1, 4, 7...).
    for i in range(1, nres, 3):
        for j in range(1, nres, 3):
            omega[i, j] = _omega[i, j]
            theta_asym[i, j] = _theta_asym[i, j]
            phi_asym[i, j] = _phi_asym[i, j]
    return omega, theta_asym, phi_asym
def get_seq_mat(ag, matrix_size = 12):
    """Encode the residue sequence of `ag` as 40 one-hot channel matrices.

    Channels 0-19 hold horizontal rows of 1's for each residue's amino-acid
    type; channels 20-39 hold the matching vertical columns. Residues whose
    three-letter code is not one of the 20 standard names are reported and
    left zeroed.

    Args:
        ag: prody AtomGroup; only .select('name CA').getResnames() is used.
        matrix_size: edge length of each square channel matrix.

    Returns:
        numpy int array of shape (40, matrix_size, matrix_size).
    """
    seq = ag.select('name CA').getResnames()
    threelettercodes = ['ALA', 'ARG', 'ASN', 'ASP', 'CYS', 'GLU', 'GLN', 'GLY', 'HIS', 'ILE', 'LEU', 'LYS', 'MET',\
         'PHE', 'PRO', 'SER', 'THR', 'TRP', 'TYR', 'VAL']
    seq_channels = numpy.zeros([40, matrix_size, matrix_size], dtype=int)
    for i in range(len(seq)):
        aa = seq[i]
        try:
            idx = threelettercodes.index(aa)
        # Fixed: narrowed the original bare `except:` to the ValueError that
        # list.index actually raises, so unrelated errors are not swallowed.
        except ValueError:
            print('Resname of following atom not found: {}'.format(aa))
            continue
        for j in range(len(seq)):
            seq_channels[idx][i][j] = 1  # horizontal rows of 1's in first 20 channels
            seq_channels[idx+20][j][i] = 1  # vertical columns of 1's in next 20 channels
    return seq_channels
def mk_full_mats(ag, matrix_size = 12):
    """Assemble the 47-channel feature tensor for one binding core.

    Channels 0-3: N-N, C-C, CA-CA, CB-CB distance maps (top-left corner,
    zero-padded to matrix_size); channels 4-6: omega / theta_asym / phi_asym
    dihedral maps; channels 7-46: one-hot sequence encoding.
    """
    nres = len(ag.select('name CA'))
    n_n, c_c, ca_ca, cb_cb = get_bb_dist_seq(ag)
    omega, theta_asym, phi_asym = get_dihe(ag)
    seq_mats = get_seq_mat(ag, matrix_size)
    full_mat = numpy.zeros((47, matrix_size, matrix_size))
    # Each geometric map must fit inside matrix_size; it is written into the
    # top-left corner of its channel.
    geometry = [n_n, c_c, ca_ca, cb_cb, omega, theta_asym, phi_asym]
    for channel, mat in enumerate(geometry):
        rows, cols = mat.shape
        full_mat[channel, 0:rows, 0:cols] = mat
    for channel in range(7, 47):
        full_mat[channel, :, :] = seq_mats[channel - 7]
    return full_mat
def write_pickle_file(full_mat, pdb, ag, out_folder, tag = ''):
    """Serialize the full feature matrix to a .pkl file inside out_folder.

    The file name is built from the pdb basename, the AtomGroup title and an
    optional tag: <out_folder><pdb>_full_mat_<title><tag>.pkl
    Currently used only to save the full (47-channel) matrix.
    """
    numpy.set_printoptions(threshold=numpy.inf)
    base = pdb.split('.')[0]
    out_path = '{}{}_full_mat_{}{}.pkl'.format(out_folder, base, ag.getTitle(), tag)
    with open(out_path, 'wb') as handle:
        print(out_path)
        pickle.dump(full_mat, handle)
    return
def write_dist_mat_file(mat, pdb, ag, out_folder, tag = ''):
    """Write the feature matrix as text: one str()-formatted row per line.

    Output path: <out_folder><pdb>_full_mat_<title><tag>.txt
    """
    # threshold=inf so numpy never elides rows with '...' in the text dump.
    numpy.set_printoptions(threshold=numpy.inf)
    base = pdb.split('.')[0]
    txt_path = '{}{}_full_mat_{}{}.txt'.format(out_folder, base, ag.getTitle(), tag)
    lines = [str(row) + '\n' for row in mat]
    with open(txt_path, 'w') as handle:
        handle.writelines(lines)
    return
def run_mk_bb_info_mats(workdir, out_path, mat_size = 12, top = 1000, contain_metal = True, opts = None):
    """Batch driver: build and save feature matrices for every .pdb in workdir.

    For each pdb file, extracts binding-core AtomGroups (get_atgs), builds
    the 47-channel matrix (mk_full_mats) and writes both a .txt and a .pkl
    per core into out_path. Failures are collected (best-effort) and written
    to '<out_path>_error.txt'.

    Args:
        workdir: input directory (must end with a path separator — paths are
            built by string concatenation).
        out_path: output directory, created if missing.
        mat_size: edge length of each channel matrix.
        top: stop after this many successfully processed cores.
        contain_metal: forwarded to get_atgs.
        opts: unused; kept for interface compatibility.
    """
    os.makedirs(out_path, exist_ok=True)
    count = 0
    errors = ''
    for pdb_name in os.listdir(workdir):
        if count >= top:
            break
        if '.pdb' not in pdb_name:
            continue
        pdb_file = workdir + pdb_name
        pdb = pr.parsePDB(pdb_file)
        ags = get_atgs(pdb, contain_metal)
        for ag in ags:
            try:
                #TO DO: currently, only consider 3 or 4 aa binding.
                # Skip cores outside the 7..12 CA range (3-4 extended residues).
                if len(ag.select('name CA'))> 12 or len(ag.select('name CA')) < 7:
                    print(pdb_name + ' not used. ')
                    continue
                full_mat = mk_full_mats(ag, mat_size)
                write_dist_mat_file(full_mat, pdb_name, ag, out_path)
                write_pickle_file(full_mat, pdb_name, ag, out_path)
                count += 1
            # Deliberate best-effort: any failure is logged to the error file
            # so one bad structure does not abort the whole batch.
            except:
                print('error: ' + pdb_name)
                errors += pdb_name + '\n'
            if count >= top:
                break
    with open(out_path + '_error.txt', 'w') as f:
        f.write(errors)
    return
| 25.501475
| 131
| 0.682244
|
from numpy.core.numeric import full
from numpy.lib.function_base import append
import prody as pr
import os
import numpy
import matplotlib as mpl
import pylab
from itertools import combinations, combinations_with_replacement
from docopt import docopt
import itertools
import pickle
import sys
from scipy.linalg.basic import matrix_balance
from scipy.spatial.distance import cdist
from . import ligand_database as ld
from . import features_pdb2dihe as fpdh
metal_sel = 'ion or name NI MN ZN CO CU MG FE'
def get_atg(full_pdb):
metal = full_pdb.select(metal_sel)[0]
contact_aas = full_pdb.select('protein and not carbon and not hydrogen and within 2.83 of resindex ' + str(metal.getResindex()))
contact_aa_resinds = numpy.unique(contact_aas.getResindices())
extention = 1
coords = []
resnames = []
names = []
resnums = []
resn = 1
for resind in contact_aa_resinds:
ext_inds = ld.extend_res_indices([resind], full_pdb, extend =extention)
if len(ext_inds) == 2:
if ext_inds[0] == resind:
ext_inds.insert(0, resind)
else:
ext_inds.append(resind)
if len(ext_inds) == 1:
ext_inds.append(resind)
ext_inds.append(resind)
for ind in ext_inds:
aa = full_pdb.select('resindex ' + str(ind))
coords.extend(aa.getCoords())
resnames.extend(aa.getResnames())
names.extend(aa.getNames())
resnums.extend([resn for _i in range(len(aa))])
resn += 1
if len(contact_aa_resinds) == 3:
coords.extend([])
resnames.extend([])
names.extend([])
resnums.extend([])
ag = pr.AtomGroup('0-1-2-3')
ag.setCoords(coords)
ag.setResnums(resnums)
ag.setResnames(resnames)
ag.setNames(names)
return ag
def get_atgs(full_pdb, contain_metal = True):
if contain_metal:
metal = full_pdb.select(metal_sel)[0]
contact_aas = full_pdb.select('protein and not carbon and not hydrogen and within 2.83 of resindex ' + str(metal.getResindex()))
else:
contact_aas = full_pdb.select('resname HIS CYS GLU ASP')
if not contact_aas and len(numpy.unique(contact_aas.getResindices())) > 4:
return []
contact_aa_resinds = numpy.unique(contact_aas.getResindices())
extention = 1
ags = []
for per in [range(len(contact_aa_resinds))]:
print(per)
coords = []
resnames = []
names = []
resnums = []
resn = 1
for idx in per:
resind = contact_aa_resinds[idx]
ext_inds = ld.extend_res_indices([resind], full_pdb, extend =extention)
if len(ext_inds) == 2:
if ext_inds[0] == resind:
ext_inds.insert(0, resind)
else:
ext_inds.append(resind)
if len(ext_inds) == 1:
ext_inds.append(resind)
ext_inds.append(resind)
for ind in ext_inds:
aa = full_pdb.select('resindex ' + str(ind))
coords.extend(aa.getCoords())
resnames.extend(aa.getResnames())
names.extend(aa.getNames())
resnums.extend([resn for _i in range(len(aa))])
resn += 1
ag = pr.AtomGroup('-'.join([str(p) for p in per]))
ag.setCoords(coords)
ag.setResnums(resnums)
ag.setResnames(resnames)
ag.setNames(names)
ags.append(ag)
return ags
def get_bb_dist_seq(core):
n_coords = core.select('name N').getCoords()
c_coords = core.select('name C').getCoords()
ca_coords = core.select('name CA').getCoords()
n_n = cdist(n_coords, n_coords)
c_c = cdist(c_coords, c_coords)
ca_ca = cdist(ca_coords, ca_coords)
cb_coords = []
for i in range(len(n_coords)):
Ca = ca_coords[i]
C = c_coords[i]
N = n_coords[i]
b = Ca - N
c = C - Ca
a = numpy.cross(b, c)
Cb = -0.58273431*a + 0.56802827*b - 0.54067466*c + Ca
cb_coords.append(Cb)
cb_coords = core.select('name CB').getCoords()
cb_cb = cdist(cb_coords, cb_coords)
return n_n, c_c, ca_ca, cb_cb
def get_dihe(ag):
nres = len(ag.select('name CA'))
print(nres)
dist, _omega, _theta_asym, _phi_asym = fpdh.get_neighbors(ag, nres, 20.0)
omega = numpy.zeros((nres, nres))
theta_asym = numpy.zeros((nres, nres))
phi_asym = numpy.zeros((nres, nres))
for i in range(1, nres, 3):
for j in range(1, nres, 3):
omega[i, j] = _omega[i, j]
theta_asym[i, j] = _theta_asym[i, j]
phi_asym[i, j] = _phi_asym[i, j]
return omega, theta_asym, phi_asym
def get_seq_mat(ag, matrix_size = 12):
seq = ag.select('name CA').getResnames()
threelettercodes = ['ALA', 'ARG', 'ASN', 'ASP', 'CYS', 'GLU', 'GLN', 'GLY', 'HIS', 'ILE', 'LEU', 'LYS', 'MET',\
'PHE', 'PRO', 'SER', 'THR', 'TRP', 'TYR', 'VAL']
seq_channels = numpy.zeros([40, matrix_size, matrix_size], dtype=int)
for i in range(len(seq)):
aa = seq[i]
try:
idx = threelettercodes.index(aa)
except:
print('Resname of following atom not found: {}'.format(aa))
continue
for j in range(len(seq)):
seq_channels[idx][i][j] = 1
seq_channels[idx+20][j][i] = 1 # vertical columns of 1's in next 20 channels
return seq_channels
def mk_full_mats(ag, matrix_size = 12):
nres = len(ag.select('name CA'))
n_n, c_c, ca_ca, cb_cb = get_bb_dist_seq(ag)
omega, theta_asym, phi_asym = get_dihe(ag)
seq_mats = get_seq_mat(ag, matrix_size)
full_mat = numpy.zeros((47, matrix_size, matrix_size))
full_mat[0,0:n_n.shape[0], 0:n_n.shape[1]] = n_n
full_mat[1,0:c_c.shape[0], 0:c_c.shape[1]] = c_c
full_mat[2,0:ca_ca.shape[0], 0:ca_ca.shape[1]] = ca_ca
full_mat[3,0:cb_cb.shape[0], 0:cb_cb.shape[1]] = cb_cb
full_mat[4,0:omega.shape[0], 0:omega.shape[1]] = omega
full_mat[5,0:theta_asym.shape[0], 0:theta_asym.shape[1]] = theta_asym
full_mat[6,0:phi_asym.shape[0], 0:phi_asym.shape[1]] = phi_asym
for i in range(7, 47):
full_mat[i, :, :] = seq_mats[i - 7]
return full_mat
def write_pickle_file(full_mat, pdb, ag, out_folder, tag = ''):
numpy.set_printoptions(threshold=numpy.inf)
pdb_name = pdb.split('.')[0]
pkl_file = out_folder + pdb_name + '_full_mat_' + ag.getTitle() + tag + '.pkl'
with open(pkl_file, 'wb') as f:
print(pkl_file)
pickle.dump(full_mat, f)
return
def write_dist_mat_file(mat, pdb, ag, out_folder, tag = ''):
numpy.set_printoptions(threshold=numpy.inf)
dist_mat_file = pdb.split('.')[0]
dist_mat_file = out_folder + dist_mat_file + '_full_mat_' + ag.getTitle() + tag + '.txt'
with open(dist_mat_file, 'w') as open_file:
for i in mat:
open_file.write(str(i) + '\n')
return
def run_mk_bb_info_mats(workdir, out_path, mat_size = 12, top = 1000, contain_metal = True, opts = None):
os.makedirs(out_path, exist_ok=True)
count = 0
errors = ''
for pdb_name in os.listdir(workdir):
if count >= top:
break
if '.pdb' not in pdb_name:
continue
pdb_file = workdir + pdb_name
pdb = pr.parsePDB(pdb_file)
ags = get_atgs(pdb, contain_metal)
for ag in ags:
try:
if len(ag.select('name CA'))> 12 or len(ag.select('name CA')) < 7:
print(pdb_name + ' not used. ')
continue
full_mat = mk_full_mats(ag, mat_size)
write_dist_mat_file(full_mat, pdb_name, ag, out_path)
write_pickle_file(full_mat, pdb_name, ag, out_path)
count += 1
except:
print('error: ' + pdb_name)
errors += pdb_name + '\n'
if count >= top:
break
with open(out_path + '_error.txt', 'w') as f:
f.write(errors)
return
| true
| true
|
f71791c60cc7fc628325ca20e013c1f715b14cbf
| 1,168
|
py
|
Python
|
openamundsen/__init__.py
|
openamundsen/openamundsen
|
2ac09eb34b0c72c84c421a0dac08d114a05b7b1c
|
[
"MIT"
] | 3
|
2021-05-28T06:46:36.000Z
|
2021-06-14T13:39:25.000Z
|
openamundsen/__init__.py
|
openamundsen/openamundsen
|
2ac09eb34b0c72c84c421a0dac08d114a05b7b1c
|
[
"MIT"
] | 22
|
2021-04-28T12:31:58.000Z
|
2022-03-09T18:29:12.000Z
|
openamundsen/__init__.py
|
openamundsen/openamundsen
|
2ac09eb34b0c72c84c421a0dac08d114a05b7b1c
|
[
"MIT"
] | 1
|
2021-06-01T12:48:54.000Z
|
2021-06-01T12:48:54.000Z
|
from .conf import Configuration, parse_config, read_config
from .model import OpenAmundsen, Model
from . import constants, errors, terrain
# Get version (method as used by matplotlib: https://github.com/matplotlib/matplotlib/blob/bcc1ce8461f5b6e874baaaa02ef776d0243a4abe/lib/matplotlib/__init__.py#L133-L151)
def __getattr__(name):
    """Resolve `__version__` lazily on first access (PEP 562 module getattr).

    When running from a full git checkout, the version is derived via
    setuptools_scm; otherwise it falls back to a generated `_version` module
    or the sentinel '0.0.0+UNKNOWN'. The result is cached in the module
    global so this hook only fires once.
    """
    if name == '__version__':
        from pathlib import Path
        import setuptools_scm

        global __version__
        # Repository root: one level above the package directory.
        root = Path(__file__).resolve().parents[1]
        if (root / '.git').exists() and not (root / '.git/shallow').exists():
            # Full (non-shallow) git clone: ask setuptools_scm directly.
            __version__ = setuptools_scm.get_version(
                root=root,
                version_scheme='post-release',
                fallback_version='0.0.0+UNKNOWN',
            )
        else:
            # Installed package (or shallow clone): use the version file
            # written at build time, if present.
            try:
                from . import _version
                __version__ = _version.version
            except ImportError:
                __version__ = '0.0.0+UNKNOWN'

        return __version__

    raise AttributeError(f'module {__name__!r} has no attribute {name!r}')
__all__ = [
'OpenAmundsen',
'Configuration',
'parse_config',
'read_config',
]
| 33.371429
| 169
| 0.626712
|
from .conf import Configuration, parse_config, read_config
from .model import OpenAmundsen, Model
from . import constants, errors, terrain
ttr__(name):
if name == '__version__':
from pathlib import Path
import setuptools_scm
global __version__
root = Path(__file__).resolve().parents[1]
if (root / '.git').exists() and not (root / '.git/shallow').exists():
__version__ = setuptools_scm.get_version(
root=root,
version_scheme='post-release',
fallback_version='0.0.0+UNKNOWN',
)
else:
try:
from . import _version
__version__ = _version.version
except ImportError:
__version__ = '0.0.0+UNKNOWN'
return __version__
raise AttributeError(f'module {__name__!r} has no attribute {name!r}')
__all__ = [
'OpenAmundsen',
'Configuration',
'parse_config',
'read_config',
]
| true
| true
|
f71791d678307a538466ef5f55e3423ce32202b3
| 2,431
|
py
|
Python
|
h2o-py/tests/testdir_jira/pyunit_NOPASS_hex_1897_glm_offset.py
|
ChristosChristofidis/h2o-3
|
2a926c0950a98eff5a4c06aeaf0373e17176ecd8
|
[
"Apache-2.0"
] | null | null | null |
h2o-py/tests/testdir_jira/pyunit_NOPASS_hex_1897_glm_offset.py
|
ChristosChristofidis/h2o-3
|
2a926c0950a98eff5a4c06aeaf0373e17176ecd8
|
[
"Apache-2.0"
] | null | null | null |
h2o-py/tests/testdir_jira/pyunit_NOPASS_hex_1897_glm_offset.py
|
ChristosChristofidis/h2o-3
|
2a926c0950a98eff5a4c06aeaf0373e17176ecd8
|
[
"Apache-2.0"
] | 1
|
2020-12-18T19:20:02.000Z
|
2020-12-18T19:20:02.000Z
|
import sys
sys.path.insert(1, "../../")
import h2o
def offset_1897(ip, port):
    # Regression test for HEX-1897 (Python 2): compares H2O GLM residual
    # deviance against precomputed R reference values, with and without an
    # offset column, for binomial and poisson families on the prostate data.
    h2o.init(ip, port)

    print 'Checking binomial models for GLM with and without offset'
    print 'Import prostate dataset into H2O and R...'
    prostate_hex = h2o.import_frame(h2o.locate("smalldata/prostate/prostate.csv"))

    # Binomial family, no offset; reference residual deviance from R.
    print "Checking binomial model without offset..."
    prostate_glm_h2o = h2o.glm(x=prostate_hex["RACE", "DPROS", "DCAPS", "PSA", "VOL", "GLEASON"],
                               y=prostate_hex["CAPSULE"], training_frame=prostate_hex, family="binomial", standardize=False)
    print "h2o residual: {0}".format(prostate_glm_h2o.residual_deviance())
    print "r residual: {0}".format(379.053509501537)
    assert abs(379.053509501537 - prostate_glm_h2o.residual_deviance()) < 0.1

    # Binomial family with AGE as the offset column.
    print "Checking binomial model with offset..."
    prostate_glm_h2o = h2o.glm(x=prostate_hex["RACE", "DPROS", "DCAPS", "PSA", "VOL", "GLEASON", "AGE"],
                               y=prostate_hex["CAPSULE"], training_frame=prostate_hex, family="binomial",
                               offset_column = "AGE", standardize = False)
    print "h2o residual: {0}".format(prostate_glm_h2o.residual_deviance())
    print "r residual: {0}".format(1515.91815848623)
    assert abs(1515.91815848623 - prostate_glm_h2o.residual_deviance()) < 0.1

    # NOTE(review): the log text says "binomial" but family="poisson" below —
    # the messages look copy-pasted; the assertions are what matter.
    print "Checking binomial model without offset..."
    prostate_glm_h2o = h2o.glm(x=prostate_hex["RACE", "DPROS", "DCAPS", "PSA", "VOL", "GLEASON"],
                               y=prostate_hex["CAPSULE"], training_frame=prostate_hex, family="poisson", standardize=False)
    print "h2o residual: {0}".format(prostate_glm_h2o.residual_deviance())
    print "r residual: {0}".format(216.339989007507)
    assert abs(216.339989007507 - prostate_glm_h2o.residual_deviance()) < 0.1

    # Poisson family with AGE as the offset column (label again says binomial).
    print "Checking binomial model with offset..."
    prostate_glm_h2o = h2o.glm(x=prostate_hex["RACE", "DPROS", "DCAPS", "PSA", "VOL", "GLEASON", "AGE"],
                               y=prostate_hex["CAPSULE"], training_frame=prostate_hex, family="poisson",
                               offset_column = "AGE", standardize = False)
    print "h2o residual: {0}".format(prostate_glm_h2o.residual_deviance())
    print "r residual: {0}".format(2761.76218461138)
    assert abs(2761.76218461138 - prostate_glm_h2o.residual_deviance()) < 0.1
if __name__ == "__main__":
h2o.run_test(sys.argv, offset_1897)
| 55.25
| 124
| 0.661456
|
import sys
sys.path.insert(1, "../../")
import h2o
def offset_1897(ip, port):
h2o.init(ip, port)
print 'Checking binomial models for GLM with and without offset'
print 'Import prostate dataset into H2O and R...'
prostate_hex = h2o.import_frame(h2o.locate("smalldata/prostate/prostate.csv"))
print "Checking binomial model without offset..."
prostate_glm_h2o = h2o.glm(x=prostate_hex["RACE", "DPROS", "DCAPS", "PSA", "VOL", "GLEASON"],
y=prostate_hex["CAPSULE"], training_frame=prostate_hex, family="binomial", standardize=False)
print "h2o residual: {0}".format(prostate_glm_h2o.residual_deviance())
print "r residual: {0}".format(379.053509501537)
assert abs(379.053509501537 - prostate_glm_h2o.residual_deviance()) < 0.1
print "Checking binomial model with offset..."
prostate_glm_h2o = h2o.glm(x=prostate_hex["RACE", "DPROS", "DCAPS", "PSA", "VOL", "GLEASON", "AGE"],
y=prostate_hex["CAPSULE"], training_frame=prostate_hex, family="binomial",
offset_column = "AGE", standardize = False)
print "h2o residual: {0}".format(prostate_glm_h2o.residual_deviance())
print "r residual: {0}".format(1515.91815848623)
assert abs(1515.91815848623 - prostate_glm_h2o.residual_deviance()) < 0.1
print "Checking binomial model without offset..."
prostate_glm_h2o = h2o.glm(x=prostate_hex["RACE", "DPROS", "DCAPS", "PSA", "VOL", "GLEASON"],
y=prostate_hex["CAPSULE"], training_frame=prostate_hex, family="poisson", standardize=False)
print "h2o residual: {0}".format(prostate_glm_h2o.residual_deviance())
print "r residual: {0}".format(216.339989007507)
assert abs(216.339989007507 - prostate_glm_h2o.residual_deviance()) < 0.1
print "Checking binomial model with offset..."
prostate_glm_h2o = h2o.glm(x=prostate_hex["RACE", "DPROS", "DCAPS", "PSA", "VOL", "GLEASON", "AGE"],
y=prostate_hex["CAPSULE"], training_frame=prostate_hex, family="poisson",
offset_column = "AGE", standardize = False)
print "h2o residual: {0}".format(prostate_glm_h2o.residual_deviance())
print "r residual: {0}".format(2761.76218461138)
assert abs(2761.76218461138 - prostate_glm_h2o.residual_deviance()) < 0.1
if __name__ == "__main__":
h2o.run_test(sys.argv, offset_1897)
| false
| true
|
f71791e019fc01edfa394b5acc7c6d6563b6ea45
| 35,147
|
py
|
Python
|
tests/components/mqtt/test_light_json.py
|
jlvaillant/core
|
ae37f9a1d9c5067957854b3c25dcc73fe9a10bee
|
[
"Apache-2.0"
] | 2
|
2019-11-20T20:56:59.000Z
|
2021-01-03T08:52:18.000Z
|
tests/components/mqtt/test_light_json.py
|
jlvaillant/core
|
ae37f9a1d9c5067957854b3c25dcc73fe9a10bee
|
[
"Apache-2.0"
] | 2
|
2021-06-08T21:54:21.000Z
|
2022-03-12T00:37:59.000Z
|
tests/components/mqtt/test_light_json.py
|
sampou/homeassistant
|
6c1a5d9e5a72eed3a582457142b1db7867b6ff7b
|
[
"Apache-2.0"
] | 1
|
2021-04-18T19:36:34.000Z
|
2021-04-18T19:36:34.000Z
|
"""The tests for the MQTT JSON light platform.
Configuration with RGB, brightness, color temp, effect, white value and XY:
light:
platform: mqtt_json
name: mqtt_json_light_1
state_topic: "home/rgb1"
command_topic: "home/rgb1/set"
brightness: true
color_temp: true
effect: true
rgb: true
white_value: true
xy: true
Configuration with RGB, brightness, color temp, effect, white value:
light:
platform: mqtt_json
name: mqtt_json_light_1
state_topic: "home/rgb1"
command_topic: "home/rgb1/set"
brightness: true
color_temp: true
effect: true
rgb: true
white_value: true
Configuration with RGB, brightness, color temp and effect:
light:
platform: mqtt_json
name: mqtt_json_light_1
state_topic: "home/rgb1"
command_topic: "home/rgb1/set"
brightness: true
color_temp: true
effect: true
rgb: true
Configuration with RGB, brightness and color temp:
light:
platform: mqtt_json
name: mqtt_json_light_1
state_topic: "home/rgb1"
command_topic: "home/rgb1/set"
brightness: true
rgb: true
color_temp: true
Configuration with RGB, brightness:
light:
platform: mqtt_json
name: mqtt_json_light_1
state_topic: "home/rgb1"
command_topic: "home/rgb1/set"
brightness: true
rgb: true
Config without RGB:
light:
platform: mqtt_json
name: mqtt_json_light_1
state_topic: "home/rgb1"
command_topic: "home/rgb1/set"
brightness: true
Config without RGB and brightness:
light:
platform: mqtt_json
name: mqtt_json_light_1
state_topic: "home/rgb1"
command_topic: "home/rgb1/set"
Config with brightness and scale:
light:
platform: mqtt_json
name: test
state_topic: "mqtt_json_light_1"
command_topic: "mqtt_json_light_1/set"
brightness: true
brightness_scale: 99
"""
import json
from unittest import mock
from unittest.mock import patch
from homeassistant.components import light
from homeassistant.const import (
ATTR_ASSUMED_STATE,
ATTR_SUPPORTED_FEATURES,
STATE_OFF,
STATE_ON,
)
import homeassistant.core as ha
from homeassistant.setup import async_setup_component
from .test_common import (
help_test_availability_without_topic,
help_test_custom_availability_payload,
help_test_default_availability_payload,
help_test_discovery_broken,
help_test_discovery_removal,
help_test_discovery_update,
help_test_discovery_update_attr,
help_test_entity_device_info_remove,
help_test_entity_device_info_update,
help_test_entity_device_info_with_connection,
help_test_entity_device_info_with_identifier,
help_test_entity_id_update_discovery_update,
help_test_entity_id_update_subscriptions,
help_test_setting_attribute_via_mqtt_json_message,
help_test_setting_attribute_with_template,
help_test_unique_id,
help_test_update_with_json_attrs_bad_JSON,
help_test_update_with_json_attrs_not_dict,
)
from tests.common import async_fire_mqtt_message, mock_coro
from tests.components.light import common
DEFAULT_CONFIG = {
light.DOMAIN: {
"platform": "mqtt",
"schema": "json",
"name": "test",
"command_topic": "test-topic",
}
}
class JsonValidator(object):
    """Helper that compares equal to any string carrying the same JSON payload.

    Used in mock assertions so published payloads can be checked without
    depending on key order or whitespace of the serialized JSON.
    """

    def __init__(self, jsondata):
        """Initialize JSON validator with the expected JSON document."""
        self.jsondata = jsondata

    def __eq__(self, other):
        """Return True if *other* parses to the same JSON structure."""
        return json.loads(self.jsondata) == json.loads(other)

    def __repr__(self):
        """Show the expected JSON in mock assertion failure messages."""
        return f"JsonValidator({self.jsondata!r})"
async def test_fail_setup_if_no_command_topic(hass, mqtt_mock):
    """Test if setup fails with no command topic."""
    config = {light.DOMAIN: {"platform": "mqtt", "schema": "json", "name": "test"}}
    assert await async_setup_component(hass, light.DOMAIN, config)
    # The schema rejects a config without command_topic, so no entity appears.
    assert hass.states.get("light.test") is None
async def test_no_color_brightness_color_temp_white_val_if_no_topics(hass, mqtt_mock):
    """Test for no RGB, brightness, color temp, effect, white val or XY."""
    assert await async_setup_component(
        hass,
        light.DOMAIN,
        {
            light.DOMAIN: {
                "platform": "mqtt",
                "schema": "json",
                "name": "test",
                "state_topic": "test_light_rgb",
                "command_topic": "test_light_rgb/set",
            }
        },
    )

    # None of the optional capabilities were enabled in the config, so none
    # of these attributes should ever be populated.
    unsupported_attributes = (
        "rgb_color",
        "brightness",
        "color_temp",
        "effect",
        "white_value",
        "xy_color",
        "hs_color",
    )

    state = hass.states.get("light.test")
    assert state.state == STATE_OFF
    assert state.attributes.get(ATTR_SUPPORTED_FEATURES) == 40
    for attribute in unsupported_attributes:
        assert state.attributes.get(attribute) is None

    async_fire_mqtt_message(hass, "test_light_rgb", '{"state":"ON"}')

    state = hass.states.get("light.test")
    assert state.state == STATE_ON
    for attribute in unsupported_attributes:
        assert state.attributes.get(attribute) is None
async def test_controlling_state_via_topic(hass, mqtt_mock):
    """Test the controlling of the state via topic."""
    assert await async_setup_component(
        hass,
        light.DOMAIN,
        {
            light.DOMAIN: {
                "platform": "mqtt",
                "schema": "json",
                "name": "test",
                "state_topic": "test_light_rgb",
                "command_topic": "test_light_rgb/set",
                "brightness": True,
                "color_temp": True,
                "effect": True,
                "rgb": True,
                "white_value": True,
                "xy": True,
                "hs": True,
                "qos": "0",
            }
        },
    )
    # Before any state message arrives, the light is off and no attribute set.
    state = hass.states.get("light.test")
    assert state.state == STATE_OFF
    # 191 — presumably the OR of all feature flags enabled above; confirm
    # against the light component's SUPPORT_* constants.
    assert state.attributes.get(ATTR_SUPPORTED_FEATURES) == 191
    assert state.attributes.get("rgb_color") is None
    assert state.attributes.get("brightness") is None
    assert state.attributes.get("color_temp") is None
    assert state.attributes.get("effect") is None
    assert state.attributes.get("white_value") is None
    assert state.attributes.get("xy_color") is None
    assert state.attributes.get("hs_color") is None
    assert not state.attributes.get(ATTR_ASSUMED_STATE)
    # Turn on the light, full white
    async_fire_mqtt_message(
        hass,
        "test_light_rgb",
        '{"state":"ON",'
        '"color":{"r":255,"g":255,"b":255},'
        '"brightness":255,'
        '"color_temp":155,'
        '"effect":"colorloop",'
        '"white_value":150}',
    )
    # The RGB white maps onto xy (0.323, 0.329) and hs (0, 0) representations.
    state = hass.states.get("light.test")
    assert state.state == STATE_ON
    assert state.attributes.get("rgb_color") == (255, 255, 255)
    assert state.attributes.get("brightness") == 255
    assert state.attributes.get("color_temp") == 155
    assert state.attributes.get("effect") == "colorloop"
    assert state.attributes.get("white_value") == 150
    assert state.attributes.get("xy_color") == (0.323, 0.329)
    assert state.attributes.get("hs_color") == (0.0, 0.0)
    # Turn the light off
    async_fire_mqtt_message(hass, "test_light_rgb", '{"state":"OFF"}')
    state = hass.states.get("light.test")
    assert state.state == STATE_OFF
    # A brightness-only message turns the light back on with that brightness.
    async_fire_mqtt_message(hass, "test_light_rgb", '{"state":"ON", "brightness":100}')
    light_state = hass.states.get("light.test")
    assert light_state.attributes["brightness"] == 100
    # NOTE(review): (125,125,125) at brightness 100 reads back as full white —
    # presumably the color is re-normalized against brightness; confirm.
    async_fire_mqtt_message(
        hass, "test_light_rgb", '{"state":"ON", ' '"color":{"r":125,"g":125,"b":125}}'
    )
    light_state = hass.states.get("light.test")
    assert light_state.attributes.get("rgb_color") == (255, 255, 255)
    # Each color model can also be set directly via the state topic.
    async_fire_mqtt_message(
        hass, "test_light_rgb", '{"state":"ON", "color":{"x":0.135,"y":0.135}}'
    )
    light_state = hass.states.get("light.test")
    assert light_state.attributes.get("xy_color") == (0.141, 0.14)
    async_fire_mqtt_message(
        hass, "test_light_rgb", '{"state":"ON", "color":{"h":180,"s":50}}'
    )
    light_state = hass.states.get("light.test")
    assert light_state.attributes.get("hs_color") == (180.0, 50.0)
    # Color temperature, effect and white value are updated independently.
    async_fire_mqtt_message(hass, "test_light_rgb", '{"state":"ON", "color_temp":155}')
    light_state = hass.states.get("light.test")
    assert light_state.attributes.get("color_temp") == 155
    async_fire_mqtt_message(
        hass, "test_light_rgb", '{"state":"ON", "effect":"colorloop"}'
    )
    light_state = hass.states.get("light.test")
    assert light_state.attributes.get("effect") == "colorloop"
    async_fire_mqtt_message(hass, "test_light_rgb", '{"state":"ON", "white_value":155}')
    light_state = hass.states.get("light.test")
    assert light_state.attributes.get("white_value") == 155
async def test_sending_mqtt_commands_and_optimistic(hass, mqtt_mock):
    """Test the sending of command in optimistic mode."""
    # A previously recorded state that the entity should restore on startup.
    fake_state = ha.State(
        "light.test",
        "on",
        {
            "brightness": 95,
            "hs_color": [100, 100],
            "effect": "random",
            "color_temp": 100,
            "white_value": 50,
        },
    )
    # No state_topic is configured, so the light runs in optimistic mode and
    # seeds its state from the restored fake_state.
    with patch(
        "homeassistant.helpers.restore_state.RestoreEntity.async_get_last_state",
        return_value=mock_coro(fake_state),
    ):
        assert await async_setup_component(
            hass,
            light.DOMAIN,
            {
                light.DOMAIN: {
                    "platform": "mqtt",
                    "schema": "json",
                    "name": "test",
                    "command_topic": "test_light_rgb/set",
                    "brightness": True,
                    "color_temp": True,
                    "effect": True,
                    "hs": True,
                    "rgb": True,
                    "xy": True,
                    "white_value": True,
                    "qos": 2,
                }
            },
        )
    # The restored attributes are visible, and the state is flagged as assumed.
    state = hass.states.get("light.test")
    assert state.state == STATE_ON
    assert state.attributes.get("brightness") == 95
    assert state.attributes.get("hs_color") == (100, 100)
    assert state.attributes.get("effect") == "random"
    assert state.attributes.get("color_temp") == 100
    assert state.attributes.get("white_value") == 50
    assert state.attributes.get(ATTR_SUPPORTED_FEATURES) == 191
    assert state.attributes.get(ATTR_ASSUMED_STATE)
    # Commands are published with the configured QoS of 2.
    await common.async_turn_on(hass, "light.test")
    mqtt_mock.async_publish.assert_called_once_with(
        "test_light_rgb/set", '{"state": "ON"}', 2, False
    )
    mqtt_mock.async_publish.reset_mock()
    state = hass.states.get("light.test")
    assert state.state == STATE_ON
    await common.async_turn_on(hass, "light.test", color_temp=90)
    mqtt_mock.async_publish.assert_called_once_with(
        "test_light_rgb/set",
        JsonValidator('{"state": "ON", "color_temp": 90}'),
        2,
        False,
    )
    mqtt_mock.async_publish.reset_mock()
    state = hass.states.get("light.test")
    assert state.state == STATE_ON
    await common.async_turn_off(hass, "light.test")
    mqtt_mock.async_publish.assert_called_once_with(
        "test_light_rgb/set", '{"state": "OFF"}', 2, False
    )
    mqtt_mock.async_publish.reset_mock()
    state = hass.states.get("light.test")
    assert state.state == STATE_OFF
    mqtt_mock.reset_mock()
    # Fire three color commands; with rgb/xy/hs all enabled, each published
    # payload carries every color representation of the requested color.
    await common.async_turn_on(
        hass, "light.test", brightness=50, xy_color=[0.123, 0.123]
    )
    await common.async_turn_on(hass, "light.test", brightness=50, hs_color=[359, 78])
    await common.async_turn_on(
        hass, "light.test", rgb_color=[255, 128, 0], white_value=80
    )
    mqtt_mock.async_publish.assert_has_calls(
        [
            mock.call(
                "test_light_rgb/set",
                JsonValidator(
                    '{"state": "ON", "color": {"r": 0, "g": 123, "b": 255,'
                    ' "x": 0.14, "y": 0.131, "h": 210.824, "s": 100.0},'
                    ' "brightness": 50}'
                ),
                2,
                False,
            ),
            mock.call(
                "test_light_rgb/set",
                JsonValidator(
                    '{"state": "ON", "color": {"r": 255, "g": 56, "b": 59,'
                    ' "x": 0.654, "y": 0.301, "h": 359.0, "s": 78.0},'
                    ' "brightness": 50}'
                ),
                2,
                False,
            ),
            mock.call(
                "test_light_rgb/set",
                JsonValidator(
                    '{"state": "ON", "color": {"r": 255, "g": 128, "b": 0,'
                    ' "x": 0.611, "y": 0.375, "h": 30.118, "s": 100.0},'
                    ' "white_value": 80}'
                ),
                2,
                False,
            ),
        ],
        any_order=True,
    )
    # Optimistic mode: the local state reflects the last command immediately.
    state = hass.states.get("light.test")
    assert state.state == STATE_ON
    assert state.attributes["rgb_color"] == (255, 128, 0)
    assert state.attributes["brightness"] == 50
    assert state.attributes["hs_color"] == (30.118, 100)
    assert state.attributes["white_value"] == 80
    assert state.attributes["xy_color"] == (0.611, 0.375)
async def test_sending_hs_color(hass, mqtt_mock):
    """Test light.turn_on with hs color sends hs color parameters."""
    assert await async_setup_component(
        hass,
        light.DOMAIN,
        {
            light.DOMAIN: {
                "platform": "mqtt",
                "schema": "json",
                "name": "test",
                "command_topic": "test_light_rgb/set",
                "brightness": True,
                "hs": True,
            }
        },
    )
    state = hass.states.get("light.test")
    assert state.state == STATE_OFF
    mqtt_mock.reset_mock()
    # Only "hs" is enabled, so every requested color model (xy, hs, rgb) must
    # be converted to h/s values in the published payload.
    await common.async_turn_on(
        hass, "light.test", brightness=50, xy_color=[0.123, 0.123]
    )
    await common.async_turn_on(hass, "light.test", brightness=50, hs_color=[359, 78])
    await common.async_turn_on(
        hass, "light.test", rgb_color=[255, 128, 0], white_value=80
    )
    mqtt_mock.async_publish.assert_has_calls(
        [
            mock.call(
                "test_light_rgb/set",
                JsonValidator(
                    '{"state": "ON", "color": {"h": 210.824, "s": 100.0},'
                    ' "brightness": 50}'
                ),
                0,
                False,
            ),
            mock.call(
                "test_light_rgb/set",
                JsonValidator(
                    '{"state": "ON", "color": {"h": 359.0, "s": 78.0},'
                    ' "brightness": 50}'
                ),
                0,
                False,
            ),
            mock.call(
                "test_light_rgb/set",
                JsonValidator(
                    '{"state": "ON", "color": {"h": 30.118, "s": 100.0},'
                    ' "white_value": 80}'
                ),
                0,
                False,
            ),
        ],
        any_order=True,
    )
async def test_sending_rgb_color_no_brightness(hass, mqtt_mock):
    """Test that turn_on color arguments are published as RGB only.

    Only "rgb" is enabled; there is no separate brightness key, so the
    requested brightness is presumably folded into the RGB values
    (e.g. xy 0.123/0.123 at brightness 50 becomes r=0,g=24,b=50) — confirm
    against the JSON-schema light's color handling.
    """
    assert await async_setup_component(
        hass,
        light.DOMAIN,
        {
            light.DOMAIN: {
                "platform": "mqtt",
                "schema": "json",
                "name": "test",
                "command_topic": "test_light_rgb/set",
                "rgb": True,
            }
        },
    )
    state = hass.states.get("light.test")
    assert state.state == STATE_OFF
    await common.async_turn_on(
        hass, "light.test", brightness=50, xy_color=[0.123, 0.123]
    )
    await common.async_turn_on(hass, "light.test", brightness=50, hs_color=[359, 78])
    await common.async_turn_on(
        hass, "light.test", rgb_color=[255, 128, 0], brightness=255
    )
    mqtt_mock.async_publish.assert_has_calls(
        [
            mock.call(
                "test_light_rgb/set",
                JsonValidator('{"state": "ON", "color": {"r": 0, "g": 24, "b": 50}}'),
                0,
                False,
            ),
            mock.call(
                "test_light_rgb/set",
                JsonValidator('{"state": "ON", "color": {"r": 50, "g": 11, "b": 11}}'),
                0,
                False,
            ),
            mock.call(
                "test_light_rgb/set",
                JsonValidator('{"state": "ON", "color": {"r": 255, "g": 128, "b": 0}}'),
                0,
                False,
            ),
        ],
        any_order=True,
    )
async def test_sending_rgb_color_with_brightness(hass, mqtt_mock):
    """Test that turn_on color arguments are published as full RGB plus brightness.

    With both "rgb" and "brightness" enabled, brightness is sent as its own
    key and the RGB values stay at full scale.
    """
    assert await async_setup_component(
        hass,
        light.DOMAIN,
        {
            light.DOMAIN: {
                "platform": "mqtt",
                "schema": "json",
                "name": "test",
                "command_topic": "test_light_rgb/set",
                "brightness": True,
                "rgb": True,
            }
        },
    )
    state = hass.states.get("light.test")
    assert state.state == STATE_OFF
    await common.async_turn_on(
        hass, "light.test", brightness=50, xy_color=[0.123, 0.123]
    )
    await common.async_turn_on(hass, "light.test", brightness=50, hs_color=[359, 78])
    await common.async_turn_on(
        hass, "light.test", rgb_color=[255, 128, 0], white_value=80
    )
    mqtt_mock.async_publish.assert_has_calls(
        [
            mock.call(
                "test_light_rgb/set",
                JsonValidator(
                    '{"state": "ON", "color": {"r": 0, "g": 123, "b": 255},'
                    ' "brightness": 50}'
                ),
                0,
                False,
            ),
            mock.call(
                "test_light_rgb/set",
                JsonValidator(
                    '{"state": "ON", "color": {"r": 255, "g": 56, "b": 59},'
                    ' "brightness": 50}'
                ),
                0,
                False,
            ),
            mock.call(
                "test_light_rgb/set",
                JsonValidator(
                    '{"state": "ON", "color": {"r": 255, "g": 128, "b": 0},'
                    ' "white_value": 80}'
                ),
                0,
                False,
            ),
        ],
        any_order=True,
    )
async def test_sending_xy_color(hass, mqtt_mock):
    """Test that turn_on color arguments are published as xy coordinates.

    Only "xy" is enabled, so every requested color model (xy, hs, rgb) must
    be converted to x/y values in the published payload.
    """
    assert await async_setup_component(
        hass,
        light.DOMAIN,
        {
            light.DOMAIN: {
                "platform": "mqtt",
                "schema": "json",
                "name": "test",
                "command_topic": "test_light_rgb/set",
                "brightness": True,
                "xy": True,
            }
        },
    )
    state = hass.states.get("light.test")
    assert state.state == STATE_OFF
    await common.async_turn_on(
        hass, "light.test", brightness=50, xy_color=[0.123, 0.123]
    )
    await common.async_turn_on(hass, "light.test", brightness=50, hs_color=[359, 78])
    await common.async_turn_on(
        hass, "light.test", rgb_color=[255, 128, 0], white_value=80
    )
    mqtt_mock.async_publish.assert_has_calls(
        [
            mock.call(
                "test_light_rgb/set",
                JsonValidator(
                    '{"state": "ON", "color": {"x": 0.14, "y": 0.131},'
                    ' "brightness": 50}'
                ),
                0,
                False,
            ),
            mock.call(
                "test_light_rgb/set",
                JsonValidator(
                    '{"state": "ON", "color": {"x": 0.654, "y": 0.301},'
                    ' "brightness": 50}'
                ),
                0,
                False,
            ),
            mock.call(
                "test_light_rgb/set",
                JsonValidator(
                    '{"state": "ON", "color": {"x": 0.611, "y": 0.375},'
                    ' "white_value": 80}'
                ),
                0,
                False,
            ),
        ],
        any_order=True,
    )
async def test_effect(hass, mqtt_mock):
    """Test for effect being sent when included."""
    assert await async_setup_component(
        hass,
        light.DOMAIN,
        {
            light.DOMAIN: {
                "platform": "mqtt",
                "schema": "json",
                "name": "test",
                "command_topic": "test_light_rgb/set",
                "effect": True,
                "qos": 0,
            }
        },
    )
    state = hass.states.get("light.test")
    assert state.state == STATE_OFF
    assert state.attributes.get(ATTR_SUPPORTED_FEATURES) == 44
    # Turning on without an effect argument publishes no "effect" key ...
    await common.async_turn_on(hass, "light.test")
    mqtt_mock.async_publish.assert_called_once_with(
        "test_light_rgb/set", JsonValidator('{"state": "ON"}'), 0, False
    )
    mqtt_mock.async_publish.reset_mock()
    state = hass.states.get("light.test")
    assert state.state == STATE_ON
    # ... and the entity reports the default effect "none".
    assert state.attributes.get("effect") == "none"
    # An explicit effect argument is forwarded verbatim and kept as state.
    await common.async_turn_on(hass, "light.test", effect="rainbow")
    mqtt_mock.async_publish.assert_called_once_with(
        "test_light_rgb/set",
        JsonValidator('{"state": "ON", "effect": "rainbow"}'),
        0,
        False,
    )
    mqtt_mock.async_publish.reset_mock()
    state = hass.states.get("light.test")
    assert state.state == STATE_ON
    assert state.attributes.get("effect") == "rainbow"
    await common.async_turn_on(hass, "light.test", effect="colorloop")
    mqtt_mock.async_publish.assert_called_once_with(
        "test_light_rgb/set",
        JsonValidator('{"state": "ON", "effect": "colorloop"}'),
        0,
        False,
    )
    mqtt_mock.async_publish.reset_mock()
    state = hass.states.get("light.test")
    assert state.state == STATE_ON
    assert state.attributes.get("effect") == "colorloop"
async def test_flash_short_and_long(hass, mqtt_mock):
    """Test for flash length being sent when included."""
    assert await async_setup_component(
        hass,
        light.DOMAIN,
        {
            light.DOMAIN: {
                "platform": "mqtt",
                "schema": "json",
                "name": "test",
                "command_topic": "test_light_rgb/set",
                "flash_time_short": 5,
                "flash_time_long": 15,
                "qos": 0,
            }
        },
    )

    state = hass.states.get("light.test")
    assert state.state == STATE_OFF
    assert state.attributes.get(ATTR_SUPPORTED_FEATURES) == 40

    # Each flash keyword is translated into its configured duration.
    for flash, expected_time in (("short", 5), ("long", 15)):
        await common.async_turn_on(hass, "light.test", flash=flash)
        mqtt_mock.async_publish.assert_called_once_with(
            "test_light_rgb/set",
            JsonValidator('{"state": "ON", "flash": %d}' % expected_time),
            0,
            False,
        )
        mqtt_mock.async_publish.reset_mock()
        state = hass.states.get("light.test")
        assert state.state == STATE_ON
async def test_transition(hass, mqtt_mock):
    """Test for transition time being sent when included."""
    assert await async_setup_component(
        hass,
        light.DOMAIN,
        {
            light.DOMAIN: {
                "platform": "mqtt",
                "schema": "json",
                "name": "test",
                "command_topic": "test_light_rgb/set",
                "qos": 0,
            }
        },
    )

    state = hass.states.get("light.test")
    assert state.state == STATE_OFF
    assert state.attributes.get(ATTR_SUPPORTED_FEATURES) == 40

    # Turning on with a transition forwards it inside the JSON payload.
    await common.async_turn_on(hass, "light.test", transition=15)
    expected_on = '{"state": "ON", "transition": 15}'
    mqtt_mock.async_publish.assert_called_once_with(
        "test_light_rgb/set",
        JsonValidator(expected_on),
        0,
        False,
    )
    mqtt_mock.async_publish.reset_mock()
    state = hass.states.get("light.test")
    assert state.state == STATE_ON

    # The same applies when turning off.
    await common.async_turn_off(hass, "light.test", transition=30)
    expected_off = '{"state": "OFF", "transition": 30}'
    mqtt_mock.async_publish.assert_called_once_with(
        "test_light_rgb/set",
        JsonValidator(expected_off),
        0,
        False,
    )
    mqtt_mock.async_publish.reset_mock()
    state = hass.states.get("light.test")
    assert state.state == STATE_OFF
async def test_brightness_scale(hass, mqtt_mock):
    """Test for brightness scaling."""
    assert await async_setup_component(
        hass,
        light.DOMAIN,
        {
            light.DOMAIN: {
                "platform": "mqtt",
                "schema": "json",
                "name": "test",
                "state_topic": "test_light_bright_scale",
                "command_topic": "test_light_bright_scale/set",
                "brightness": True,
                "brightness_scale": 99,
            }
        },
    )

    state = hass.states.get("light.test")
    assert state.state == STATE_OFF
    assert state.attributes.get("brightness") is None
    assert not state.attributes.get(ATTR_ASSUMED_STATE)

    # A state message without brightness turns the light on at full brightness.
    async_fire_mqtt_message(hass, "test_light_bright_scale", '{"state":"ON"}')
    state = hass.states.get("light.test")
    assert state.state == STATE_ON
    assert state.attributes.get("brightness") == 255

    # The device-scale maximum (99) maps to Home Assistant's maximum (255).
    payload = '{"state":"ON", "brightness": 99}'
    async_fire_mqtt_message(hass, "test_light_bright_scale", payload)
    state = hass.states.get("light.test")
    assert state.state == STATE_ON
    assert state.attributes.get("brightness") == 255
async def test_invalid_values(hass, mqtt_mock):
    """Test that invalid color/brightness/white/etc. values are ignored."""
    assert await async_setup_component(
        hass,
        light.DOMAIN,
        {
            light.DOMAIN: {
                "platform": "mqtt",
                "schema": "json",
                "name": "test",
                "state_topic": "test_light_rgb",
                "command_topic": "test_light_rgb/set",
                "brightness": True,
                "color_temp": True,
                "rgb": True,
                "white_value": True,
                "qos": "0",
            }
        },
    )
    state = hass.states.get("light.test")
    assert state.state == STATE_OFF
    assert state.attributes.get(ATTR_SUPPORTED_FEATURES) == 187
    assert state.attributes.get("rgb_color") is None
    assert state.attributes.get("brightness") is None
    assert state.attributes.get("white_value") is None
    assert state.attributes.get("color_temp") is None
    assert not state.attributes.get(ATTR_ASSUMED_STATE)
    # Turn on the light
    # NOTE(review): the payload includes "effect" but effect is not enabled in
    # the config and is never asserted below — presumably it is ignored.
    async_fire_mqtt_message(
        hass,
        "test_light_rgb",
        '{"state":"ON",'
        '"color":{"r":255,"g":255,"b":255},'
        '"brightness": 255,'
        '"white_value": 255,'
        '"color_temp": 100,'
        '"effect": "rainbow"}',
    )
    state = hass.states.get("light.test")
    assert state.state == STATE_ON
    assert state.attributes.get("rgb_color") == (255, 255, 255)
    assert state.attributes.get("brightness") == 255
    assert state.attributes.get("white_value") == 255
    assert state.attributes.get("color_temp") == 100
    # Bad HS color values
    async_fire_mqtt_message(
        hass, "test_light_rgb", '{"state":"ON",' '"color":{"h":"bad","s":"val"}}',
    )
    # Color should not have changed
    state = hass.states.get("light.test")
    assert state.state == STATE_ON
    assert state.attributes.get("rgb_color") == (255, 255, 255)
    # Bad RGB color values
    async_fire_mqtt_message(
        hass,
        "test_light_rgb",
        '{"state":"ON",' '"color":{"r":"bad","g":"val","b":"test"}}',
    )
    # Color should not have changed
    state = hass.states.get("light.test")
    assert state.state == STATE_ON
    assert state.attributes.get("rgb_color") == (255, 255, 255)
    # Bad XY color values
    async_fire_mqtt_message(
        hass, "test_light_rgb", '{"state":"ON",' '"color":{"x":"bad","y":"val"}}',
    )
    # Color should not have changed
    state = hass.states.get("light.test")
    assert state.state == STATE_ON
    assert state.attributes.get("rgb_color") == (255, 255, 255)
    # Bad brightness values
    async_fire_mqtt_message(
        hass, "test_light_rgb", '{"state":"ON",' '"brightness": "badValue"}'
    )
    # Brightness should not have changed
    state = hass.states.get("light.test")
    assert state.state == STATE_ON
    assert state.attributes.get("brightness") == 255
    # Bad white value
    async_fire_mqtt_message(
        hass, "test_light_rgb", '{"state":"ON",' '"white_value": "badValue"}'
    )
    # White value should not have changed
    state = hass.states.get("light.test")
    assert state.state == STATE_ON
    assert state.attributes.get("white_value") == 255
    # Bad color temperature
    async_fire_mqtt_message(
        hass, "test_light_rgb", '{"state":"ON",' '"color_temp": "badValue"}'
    )
    # Color temperature should not have changed
    state = hass.states.get("light.test")
    assert state.state == STATE_ON
    assert state.attributes.get("color_temp") == 100
async def test_availability_without_topic(hass, mqtt_mock):
    """Test that the light is available when no availability topic is set."""
    await help_test_availability_without_topic(
        hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
    )
async def test_default_availability_payload(hass, mqtt_mock):
    """Test availability tracking via the default payload on an availability topic."""
    await help_test_default_availability_payload(
        hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
    )
async def test_custom_availability_payload(hass, mqtt_mock):
    """Test availability tracking via a custom payload on an availability topic."""
    await help_test_custom_availability_payload(
        hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
    )
async def test_setting_attribute_via_mqtt_json_message(hass, mqtt_mock):
    """Test the setting of attribute via MQTT with JSON payload."""
    await help_test_setting_attribute_via_mqtt_json_message(
        hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
    )
async def test_setting_attribute_with_template(hass, mqtt_mock):
    """Test the setting of attribute via MQTT with a value template applied."""
    await help_test_setting_attribute_with_template(
        hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
    )
async def test_update_with_json_attrs_not_dict(hass, mqtt_mock, caplog):
    """Test that a JSON attributes payload that is not a dict is rejected."""
    await help_test_update_with_json_attrs_not_dict(
        hass, mqtt_mock, caplog, light.DOMAIN, DEFAULT_CONFIG
    )
async def test_update_with_json_attrs_bad_JSON(hass, mqtt_mock, caplog):
    """Test that an unparsable JSON attributes payload is rejected."""
    await help_test_update_with_json_attrs_bad_JSON(
        hass, mqtt_mock, caplog, light.DOMAIN, DEFAULT_CONFIG
    )
async def test_discovery_update_attr(hass, mqtt_mock, caplog):
    """Test update of discovered MQTTAttributes."""
    await help_test_discovery_update_attr(
        hass, mqtt_mock, caplog, light.DOMAIN, DEFAULT_CONFIG
    )
async def test_unique_id(hass):
    """Test unique id option only creates one light per unique_id."""
    # Both entries deliberately share the same unique_id; the helper asserts
    # that only a single entity is created.
    config = {
        light.DOMAIN: [
            {
                "platform": "mqtt",
                "name": "Test 1",
                "schema": "json",
                "state_topic": "test-topic",
                "command_topic": "test_topic",
                "unique_id": "TOTALLY_UNIQUE",
            },
            {
                "platform": "mqtt",
                "name": "Test 2",
                "schema": "json",
                "state_topic": "test-topic",
                "command_topic": "test_topic",
                "unique_id": "TOTALLY_UNIQUE",
            },
        ]
    }
    await help_test_unique_id(hass, light.DOMAIN, config)
async def test_discovery_removal(hass, mqtt_mock, caplog):
    """Test removal of discovered mqtt_json lights."""
    # Minimal valid discovery payload; the helper then retracts it.
    data = '{ "name": "test",' ' "schema": "json",' ' "command_topic": "test_topic" }'
    await help_test_discovery_removal(hass, mqtt_mock, caplog, light.DOMAIN, data)
async def test_discovery_update_light(hass, mqtt_mock, caplog):
    """Test update of discovered light."""
    # Two discovery payloads differing only in name; the helper verifies the
    # entity is updated in place rather than duplicated.
    data1 = (
        '{ "name": "Beer",'
        ' "schema": "json",'
        ' "state_topic": "test_topic",'
        ' "command_topic": "test_topic" }'
    )
    data2 = (
        '{ "name": "Milk",'
        ' "schema": "json",'
        ' "state_topic": "test_topic",'
        ' "command_topic": "test_topic" }'
    )
    await help_test_discovery_update(
        hass, mqtt_mock, caplog, light.DOMAIN, data1, data2
    )
async def test_discovery_broken(hass, mqtt_mock, caplog):
    """Test handling of bad discovery message."""
    # data1 lacks the required command_topic/schema; data2 is a valid config
    # that must still be accepted afterwards.
    data1 = '{ "name": "Beer" }'
    data2 = (
        '{ "name": "Milk",'
        ' "schema": "json",'
        ' "state_topic": "test_topic",'
        ' "command_topic": "test_topic" }'
    )
    await help_test_discovery_broken(
        hass, mqtt_mock, caplog, light.DOMAIN, data1, data2
    )
async def test_entity_device_info_with_connection(hass, mqtt_mock):
    """Test MQTT light device registry integration (device via connection)."""
    await help_test_entity_device_info_with_connection(
        hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
    )
async def test_entity_device_info_with_identifier(hass, mqtt_mock):
    """Test MQTT light device registry integration (device via identifier)."""
    await help_test_entity_device_info_with_identifier(
        hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
    )
async def test_entity_device_info_update(hass, mqtt_mock):
    """Test device registry update."""
    await help_test_entity_device_info_update(
        hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
    )
async def test_entity_device_info_remove(hass, mqtt_mock):
    """Test device registry remove."""
    await help_test_entity_device_info_remove(
        hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
    )
async def test_entity_id_update_subscriptions(hass, mqtt_mock):
    """Test MQTT subscriptions are managed when entity_id is updated."""
    await help_test_entity_id_update_subscriptions(
        hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
    )
async def test_entity_id_update_discovery_update(hass, mqtt_mock):
    """Test MQTT discovery update when entity_id is updated."""
    await help_test_entity_id_update_discovery_update(
        hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
    )
| 30.912049
| 88
| 0.583094
|
import json
from unittest import mock
from unittest.mock import patch
from homeassistant.components import light
from homeassistant.const import (
ATTR_ASSUMED_STATE,
ATTR_SUPPORTED_FEATURES,
STATE_OFF,
STATE_ON,
)
import homeassistant.core as ha
from homeassistant.setup import async_setup_component
from .test_common import (
help_test_availability_without_topic,
help_test_custom_availability_payload,
help_test_default_availability_payload,
help_test_discovery_broken,
help_test_discovery_removal,
help_test_discovery_update,
help_test_discovery_update_attr,
help_test_entity_device_info_remove,
help_test_entity_device_info_update,
help_test_entity_device_info_with_connection,
help_test_entity_device_info_with_identifier,
help_test_entity_id_update_discovery_update,
help_test_entity_id_update_subscriptions,
help_test_setting_attribute_via_mqtt_json_message,
help_test_setting_attribute_with_template,
help_test_unique_id,
help_test_update_with_json_attrs_bad_JSON,
help_test_update_with_json_attrs_not_dict,
)
from tests.common import async_fire_mqtt_message, mock_coro
from tests.components.light import common
DEFAULT_CONFIG = {
light.DOMAIN: {
"platform": "mqtt",
"schema": "json",
"name": "test",
"command_topic": "test-topic",
}
}
class JsonValidator(object):
def __init__(self, jsondata):
self.jsondata = jsondata
def __eq__(self, other):
return json.loads(self.jsondata) == json.loads(other)
async def test_fail_setup_if_no_command_topic(hass, mqtt_mock):
assert await async_setup_component(
hass,
light.DOMAIN,
{light.DOMAIN: {"platform": "mqtt", "schema": "json", "name": "test"}},
)
assert hass.states.get("light.test") is None
async def test_no_color_brightness_color_temp_white_val_if_no_topics(hass, mqtt_mock):
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"schema": "json",
"name": "test",
"state_topic": "test_light_rgb",
"command_topic": "test_light_rgb/set",
}
},
)
state = hass.states.get("light.test")
assert state.state == STATE_OFF
assert state.attributes.get(ATTR_SUPPORTED_FEATURES) == 40
assert state.attributes.get("rgb_color") is None
assert state.attributes.get("brightness") is None
assert state.attributes.get("color_temp") is None
assert state.attributes.get("effect") is None
assert state.attributes.get("white_value") is None
assert state.attributes.get("xy_color") is None
assert state.attributes.get("hs_color") is None
async_fire_mqtt_message(hass, "test_light_rgb", '{"state":"ON"}')
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("rgb_color") is None
assert state.attributes.get("brightness") is None
assert state.attributes.get("color_temp") is None
assert state.attributes.get("effect") is None
assert state.attributes.get("white_value") is None
assert state.attributes.get("xy_color") is None
assert state.attributes.get("hs_color") is None
async def test_controlling_state_via_topic(hass, mqtt_mock):
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"schema": "json",
"name": "test",
"state_topic": "test_light_rgb",
"command_topic": "test_light_rgb/set",
"brightness": True,
"color_temp": True,
"effect": True,
"rgb": True,
"white_value": True,
"xy": True,
"hs": True,
"qos": "0",
}
},
)
state = hass.states.get("light.test")
assert state.state == STATE_OFF
assert state.attributes.get(ATTR_SUPPORTED_FEATURES) == 191
assert state.attributes.get("rgb_color") is None
assert state.attributes.get("brightness") is None
assert state.attributes.get("color_temp") is None
assert state.attributes.get("effect") is None
assert state.attributes.get("white_value") is None
assert state.attributes.get("xy_color") is None
assert state.attributes.get("hs_color") is None
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(
hass,
"test_light_rgb",
'{"state":"ON",'
'"color":{"r":255,"g":255,"b":255},'
'"brightness":255,'
'"color_temp":155,'
'"effect":"colorloop",'
'"white_value":150}',
)
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("rgb_color") == (255, 255, 255)
assert state.attributes.get("brightness") == 255
assert state.attributes.get("color_temp") == 155
assert state.attributes.get("effect") == "colorloop"
assert state.attributes.get("white_value") == 150
assert state.attributes.get("xy_color") == (0.323, 0.329)
assert state.attributes.get("hs_color") == (0.0, 0.0)
async_fire_mqtt_message(hass, "test_light_rgb", '{"state":"OFF"}')
state = hass.states.get("light.test")
assert state.state == STATE_OFF
async_fire_mqtt_message(hass, "test_light_rgb", '{"state":"ON", "brightness":100}')
light_state = hass.states.get("light.test")
assert light_state.attributes["brightness"] == 100
async_fire_mqtt_message(
hass, "test_light_rgb", '{"state":"ON", ' '"color":{"r":125,"g":125,"b":125}}'
)
light_state = hass.states.get("light.test")
assert light_state.attributes.get("rgb_color") == (255, 255, 255)
async_fire_mqtt_message(
hass, "test_light_rgb", '{"state":"ON", "color":{"x":0.135,"y":0.135}}'
)
light_state = hass.states.get("light.test")
assert light_state.attributes.get("xy_color") == (0.141, 0.14)
async_fire_mqtt_message(
hass, "test_light_rgb", '{"state":"ON", "color":{"h":180,"s":50}}'
)
light_state = hass.states.get("light.test")
assert light_state.attributes.get("hs_color") == (180.0, 50.0)
async_fire_mqtt_message(hass, "test_light_rgb", '{"state":"ON", "color_temp":155}')
light_state = hass.states.get("light.test")
assert light_state.attributes.get("color_temp") == 155
async_fire_mqtt_message(
hass, "test_light_rgb", '{"state":"ON", "effect":"colorloop"}'
)
light_state = hass.states.get("light.test")
assert light_state.attributes.get("effect") == "colorloop"
async_fire_mqtt_message(hass, "test_light_rgb", '{"state":"ON", "white_value":155}')
light_state = hass.states.get("light.test")
assert light_state.attributes.get("white_value") == 155
async def test_sending_mqtt_commands_and_optimistic(hass, mqtt_mock):
    """Test an optimistic JSON light: no state topic, so commands are
    published and the state is assumed locally, starting from a restored
    previous state."""
    # Previous state that RestoreEntity hands back on setup.
    fake_state = ha.State(
        "light.test",
        "on",
        {
            "brightness": 95,
            "hs_color": [100, 100],
            "effect": "random",
            "color_temp": 100,
            "white_value": 50,
        },
    )
    with patch(
        "homeassistant.helpers.restore_state.RestoreEntity.async_get_last_state",
        return_value=mock_coro(fake_state),
    ):
        # No "state_topic" -> the light operates optimistically.
        assert await async_setup_component(
            hass,
            light.DOMAIN,
            {
                light.DOMAIN: {
                    "platform": "mqtt",
                    "schema": "json",
                    "name": "test",
                    "command_topic": "test_light_rgb/set",
                    "brightness": True,
                    "color_temp": True,
                    "effect": True,
                    "hs": True,
                    "rgb": True,
                    "xy": True,
                    "white_value": True,
                    "qos": 2,
                }
            },
        )
    # All attributes come from the restored fake state.
    state = hass.states.get("light.test")
    assert state.state == STATE_ON
    assert state.attributes.get("brightness") == 95
    assert state.attributes.get("hs_color") == (100, 100)
    assert state.attributes.get("effect") == "random"
    assert state.attributes.get("color_temp") == 100
    assert state.attributes.get("white_value") == 50
    assert state.attributes.get(ATTR_SUPPORTED_FEATURES) == 191
    assert state.attributes.get(ATTR_ASSUMED_STATE)
    # Plain turn_on publishes at the configured QoS 2, not retained.
    await common.async_turn_on(hass, "light.test")
    mqtt_mock.async_publish.assert_called_once_with(
        "test_light_rgb/set", '{"state": "ON"}', 2, False
    )
    mqtt_mock.async_publish.reset_mock()
    state = hass.states.get("light.test")
    assert state.state == STATE_ON
    await common.async_turn_on(hass, "light.test", color_temp=90)
    mqtt_mock.async_publish.assert_called_once_with(
        "test_light_rgb/set",
        JsonValidator('{"state": "ON", "color_temp": 90}'),
        2,
        False,
    )
    mqtt_mock.async_publish.reset_mock()
    state = hass.states.get("light.test")
    assert state.state == STATE_ON
    await common.async_turn_off(hass, "light.test")
    mqtt_mock.async_publish.assert_called_once_with(
        "test_light_rgb/set", '{"state": "OFF"}', 2, False
    )
    mqtt_mock.async_publish.reset_mock()
    state = hass.states.get("light.test")
    assert state.state == STATE_OFF
    mqtt_mock.reset_mock()
    # With every color mode enabled, one command carries rgb, xy and hs
    # representations of the same requested color.
    await common.async_turn_on(
        hass, "light.test", brightness=50, xy_color=[0.123, 0.123]
    )
    await common.async_turn_on(hass, "light.test", brightness=50, hs_color=[359, 78])
    await common.async_turn_on(
        hass, "light.test", rgb_color=[255, 128, 0], white_value=80
    )
    mqtt_mock.async_publish.assert_has_calls(
        [
            mock.call(
                "test_light_rgb/set",
                JsonValidator(
                    '{"state": "ON", "color": {"r": 0, "g": 123, "b": 255,'
                    ' "x": 0.14, "y": 0.131, "h": 210.824, "s": 100.0},'
                    ' "brightness": 50}'
                ),
                2,
                False,
            ),
            mock.call(
                "test_light_rgb/set",
                JsonValidator(
                    '{"state": "ON", "color": {"r": 255, "g": 56, "b": 59,'
                    ' "x": 0.654, "y": 0.301, "h": 359.0, "s": 78.0},'
                    ' "brightness": 50}'
                ),
                2,
                False,
            ),
            mock.call(
                "test_light_rgb/set",
                JsonValidator(
                    '{"state": "ON", "color": {"r": 255, "g": 128, "b": 0,'
                    ' "x": 0.611, "y": 0.375, "h": 30.118, "s": 100.0},'
                    ' "white_value": 80}'
                ),
                2,
                False,
            ),
        ],
        any_order=True,
    )
    # Optimistic state reflects the last command sent.
    state = hass.states.get("light.test")
    assert state.state == STATE_ON
    assert state.attributes["rgb_color"] == (255, 128, 0)
    assert state.attributes["brightness"] == 50
    assert state.attributes["hs_color"] == (30.118, 100)
    assert state.attributes["white_value"] == 80
    assert state.attributes["xy_color"] == (0.611, 0.375)
async def test_sending_hs_color(hass, mqtt_mock):
    """Test that only h/s color keys are published when only "hs" color
    support is configured, regardless of how the color was specified."""
    assert await async_setup_component(
        hass,
        light.DOMAIN,
        {
            light.DOMAIN: {
                "platform": "mqtt",
                "schema": "json",
                "name": "test",
                "command_topic": "test_light_rgb/set",
                "brightness": True,
                "hs": True,
            }
        },
    )
    state = hass.states.get("light.test")
    assert state.state == STATE_OFF
    mqtt_mock.reset_mock()
    # xy, hs and rgb inputs are all converted to hs-only payloads.
    await common.async_turn_on(
        hass, "light.test", brightness=50, xy_color=[0.123, 0.123]
    )
    await common.async_turn_on(hass, "light.test", brightness=50, hs_color=[359, 78])
    await common.async_turn_on(
        hass, "light.test", rgb_color=[255, 128, 0], white_value=80
    )
    mqtt_mock.async_publish.assert_has_calls(
        [
            mock.call(
                "test_light_rgb/set",
                JsonValidator(
                    '{"state": "ON", "color": {"h": 210.824, "s": 100.0},'
                    ' "brightness": 50}'
                ),
                0,
                False,
            ),
            mock.call(
                "test_light_rgb/set",
                JsonValidator(
                    '{"state": "ON", "color": {"h": 359.0, "s": 78.0},'
                    ' "brightness": 50}'
                ),
                0,
                False,
            ),
            mock.call(
                "test_light_rgb/set",
                JsonValidator(
                    '{"state": "ON", "color": {"h": 30.118, "s": 100.0},'
                    ' "white_value": 80}'
                ),
                0,
                False,
            ),
        ],
        any_order=True,
    )
async def test_sending_rgb_color_no_brightness(hass, mqtt_mock):
    """Test that without brightness support the requested brightness is
    scaled into the published r/g/b values instead of a separate key."""
    assert await async_setup_component(
        hass,
        light.DOMAIN,
        {
            light.DOMAIN: {
                "platform": "mqtt",
                "schema": "json",
                "name": "test",
                "command_topic": "test_light_rgb/set",
                "rgb": True,
            }
        },
    )
    state = hass.states.get("light.test")
    assert state.state == STATE_OFF
    await common.async_turn_on(
        hass, "light.test", brightness=50, xy_color=[0.123, 0.123]
    )
    await common.async_turn_on(hass, "light.test", brightness=50, hs_color=[359, 78])
    await common.async_turn_on(
        hass, "light.test", rgb_color=[255, 128, 0], brightness=255
    )
    # brightness=50 dims (0,123,255) to (0,24,50); full brightness keeps rgb.
    mqtt_mock.async_publish.assert_has_calls(
        [
            mock.call(
                "test_light_rgb/set",
                JsonValidator('{"state": "ON", "color": {"r": 0, "g": 24, "b": 50}}'),
                0,
                False,
            ),
            mock.call(
                "test_light_rgb/set",
                JsonValidator('{"state": "ON", "color": {"r": 50, "g": 11, "b": 11}}'),
                0,
                False,
            ),
            mock.call(
                "test_light_rgb/set",
                JsonValidator('{"state": "ON", "color": {"r": 255, "g": 128, "b": 0}}'),
                0,
                False,
            ),
        ],
        any_order=True,
    )
async def test_sending_rgb_color_with_brightness(hass, mqtt_mock):
    """Test that with brightness support, full-range r/g/b values plus a
    separate "brightness" key are published."""
    assert await async_setup_component(
        hass,
        light.DOMAIN,
        {
            light.DOMAIN: {
                "platform": "mqtt",
                "schema": "json",
                "name": "test",
                "command_topic": "test_light_rgb/set",
                "brightness": True,
                "rgb": True,
            }
        },
    )
    state = hass.states.get("light.test")
    assert state.state == STATE_OFF
    await common.async_turn_on(
        hass, "light.test", brightness=50, xy_color=[0.123, 0.123]
    )
    await common.async_turn_on(hass, "light.test", brightness=50, hs_color=[359, 78])
    await common.async_turn_on(
        hass, "light.test", rgb_color=[255, 128, 0], white_value=80
    )
    mqtt_mock.async_publish.assert_has_calls(
        [
            mock.call(
                "test_light_rgb/set",
                JsonValidator(
                    '{"state": "ON", "color": {"r": 0, "g": 123, "b": 255},'
                    ' "brightness": 50}'
                ),
                0,
                False,
            ),
            mock.call(
                "test_light_rgb/set",
                JsonValidator(
                    '{"state": "ON", "color": {"r": 255, "g": 56, "b": 59},'
                    ' "brightness": 50}'
                ),
                0,
                False,
            ),
            mock.call(
                "test_light_rgb/set",
                JsonValidator(
                    '{"state": "ON", "color": {"r": 255, "g": 128, "b": 0},'
                    ' "white_value": 80}'
                ),
                0,
                False,
            ),
        ],
        any_order=True,
    )
async def test_sending_xy_color(hass, mqtt_mock):
    """Test that only x/y color keys are published when only "xy" color
    support is configured, for xy, hs and rgb inputs alike."""
    assert await async_setup_component(
        hass,
        light.DOMAIN,
        {
            light.DOMAIN: {
                "platform": "mqtt",
                "schema": "json",
                "name": "test",
                "command_topic": "test_light_rgb/set",
                "brightness": True,
                "xy": True,
            }
        },
    )
    state = hass.states.get("light.test")
    assert state.state == STATE_OFF
    await common.async_turn_on(
        hass, "light.test", brightness=50, xy_color=[0.123, 0.123]
    )
    await common.async_turn_on(hass, "light.test", brightness=50, hs_color=[359, 78])
    await common.async_turn_on(
        hass, "light.test", rgb_color=[255, 128, 0], white_value=80
    )
    mqtt_mock.async_publish.assert_has_calls(
        [
            mock.call(
                "test_light_rgb/set",
                JsonValidator(
                    '{"state": "ON", "color": {"x": 0.14, "y": 0.131},'
                    ' "brightness": 50}'
                ),
                0,
                False,
            ),
            mock.call(
                "test_light_rgb/set",
                JsonValidator(
                    '{"state": "ON", "color": {"x": 0.654, "y": 0.301},'
                    ' "brightness": 50}'
                ),
                0,
                False,
            ),
            mock.call(
                "test_light_rgb/set",
                JsonValidator(
                    '{"state": "ON", "color": {"x": 0.611, "y": 0.375},'
                    ' "white_value": 80}'
                ),
                0,
                False,
            ),
        ],
        any_order=True,
    )
async def test_effect(hass, mqtt_mock):
    """Test that effect commands are published and mirrored in the
    optimistic state ("none" until an effect is chosen)."""
    assert await async_setup_component(
        hass,
        light.DOMAIN,
        {
            light.DOMAIN: {
                "platform": "mqtt",
                "schema": "json",
                "name": "test",
                "command_topic": "test_light_rgb/set",
                "effect": True,
                "qos": 0,
            }
        },
    )
    state = hass.states.get("light.test")
    assert state.state == STATE_OFF
    assert state.attributes.get(ATTR_SUPPORTED_FEATURES) == 44
    await common.async_turn_on(hass, "light.test")
    mqtt_mock.async_publish.assert_called_once_with(
        "test_light_rgb/set", JsonValidator('{"state": "ON"}'), 0, False
    )
    mqtt_mock.async_publish.reset_mock()
    state = hass.states.get("light.test")
    assert state.state == STATE_ON
    assert state.attributes.get("effect") == "none"
    await common.async_turn_on(hass, "light.test", effect="rainbow")
    mqtt_mock.async_publish.assert_called_once_with(
        "test_light_rgb/set",
        JsonValidator('{"state": "ON", "effect": "rainbow"}'),
        0,
        False,
    )
    mqtt_mock.async_publish.reset_mock()
    state = hass.states.get("light.test")
    assert state.state == STATE_ON
    assert state.attributes.get("effect") == "rainbow"
    await common.async_turn_on(hass, "light.test", effect="colorloop")
    mqtt_mock.async_publish.assert_called_once_with(
        "test_light_rgb/set",
        JsonValidator('{"state": "ON", "effect": "colorloop"}'),
        0,
        False,
    )
    mqtt_mock.async_publish.reset_mock()
    state = hass.states.get("light.test")
    assert state.state == STATE_ON
    assert state.attributes.get("effect") == "colorloop"
async def test_flash_short_and_long(hass, mqtt_mock):
    """Test that flash="short"/"long" map to the configured flash times
    (5 s and 15 s) in the published payload."""
    assert await async_setup_component(
        hass,
        light.DOMAIN,
        {
            light.DOMAIN: {
                "platform": "mqtt",
                "schema": "json",
                "name": "test",
                "command_topic": "test_light_rgb/set",
                "flash_time_short": 5,
                "flash_time_long": 15,
                "qos": 0,
            }
        },
    )
    state = hass.states.get("light.test")
    assert state.state == STATE_OFF
    assert state.attributes.get(ATTR_SUPPORTED_FEATURES) == 40
    await common.async_turn_on(hass, "light.test", flash="short")
    mqtt_mock.async_publish.assert_called_once_with(
        "test_light_rgb/set", JsonValidator('{"state": "ON", "flash": 5}'), 0, False
    )
    mqtt_mock.async_publish.reset_mock()
    state = hass.states.get("light.test")
    assert state.state == STATE_ON
    await common.async_turn_on(hass, "light.test", flash="long")
    mqtt_mock.async_publish.assert_called_once_with(
        "test_light_rgb/set", JsonValidator('{"state": "ON", "flash": 15}'), 0, False
    )
    mqtt_mock.async_publish.reset_mock()
    state = hass.states.get("light.test")
    assert state.state == STATE_ON
async def test_transition(hass, mqtt_mock):
    """Test that the transition parameter is passed through to both the
    ON and OFF command payloads."""
    assert await async_setup_component(
        hass,
        light.DOMAIN,
        {
            light.DOMAIN: {
                "platform": "mqtt",
                "schema": "json",
                "name": "test",
                "command_topic": "test_light_rgb/set",
                "qos": 0,
            }
        },
    )
    state = hass.states.get("light.test")
    assert state.state == STATE_OFF
    assert state.attributes.get(ATTR_SUPPORTED_FEATURES) == 40
    await common.async_turn_on(hass, "light.test", transition=15)
    mqtt_mock.async_publish.assert_called_once_with(
        "test_light_rgb/set",
        JsonValidator('{"state": "ON", "transition": 15}'),
        0,
        False,
    )
    mqtt_mock.async_publish.reset_mock()
    state = hass.states.get("light.test")
    assert state.state == STATE_ON
    await common.async_turn_off(hass, "light.test", transition=30)
    mqtt_mock.async_publish.assert_called_once_with(
        "test_light_rgb/set",
        JsonValidator('{"state": "OFF", "transition": 30}'),
        0,
        False,
    )
    mqtt_mock.async_publish.reset_mock()
    state = hass.states.get("light.test")
    assert state.state == STATE_OFF
async def test_brightness_scale(hass, mqtt_mock):
    """Test that an incoming brightness on a 0-99 scale is rescaled to the
    internal 0-255 range."""
    assert await async_setup_component(
        hass,
        light.DOMAIN,
        {
            light.DOMAIN: {
                "platform": "mqtt",
                "schema": "json",
                "name": "test",
                "state_topic": "test_light_bright_scale",
                "command_topic": "test_light_bright_scale/set",
                "brightness": True,
                "brightness_scale": 99,
            }
        },
    )
    state = hass.states.get("light.test")
    assert state.state == STATE_OFF
    assert state.attributes.get("brightness") is None
    assert not state.attributes.get(ATTR_ASSUMED_STATE)
    # Turn on without brightness -> full brightness assumed.
    async_fire_mqtt_message(hass, "test_light_bright_scale", '{"state":"ON"}')
    state = hass.states.get("light.test")
    assert state.state == STATE_ON
    assert state.attributes.get("brightness") == 255
    # 99 is the scale maximum, so it maps to 255.
    async_fire_mqtt_message(
        hass, "test_light_bright_scale", '{"state":"ON", "brightness": 99}'
    )
    state = hass.states.get("light.test")
    assert state.state == STATE_ON
    assert state.attributes.get("brightness") == 255
async def test_invalid_values(hass, mqtt_mock):
    """Test that invalid attribute values in state payloads are ignored
    and the previously received valid values are kept."""
    assert await async_setup_component(
        hass,
        light.DOMAIN,
        {
            light.DOMAIN: {
                "platform": "mqtt",
                "schema": "json",
                "name": "test",
                "state_topic": "test_light_rgb",
                "command_topic": "test_light_rgb/set",
                "brightness": True,
                "color_temp": True,
                "rgb": True,
                "white_value": True,
                "qos": "0",
            }
        },
    )
    state = hass.states.get("light.test")
    assert state.state == STATE_OFF
    assert state.attributes.get(ATTR_SUPPORTED_FEATURES) == 187
    assert state.attributes.get("rgb_color") is None
    assert state.attributes.get("brightness") is None
    assert state.attributes.get("white_value") is None
    assert state.attributes.get("color_temp") is None
    assert not state.attributes.get(ATTR_ASSUMED_STATE)
    # Establish a fully valid baseline state first.
    async_fire_mqtt_message(
        hass,
        "test_light_rgb",
        '{"state":"ON",'
        '"color":{"r":255,"g":255,"b":255},'
        '"brightness": 255,'
        '"white_value": 255,'
        '"color_temp": 100,'
        '"effect": "rainbow"}',
    )
    state = hass.states.get("light.test")
    assert state.state == STATE_ON
    assert state.attributes.get("rgb_color") == (255, 255, 255)
    assert state.attributes.get("brightness") == 255
    assert state.attributes.get("white_value") == 255
    assert state.attributes.get("color_temp") == 100
    # Bad h/s values: color is unchanged.
    async_fire_mqtt_message(
        hass, "test_light_rgb", '{"state":"ON",' '"color":{"h":"bad","s":"val"}}',
    )
    state = hass.states.get("light.test")
    assert state.state == STATE_ON
    assert state.attributes.get("rgb_color") == (255, 255, 255)
    # Bad r/g/b values: color is unchanged.
    async_fire_mqtt_message(
        hass,
        "test_light_rgb",
        '{"state":"ON",' '"color":{"r":"bad","g":"val","b":"test"}}',
    )
    state = hass.states.get("light.test")
    assert state.state == STATE_ON
    assert state.attributes.get("rgb_color") == (255, 255, 255)
    # Bad x/y values: color is unchanged.
    async_fire_mqtt_message(
        hass, "test_light_rgb", '{"state":"ON",' '"color":{"x":"bad","y":"val"}}',
    )
    state = hass.states.get("light.test")
    assert state.state == STATE_ON
    assert state.attributes.get("rgb_color") == (255, 255, 255)
    # Bad brightness/white_value/color_temp: previous values retained.
    async_fire_mqtt_message(
        hass, "test_light_rgb", '{"state":"ON",' '"brightness": "badValue"}'
    )
    state = hass.states.get("light.test")
    assert state.state == STATE_ON
    assert state.attributes.get("brightness") == 255
    async_fire_mqtt_message(
        hass, "test_light_rgb", '{"state":"ON",' '"white_value": "badValue"}'
    )
    state = hass.states.get("light.test")
    assert state.state == STATE_ON
    assert state.attributes.get("white_value") == 255
    async_fire_mqtt_message(
        hass, "test_light_rgb", '{"state":"ON",' '"color_temp": "badValue"}'
    )
    state = hass.states.get("light.test")
    assert state.state == STATE_ON
    assert state.attributes.get("color_temp") == 100
async def test_availability_without_topic(hass, mqtt_mock):
    """Test availability without defined availability topic."""
    await help_test_availability_without_topic(
        hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
    )
async def test_default_availability_payload(hass, mqtt_mock):
    """Test availability by default payload with defined topic."""
    await help_test_default_availability_payload(
        hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
    )
async def test_custom_availability_payload(hass, mqtt_mock):
    """Test availability by custom payload with defined topic."""
    await help_test_custom_availability_payload(
        hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
    )
async def test_setting_attribute_via_mqtt_json_message(hass, mqtt_mock):
    """Test the setting of attribute via MQTT with JSON payload."""
    await help_test_setting_attribute_via_mqtt_json_message(
        hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
    )
async def test_setting_attribute_with_template(hass, mqtt_mock):
    """Test the setting of attribute via MQTT with a value template."""
    await help_test_setting_attribute_with_template(
        hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
    )
async def test_update_with_json_attrs_not_dict(hass, mqtt_mock, caplog):
    """Test attributes get extracted from a JSON result that is not a dict."""
    await help_test_update_with_json_attrs_not_dict(
        hass, mqtt_mock, caplog, light.DOMAIN, DEFAULT_CONFIG
    )
async def test_update_with_json_attrs_bad_JSON(hass, mqtt_mock, caplog):
    """Test attributes get extracted from a malformed JSON payload."""
    await help_test_update_with_json_attrs_bad_JSON(
        hass, mqtt_mock, caplog, light.DOMAIN, DEFAULT_CONFIG
    )
async def test_discovery_update_attr(hass, mqtt_mock, caplog):
    """Test update of discovered MQTTAttributes."""
    await help_test_discovery_update_attr(
        hass, mqtt_mock, caplog, light.DOMAIN, DEFAULT_CONFIG
    )
async def test_unique_id(hass):
    """Test that only one entity is created when two configs share the
    same unique_id."""
    config = {
        light.DOMAIN: [
            {
                "platform": "mqtt",
                "name": "Test 1",
                "schema": "json",
                "state_topic": "test-topic",
                "command_topic": "test_topic",
                "unique_id": "TOTALLY_UNIQUE",
            },
            {
                "platform": "mqtt",
                "name": "Test 2",
                "schema": "json",
                "state_topic": "test-topic",
                "command_topic": "test_topic",
                "unique_id": "TOTALLY_UNIQUE",
            },
        ]
    }
    await help_test_unique_id(hass, light.DOMAIN, config)
async def test_discovery_removal(hass, mqtt_mock, caplog):
    """Test removal of a discovered JSON light."""
    data = '{ "name": "test",' ' "schema": "json",' ' "command_topic": "test_topic" }'
    await help_test_discovery_removal(hass, mqtt_mock, caplog, light.DOMAIN, data)
async def test_discovery_update_light(hass, mqtt_mock, caplog):
    """Test update of a discovered JSON light when its config changes."""
    data1 = (
        '{ "name": "Beer",'
        ' "schema": "json",'
        ' "state_topic": "test_topic",'
        ' "command_topic": "test_topic" }'
    )
    data2 = (
        '{ "name": "Milk",'
        ' "schema": "json",'
        ' "state_topic": "test_topic",'
        ' "command_topic": "test_topic" }'
    )
    await help_test_discovery_update(
        hass, mqtt_mock, caplog, light.DOMAIN, data1, data2
    )
async def test_discovery_broken(hass, mqtt_mock, caplog):
    """Test handling of a broken discovery message followed by a valid one."""
    # data1 is invalid (no command_topic); data2 should still be discovered.
    data1 = '{ "name": "Beer" }'
    data2 = (
        '{ "name": "Milk",'
        ' "schema": "json",'
        ' "state_topic": "test_topic",'
        ' "command_topic": "test_topic" }'
    )
    await help_test_discovery_broken(
        hass, mqtt_mock, caplog, light.DOMAIN, data1, data2
    )
async def test_entity_device_info_with_connection(hass, mqtt_mock):
    """Test MQTT light device registry integration via connection info."""
    await help_test_entity_device_info_with_connection(
        hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
    )
async def test_entity_device_info_with_identifier(hass, mqtt_mock):
    """Test MQTT light device registry integration via identifiers."""
    await help_test_entity_device_info_with_identifier(
        hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
    )
async def test_entity_device_info_update(hass, mqtt_mock):
    """Test device registry update for the MQTT light."""
    await help_test_entity_device_info_update(
        hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
    )
async def test_entity_device_info_remove(hass, mqtt_mock):
    """Test device registry remove for the MQTT light."""
    await help_test_entity_device_info_remove(
        hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
    )
async def test_entity_id_update_subscriptions(hass, mqtt_mock):
    """Test MQTT subscriptions are managed when entity_id is updated."""
    await help_test_entity_id_update_subscriptions(
        hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
    )
async def test_entity_id_update_discovery_update(hass, mqtt_mock):
    """Test MQTT discovery update after entity_id is updated."""
    await help_test_entity_id_update_discovery_update(
        hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
    )
| true
| true
|
f717922d65b80aab39f063f12af29d0dc7bc2b8a
| 4,431
|
py
|
Python
|
tests/test_settings.py
|
lulle2007200/EasyClangComplete
|
55d2c47d9f1b9f2bc85e6cc6de8b92457d9bbeb6
|
[
"MIT"
] | 648
|
2016-04-18T16:10:47.000Z
|
2022-03-30T01:48:53.000Z
|
tests/test_settings.py
|
lulle2007200/EasyClangComplete
|
55d2c47d9f1b9f2bc85e6cc6de8b92457d9bbeb6
|
[
"MIT"
] | 672
|
2016-04-24T13:55:35.000Z
|
2022-03-23T06:38:42.000Z
|
tests/test_settings.py
|
lulle2007200/EasyClangComplete
|
55d2c47d9f1b9f2bc85e6cc6de8b92457d9bbeb6
|
[
"MIT"
] | 130
|
2016-05-26T19:27:00.000Z
|
2022-01-15T10:24:17.000Z
|
"""Tests for settings."""
import sublime
import imp
from os import path
from EasyClangComplete.tests.gui_test_wrapper import GuiTestWrapper
from EasyClangComplete.plugin.settings import settings_manager
from EasyClangComplete.plugin.settings import settings_storage
from EasyClangComplete.plugin.utils import flag
imp.reload(settings_manager)
imp.reload(settings_storage)
imp.reload(flag)
SettingsManager = settings_manager.SettingsManager
SettingsStorage = settings_storage.SettingsStorage
Flag = flag.Flag
class test_settings(GuiTestWrapper):
    """Tests for SettingsManager / SettingsStorage behavior.

    Note: `assertEquals` was replaced by `assertEqual` throughout —
    the former is a deprecated alias that was removed in Python 3.12.
    """

    def test_setup_view(self):
        """Test that setup view correctly sets up the view."""
        file_name = path.join(path.dirname(__file__),
                              'test_files',
                              'test.cpp')
        self.check_view(file_name)

    def test_init(self):
        """Test that settings are correctly initialized."""
        manager = SettingsManager()
        settings = manager.user_settings()
        # Every core setting must be populated from the defaults.
        self.assertIsNotNone(settings.verbose)
        self.assertIsNotNone(settings.triggers)
        self.assertIsNotNone(settings.common_flags)
        self.assertIsNotNone(settings.clang_binary)
        self.assertIsNotNone(settings.flags_sources)
        self.assertIsNotNone(settings.show_errors)
        self.assertIsNotNone(settings.valid_lang_syntaxes)

    def test_valid(self):
        """Test that the default user settings are valid."""
        manager = SettingsManager()
        settings = manager.user_settings()
        valid, _ = settings.is_valid()
        self.assertTrue(valid)

    def test_parse_cmake_flags(self):
        """Testing that we can parse cmake flags, including wildcards."""
        import glob  # hoisted from mid-method; only used here
        file_name = path.join(path.dirname(__file__),
                              'test_files',
                              'test_wrong_triggers.cpp')
        self.set_up_view(file_name)
        current_folder = path.dirname(__file__)
        flags_sources = [
            {
                "file": "CMakeLists.txt",
                "flags": [
                    "-DBLAH={}/*".format(current_folder),
                    "-DSMTH=ON",
                    "-D XXX=1",
                    "-D FLAG=word"
                ]
            }
        ]
        self.view.settings().set("flags_sources", flags_sources)
        settings = SettingsManager().user_settings()
        settings.update_from_view(self.view, project_specific=False)
        valid, _ = settings.is_valid()
        self.assertTrue(valid)
        self.assertEqual(len(settings.flags_sources), 1)
        entry = settings.flags_sources[0]
        self.assertIn('flags', entry)
        flags = entry['flags']
        self.assertEqual(len(flags), 4)
        self.assertIn('-DSMTH=ON', flags)
        self.assertIn('-D FLAG=word', flags)
        self.assertIn('-D XXX=1', flags)
        # The wildcard in the first flag must expand to every file in
        # this directory.
        all_files = glob.glob(path.join(current_folder, "*"))
        for file in all_files:
            self.assertIn(file, flags[0])

    def test_populate_flags(self):
        """Testing include population from view-specific settings."""
        # open any existing file
        file_name = path.join(path.dirname(__file__),
                              'test_files',
                              'test_wrong_triggers.cpp')
        self.set_up_view(file_name)
        # now test the things
        manager = SettingsManager()
        settings = manager.user_settings()
        valid, _ = settings.is_valid()
        self.assertTrue(valid)
        p = path.join(sublime.packages_path(),
                      "User",
                      "EasyClangComplete.sublime-settings")
        if path.exists(p):
            user = sublime.load_resource(
                "Packages/User/EasyClangComplete.sublime-settings")
            if "common_flags" in user:
                # The user modified the default common flags, just skip the
                # next few tests.
                return
        initial_common_flags = list(settings.common_flags)
        settings = manager.settings_for_view(self.view)
        dirs = settings.common_flags
        self.assertTrue(len(initial_common_flags) <= len(dirs))
        # First default flag survives the view update; the second one is
        # expected to be filtered out.
        reference_flag_0 = Flag.Builder().from_unparsed_string(
            initial_common_flags[0]).build()
        self.assertIn(reference_flag_0, dirs)
        reference_flag_1 = Flag.Builder().from_unparsed_string(
            initial_common_flags[1]).build()
        self.assertNotIn(reference_flag_1, dirs)
| 35.448
| 75
| 0.612051
|
import sublime
import imp
from os import path
from EasyClangComplete.tests.gui_test_wrapper import GuiTestWrapper
from EasyClangComplete.plugin.settings import settings_manager
from EasyClangComplete.plugin.settings import settings_storage
from EasyClangComplete.plugin.utils import flag
imp.reload(settings_manager)
imp.reload(settings_storage)
imp.reload(flag)
SettingsManager = settings_manager.SettingsManager
SettingsStorage = settings_storage.SettingsStorage
Flag = flag.Flag
class test_settings(GuiTestWrapper):
def test_setup_view(self):
file_name = path.join(path.dirname(__file__),
'test_files',
'test.cpp')
self.check_view(file_name)
def test_init(self):
manager = SettingsManager()
settings = manager.user_settings()
self.assertIsNotNone(settings.verbose)
self.assertIsNotNone(settings.triggers)
self.assertIsNotNone(settings.common_flags)
self.assertIsNotNone(settings.clang_binary)
self.assertIsNotNone(settings.flags_sources)
self.assertIsNotNone(settings.show_errors)
self.assertIsNotNone(settings.valid_lang_syntaxes)
def test_valid(self):
manager = SettingsManager()
settings = manager.user_settings()
valid, _ = settings.is_valid()
self.assertTrue(valid)
def test_parse_cmake_flags(self):
file_name = path.join(path.dirname(__file__),
'test_files',
'test_wrong_triggers.cpp')
self.set_up_view(file_name)
current_folder = path.dirname(__file__)
flags_sources = [
{
"file": "CMakeLists.txt",
"flags": [
"-DBLAH={}/*".format(current_folder),
"-DSMTH=ON",
"-D XXX=1",
"-D FLAG=word"
]
}
]
self.view.settings().set("flags_sources", flags_sources)
settings = SettingsManager().user_settings()
settings.update_from_view(self.view, project_specific=False)
valid, _ = settings.is_valid()
self.assertTrue(valid)
self.assertEquals(len(settings.flags_sources), 1)
entry = settings.flags_sources[0]
self.assertIn('flags', entry)
flags = entry['flags']
self.assertEquals(len(flags), 4)
self.assertIn('-DSMTH=ON', flags)
self.assertIn('-D FLAG=word', flags)
self.assertIn('-D XXX=1', flags)
import glob
all_files = glob.glob(path.join(current_folder, "*"))
for file in all_files:
self.assertIn(file, flags[0])
def test_populate_flags(self):
file_name = path.join(path.dirname(__file__),
'test_files',
'test_wrong_triggers.cpp')
self.set_up_view(file_name)
manager = SettingsManager()
settings = manager.user_settings()
valid, _ = settings.is_valid()
self.assertTrue(valid)
p = path.join(sublime.packages_path(),
"User",
"EasyClangComplete.sublime-settings")
if path.exists(p):
user = sublime.load_resource(
"Packages/User/EasyClangComplete.sublime-settings")
if "common_flags" in user:
return
initial_common_flags = list(settings.common_flags)
settings = manager.settings_for_view(self.view)
dirs = settings.common_flags
self.assertTrue(len(initial_common_flags) <= len(dirs))
reference_flag_0 = Flag.Builder().from_unparsed_string(
initial_common_flags[0]).build()
self.assertIn(reference_flag_0, dirs)
reference_flag_1 = Flag.Builder().from_unparsed_string(
initial_common_flags[1]).build()
self.assertNotIn(reference_flag_1, dirs)
| true
| true
|
f71792385df7b999e78480b6b8bb47ec29d5d377
| 922
|
py
|
Python
|
arrays/countNumberOfPairsWithAbsoluteDifferenceK.py
|
kushvr7/High-On-DSA
|
d424bd48e35ebd1a588d96c1c0dcb5a9c47a0ef1
|
[
"MIT"
] | 76
|
2021-12-12T08:42:20.000Z
|
2022-03-31T19:48:46.000Z
|
arrays/countNumberOfPairsWithAbsoluteDifferenceK.py
|
kushvr7/High-On-DSA
|
d424bd48e35ebd1a588d96c1c0dcb5a9c47a0ef1
|
[
"MIT"
] | 4
|
2022-01-04T09:58:39.000Z
|
2022-03-30T17:00:39.000Z
|
arrays/countNumberOfPairsWithAbsoluteDifferenceK.py
|
kushvr7/High-On-DSA
|
d424bd48e35ebd1a588d96c1c0dcb5a9c47a0ef1
|
[
"MIT"
] | 13
|
2021-12-12T14:44:41.000Z
|
2022-03-10T14:08:20.000Z
|
# https://leetcode.com/problems/count-number-of-pairs-with-absolute-difference-k/
from collections import Counter
class Solution(object):
    """Count pairs (i, j) with i < j and |nums[i] - nums[j]| == k.

    https://leetcode.com/problems/count-number-of-pairs-with-absolute-difference-k/

    Note: the original file defined two methods with the same name
    ``countKDifference``, so the brute-force version was silently
    shadowed and unreachable; it is kept here under a distinct name.
    """

    def countKDifference_brute_force(self, nums, k):
        """O(n^2) reference implementation.

        :type nums: List[int]
        :type k: int
        :rtype: int
        """
        count = 0
        for i in range(len(nums)):
            # Start at i + 1 so each unordered pair is examined once and
            # the (i, i) self-pair is never tested (matters when k == 0).
            for j in range(i + 1, len(nums)):
                if abs(nums[i] - nums[j]) == k:
                    count += 1
        return count

    def countKDifference(self, nums, k):
        """O(n) single pass using a frequency counter.

        Each element x pairs with previously seen values x - k and x + k.
        Assumes k >= 1 (per the problem constraints); for k == 0 the two
        buckets coincide and pairs would be double-counted.

        :type nums: List[int]
        :type k: int
        :rtype: int
        """
        seen = Counter()
        count = 0
        for x in nums:
            # Count pairs with earlier elements before recording x itself.
            count += seen[x - k] + seen[x + k]
            seen[x] += 1
        return count
| 23.05
| 81
| 0.452278
|
from collections import Counter
class Solution(object):
def countKDifference(self, nums, k):
count =0
for i in range(len(nums)):
for j in range(i, len(nums)):
if abs(nums[i]-nums[j])==k:
count +=1
return count
def countKDifference(self, nums, k):
dictionary =Counter()
count=0
for i in nums:
dictionary[i]+=1
count += dictionary[i-k] + dictionary[i+k]
return count
| true
| true
|
f71792d21101ca8f9fdb2b92a64c021d5e816fea
| 28,771
|
py
|
Python
|
pyspark/bigdl/transform/vision/image.py
|
twicoder/BigDL
|
f065db372e1c682fa4a7903e287bba21d5f46750
|
[
"Apache-2.0"
] | 55
|
2018-01-12T01:43:29.000Z
|
2021-03-09T02:35:56.000Z
|
pyspark/bigdl/transform/vision/image.py
|
jason-hzw/BigDL
|
ef4f4137965147e2bc59e41f40c4acbb50eeda97
|
[
"Apache-2.0"
] | 4
|
2018-01-15T07:34:41.000Z
|
2018-01-16T05:46:12.000Z
|
pyspark/bigdl/transform/vision/image.py
|
jason-hzw/BigDL
|
ef4f4137965147e2bc59e41f40c4acbb50eeda97
|
[
"Apache-2.0"
] | 22
|
2018-01-15T14:18:15.000Z
|
2019-12-16T18:51:33.000Z
|
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from bigdl.util.common import JavaValue
from bigdl.util.common import callBigDlFunc
from bigdl.util.common import *
if sys.version >= '3':
long = int
unicode = str
class FeatureTransformer(JavaValue):
    """
    FeatureTransformer is a transformer that transforms an ImageFeature.

    The actual work is delegated to the JVM side via callBigDlFunc; this
    class only holds the Java object reference in self.value.
    """
    def __init__(self, bigdl_type="float", *args):
        # Instantiate the corresponding JVM class; extra args are
        # forwarded to its constructor.
        self.value = callBigDlFunc(
            bigdl_type, JavaValue.jvm_class_constructor(self), *args)

    def transform(self, image_feature, bigdl_type="float"):
        """
        Transform a single ImageFeature in place and return it.
        """
        callBigDlFunc(bigdl_type, "transformImageFeature", self.value, image_feature)
        return image_feature

    def __call__(self, image_frame, bigdl_type="float"):
        """
        Transform an ImageFrame, returning a new ImageFrame wrapper.
        """
        jframe = callBigDlFunc(bigdl_type,
                               "transformImageFrame", self.value, image_frame)
        return ImageFrame(jvalue=jframe)
class Pipeline(FeatureTransformer):
    """
    Pipeline of FeatureTransformer: applies the given transformers in order.

    :param transformers: list of FeatureTransformer instances
    :param bigdl_type: numeric type used on the JVM side ("float"/"double")
    """
    def __init__(self, transformers, bigdl_type="float"):
        for transformer in transformers:
            # isinstance accepts FeatureTransformer itself and subclasses at
            # any depth; the previous check compared only the name of the
            # first direct base class, which rejected both.
            assert isinstance(transformer, FeatureTransformer), \
                "the transformer should be a subclass of FeatureTransformer"
        super(Pipeline, self).__init__(bigdl_type, transformers)
class ImageFeature(JavaValue):
    """
    Each ImageFeature keeps information about a single image.

    It can include various status of an image, e.g. original bytes read
    from an image file, an OpenCV mat, pixels in a float array, image
    label, meta data and so on. On the JVM side it uses a HashMap to
    store all these data; the key is a string that identifies the
    corresponding value.
    """
    def __init__(self, image=None, label=None, path=None, bigdl_type="float"):
        # Convert numpy arrays to JTensors only when provided; None is
        # passed through so the JVM side can create an empty feature.
        image_tensor = JTensor.from_ndarray(image) if image is not None else None
        label_tensor = JTensor.from_ndarray(label) if label is not None else None
        self.bigdl_type = bigdl_type
        self.value = callBigDlFunc(
            bigdl_type, JavaValue.jvm_class_constructor(self), image_tensor, label_tensor, path)

    def get_image(self, float_key="floats", to_chw=True):
        """
        Get the image as an ndarray from this ImageFeature.

        :param float_key: key under which the float pixels are stored
        :param to_chw: if True, return channel-first (CHW) layout
        """
        tensor = callBigDlFunc(self.bigdl_type, "imageFeatureToImageTensor", self.value,
                               float_key, to_chw)
        return tensor.to_ndarray()

    def get_label(self):
        """
        Get the label as an ndarray from this ImageFeature.
        """
        label = callBigDlFunc(self.bigdl_type, "imageFeatureToLabelTensor", self.value)
        return label.to_ndarray()

    def keys(self):
        """
        Get the key set (list of stored entry names) from this ImageFeature.
        """
        return callBigDlFunc(self.bigdl_type, "imageFeatureGetKeys", self.value)
class ImageFrame(JavaValue):
    """
    ImageFrame wraps a set of ImageFeature.

    Depending on the wrapped JVM object it delegates to either a
    LocalImageFrame (plain list) or a DistributedImageFrame (RDD-backed).
    """
    def __init__(self, jvalue, bigdl_type="float"):
        self.value = jvalue
        self.bigdl_type = bigdl_type
        # Pick the concrete delegate based on the JVM object's locality.
        if self.is_local():
            self.image_frame = LocalImageFrame(jvalue=self.value)
        else:
            self.image_frame = DistributedImageFrame(jvalue=self.value)

    @classmethod
    def read(cls, path, sc=None, min_partitions=1, bigdl_type="float"):
        """
        Read images as an ImageFrame.

        If sc is defined, read images as a DistributedImageFrame from the
        local file system or HDFS; if sc is None, read images as a
        LocalImageFrame from the local file system.

        :param path: path to read images from.
            If sc is defined, path can be local or HDFS; wildcard
            characters are supported.
            If sc is None, path is a local directory/image file/image file
            with wildcard characters.
        :param sc: SparkContext
        :param min_partitions: a suggestion value of the minimal splitting
            number for input data.
        :return: ImageFrame
        """
        return ImageFrame(jvalue=callBigDlFunc(bigdl_type, "read", path, sc, min_partitions))

    @classmethod
    def read_parquet(cls, path, sc, bigdl_type="float"):
        """
        Read a parquet file as a DistributedImageFrame.
        """
        return DistributedImageFrame(jvalue=callBigDlFunc(bigdl_type, "readParquet", path, sc))

    @classmethod
    def write_parquet(cls, path, output, sc, partition_num = 1, bigdl_type="float"):
        """
        Write an ImageFrame as a parquet file.
        """
        return callBigDlFunc(bigdl_type, "writeParquet", path, output, sc, partition_num)

    def is_local(self):
        """
        Whether this is a LocalImageFrame.
        """
        return callBigDlFunc(self.bigdl_type, "isLocal", self.value)

    def is_distributed(self):
        """
        Whether this is a DistributedImageFrame.
        """
        return callBigDlFunc(self.bigdl_type, "isDistributed", self.value)

    def transform(self, transformer, bigdl_type="float"):
        """
        Transform this ImageFrame with the given transformer, in place.
        Returns self for chaining.
        """
        self.value = callBigDlFunc(bigdl_type,
                                   "transformImageFrame", transformer, self.value)
        return self

    def get_image(self, float_key="floats", to_chw=True):
        """
        Get the images from this ImageFrame (delegated).
        """
        return self.image_frame.get_image(float_key, to_chw)

    def get_label(self):
        """
        Get the labels from this ImageFrame (delegated).
        """
        return self.image_frame.get_label()

    def get_predict(self, key="predict"):
        """
        Get predictions from this ImageFrame (delegated).
        """
        return self.image_frame.get_predict(key)

    def get_sample(self):
        """
        Get samples from this ImageFrame (delegated).
        """
        return self.image_frame.get_sample()

    def get_uri(self):
        """
        Get the image URIs from this ImageFrame (delegated).
        """
        return self.image_frame.get_uri()

    def set_label(self, label, bigdl_type="float"):
        """
        Set the label for this ImageFrame on the JVM side.
        """
        return callBigDlFunc(bigdl_type,
                             "setLabel", label, self.value)

    def random_split(self, weights):
        """
        Randomly split this ImageFrame into several frames according to weights.

        :param weights: weights for each resulting ImageFrame
        :return: list of ImageFrame
        """
        jvalues = self.image_frame.random_split(weights)
        return [ImageFrame(jvalue) for jvalue in jvalues]
class LocalImageFrame(ImageFrame):
    """
    LocalImageFrame wraps a list of ImageFeature held in local memory.
    """

    def __init__(self, image_list=None, label_list=None, jvalue=None, bigdl_type="float"):
        assert jvalue or image_list, "jvalue and image_list cannot be None in the same time"
        if jvalue:
            self.value = jvalue
        else:
            # init from image ndarray list and label list (optional)
            image_tensor_list = map(lambda image: JTensor.from_ndarray(image), image_list)
            label_tensor_list = map(lambda label: JTensor.from_ndarray(label), label_list) if label_list else None
            self.value = callBigDlFunc(bigdl_type, JavaValue.jvm_class_constructor(self),
                                       image_tensor_list, label_tensor_list)
        self.bigdl_type = bigdl_type

    def get_image(self, float_key="floats", to_chw=True):
        """
        get image list from ImageFrame
        """
        tensors = callBigDlFunc(self.bigdl_type,
                                "localImageFrameToImageTensor", self.value, float_key, to_chw)
        return map(lambda tensor: tensor.to_ndarray(), tensors)

    def get_label(self):
        """
        get label list from ImageFrame
        """
        labels = callBigDlFunc(self.bigdl_type, "localImageFrameToLabelTensor", self.value)
        return map(lambda tensor: tensor.to_ndarray(), labels)

    def get_predict(self, key="predict"):
        """
        get prediction list from ImageFrame
        """
        predicts = callBigDlFunc(self.bigdl_type, "localImageFrameToPredict", self.value, key)
        return map(lambda predict: (predict[0], predict[1].to_ndarray()) if predict[1] else (predict[0], None), predicts)

    def get_sample(self, key="sample"):
        """
        get sample list from ImageFrame
        """
        return callBigDlFunc(self.bigdl_type, "localImageFrameToSample", self.value, key)

    def get_uri(self, key = "uri"):
        """
        get uri list from ImageFrame
        """
        return callBigDlFunc(self.bigdl_type, "localImageFrameToUri", self.value, key)

    def random_split(self, weights):
        """
        Random split is only supported on DistributedImageFrame.

        :raise NotImplementedError: always
        """
        # Raising a plain string is itself a TypeError in Python 3
        # ("exceptions must derive from BaseException"); raise a real exception.
        raise NotImplementedError("random split not supported in LocalImageFrame")
class DistributedImageFrame(ImageFrame):
"""
DistributedImageFrame wraps an RDD of ImageFeature
"""
def __init__(self, image_rdd=None, label_rdd=None, jvalue=None, bigdl_type="float"):
assert jvalue or image_rdd, "jvalue and image_rdd cannot be None in the same time"
if jvalue:
self.value = jvalue
else:
# init from image ndarray rdd and label rdd(optional)
image_tensor_rdd = image_rdd.map(lambda image: JTensor.from_ndarray(image))
label_tensor_rdd = label_rdd.map(lambda label: JTensor.from_ndarray(label)) if label_rdd else None
self.value = callBigDlFunc(bigdl_type, JavaValue.jvm_class_constructor(self),
image_tensor_rdd, label_tensor_rdd)
self.bigdl_type = bigdl_type
def get_image(self, float_key="floats", to_chw=True):
"""
get image rdd from ImageFrame
"""
tensor_rdd = callBigDlFunc(self.bigdl_type,
"distributedImageFrameToImageTensorRdd", self.value, float_key, to_chw)
return tensor_rdd.map(lambda tensor: tensor.to_ndarray())
def get_label(self):
"""
get label rdd from ImageFrame
"""
tensor_rdd = callBigDlFunc(self.bigdl_type, "distributedImageFrameToLabelTensorRdd", self.value)
return tensor_rdd.map(lambda tensor: tensor.to_ndarray())
def get_predict(self, key="predict"):
"""
get prediction rdd from ImageFrame
"""
predicts = callBigDlFunc(self.bigdl_type, "distributedImageFrameToPredict", self.value, key)
return predicts.map(lambda predict: (predict[0], predict[1].to_ndarray()) if predict[1] else (predict[0], None))
def get_sample(self, key="sample"):
return callBigDlFunc(self.bigdl_type, "distributedImageFrameToSample", self.value, key)
def get_uri(self, key = "uri"):
return callBigDlFunc(self.bigdl_type, "distributedImageFrameToUri", self.value, key)
def random_split(self, weights):
return callBigDlFunc(self.bigdl_type, "distributedImageFrameRandomSplit", self.value, weights)
class HFlip(FeatureTransformer):
"""
Flip the image horizontally
"""
def __init__(self, bigdl_type="float"):
super(HFlip, self).__init__(bigdl_type)
class Resize(FeatureTransformer):
"""
Resize image
:param resize_h height after resize
:param resize_w width after resize
:param resize_mode if resizeMode = -1, random select a mode from (Imgproc.INTER_LINEAR,
Imgproc.INTER_CUBIC, Imgproc.INTER_AREA, Imgproc.INTER_NEAREST, Imgproc.INTER_LANCZOS4)
:param use_scale_factor if true, scale factor fx and fy is used, fx = fy = 0
note that the result of the following are different
Imgproc.resize(mat, mat, new Size(resizeWH, resizeWH), 0, 0, Imgproc.INTER_LINEAR)
Imgproc.resize(mat, mat, new Size(resizeWH, resizeWH))
"""
def __init__(self, resize_h, resize_w, resize_mode = 1, use_scale_factor=True,
bigdl_type="float"):
super(Resize, self).__init__(bigdl_type, resize_h, resize_w, resize_mode, use_scale_factor)
class Brightness(FeatureTransformer):
"""
adjust the image brightness
:param deltaLow brightness parameter: low bound
:param deltaHigh brightness parameter: high bound
"""
def __init__(self, delta_low, delta_high, bigdl_type="float"):
super(Brightness, self).__init__(bigdl_type, delta_low, delta_high)
class ChannelOrder(FeatureTransformer):
"""
random change the channel of an image
"""
def __init__(self, bigdl_type="float"):
super(ChannelOrder, self).__init__(bigdl_type)
class Contrast(FeatureTransformer):
"""
Adjust the image contrast
:param delta_low contrast parameter low bound
:param delta_high contrast parameter high bound
"""
def __init__(self, delta_low, delta_high, bigdl_type="float"):
super(Contrast, self).__init__(bigdl_type, delta_low, delta_high)
class Saturation(FeatureTransformer):
"""
Adjust image saturation
"""
def __init__(self, delta_low, delta_high, bigdl_type="float"):
super(Saturation, self).__init__(bigdl_type, delta_low, delta_high)
class Hue(FeatureTransformer):
"""
Adjust image hue
:param delta_low hue parameter: low bound
:param delta_high hue parameter: high bound
"""
def __init__(self, delta_low, delta_high, bigdl_type="float"):
super(Hue, self).__init__(bigdl_type, delta_low, delta_high)
class ChannelNormalize(FeatureTransformer):
"""
image channel normalize
:param mean_r mean value in R channel
:param mean_g mean value in G channel
:param meanB_b mean value in B channel
:param std_r std value in R channel
:param std_g std value in G channel
:param std_b std value in B channel
"""
def __init__(self, mean_r, mean_g, mean_b, std_r=1.0, std_g=1.0, std_b=1.0, bigdl_type="float"):
super(ChannelNormalize, self).__init__(bigdl_type, mean_r, mean_g, mean_b, std_r, std_g, std_b)
class PixelNormalize(FeatureTransformer):
"""
Pixel level normalizer, data(i) = data(i) - mean(i)
:param means pixel level mean, following H * W * C order
"""
def __init__(self, means, bigdl_type="float"):
super(PixelNormalize, self).__init__(bigdl_type, means)
class RandomCrop(FeatureTransformer):
"""
Random crop a `cropWidth` x `cropHeight` patch from an image.
The patch size should be less than the image size.
:param crop_width width after crop
:param crop_height height after crop
:param is_clip whether to clip the roi to image boundaries
"""
def __init__(self, crop_width, crop_height, is_clip=True, bigdl_type="float"):
super(RandomCrop, self).__init__(bigdl_type, crop_width, crop_height, is_clip)
class CenterCrop(FeatureTransformer):
"""
Crop a `cropWidth` x `cropHeight` patch from center of image.
The patch size should be less than the image size.
:param crop_width width after crop
:param crop_height height after crop
:param is_clip clip cropping box boundary
"""
def __init__(self, crop_width, crop_height, is_clip=True, bigdl_type="float"):
super(CenterCrop, self).__init__(bigdl_type, crop_width, crop_height, is_clip)
class FixedCrop(FeatureTransformer):
"""
Crop a fixed area of image
:param x1 start in width
:param y1 start in height
:param x2 end in width
:param y2 end in height
:param normalized whether args are normalized, i.e. in range [0, 1]
:param is_clip whether to clip the roi to image boundaries
"""
def __init__(self, x1, y1, x2, y2, normalized=True, is_clip=True, bigdl_type="float"):
super(FixedCrop, self).__init__(bigdl_type, x1, y1, x2, y2, normalized, is_clip)
class DetectionCrop(FeatureTransformer):
"""
Crop from object detections, each image should has a tensor detection,
which is stored in ImageFeature
:param roi_key key that map a tensor detection
:param normalized whether is detection is normalized, i.e. in range [0, 1]
"""
def __init__(self, roi_key, normalized=True, bigdl_type="float"):
super(DetectionCrop, self).__init__(bigdl_type, roi_key, normalized)
class Expand(FeatureTransformer):
"""
expand image, fill the blank part with the meanR, meanG, meanB
:param means_r means in R channel
:param means_g means in G channel
:param means_b means in B channel
:param min_expand_ratio min expand ratio
:param max_expand_ratio max expand ratio
"""
def __init__(self, means_r=123, means_g=117, means_b=104,
min_expand_ratio=1.0,
max_expand_ratio=4.0, bigdl_type="float"):
super(Expand, self).__init__(bigdl_type, means_r, means_g, means_b,
min_expand_ratio, max_expand_ratio)
class Filler(FeatureTransformer):
"""
Fill part of image with certain pixel value
:param start_x start x ratio
:param start_y start y ratio
:param end_x end x ratio
:param end_y end y ratio
:param value filling value
"""
def __init__(self, start_x, start_y, end_x, end_y, value = 255, bigdl_type="float"):
super(Filler, self).__init__(bigdl_type, start_x,
start_y,
end_x,
end_y,
value)
class RandomTransformer(FeatureTransformer):
"""
It is a wrapper for transformers to control the transform probability
:param transformer transformer to apply randomness
:param prob max prob
"""
def __init__(self, transformer, prob, bigdl_type="float"):
super(RandomTransformer, self).__init__(bigdl_type, transformer, prob)
class ColorJitter(FeatureTransformer):
"""
Random adjust brightness, contrast, hue, saturation
:param brightness_prob probability to adjust brightness
:param brightness_delta brightness parameter
:param contrast_prob probability to adjust contrast
:param contrast_lower contrast lower parameter
:param contrast_upper contrast upper parameter
:param hue_prob probability to adjust hue
:param hue_delta hue parameter
:param saturation_prob probability to adjust saturation
:param saturation_lower saturation lower parameter
:param saturation_upper saturation upper parameter
:param random_order_prob random order for different operation
:param shuffle shuffle the transformers
"""
def __init__(self, brightness_prob = 0.5,
brightness_delta = 32.0,
contrast_prob = 0.5,
contrast_lower = 0.5,
contrast_upper = 1.5,
hue_prob = 0.5,
hue_delta = 18.0,
saturation_prob = 0.5,
saturation_lower = 0.5,
saturation_upper = 1.5,
random_order_prob = 0.0,
shuffle = False,
bigdl_type="float"):
super(ColorJitter, self).__init__(bigdl_type, brightness_prob,
brightness_delta,
contrast_prob,
contrast_lower,
contrast_upper,
hue_prob,
hue_delta,
saturation_prob,
saturation_lower,
saturation_upper,
random_order_prob,
shuffle)
class RandomSampler(FeatureTransformer):
    """
    Random sample a bounding box given some constraints and crop the image
    This is used in SSD training augmentation
    """

    def __init__(self, bigdl_type="float"):
        # `bigdl_type` was previously referenced without being a parameter,
        # so constructing RandomSampler() raised NameError.
        super(RandomSampler, self).__init__(bigdl_type)
class RoiProject(FeatureTransformer):
"""
Project gt boxes onto the coordinate system defined by image boundary
:param need_meet_center_constraint whether need to meet center constraint, i.e., the center of gt box need be within image boundary
"""
def __init__(self, need_meet_center_constraint, bigdl_type="float"):
super(RoiProject, self).__init__(bigdl_type, need_meet_center_constraint)
class RoiHFlip(FeatureTransformer):
"""
horizontally flip the roi
:param normalized whether the roi is normalized, i.e. in range [0, 1]
"""
def __init__(self, normalized=True, bigdl_type="float"):
super(RoiHFlip, self).__init__(bigdl_type, normalized)
class RoiResize(FeatureTransformer):
"""
resize the roi according to scale
:param normalized whether the roi is normalized, i.e. in range [0, 1]
"""
def __init__(self, normalized=True, bigdl_type="float"):
super(RoiResize, self).__init__(bigdl_type, normalized)
class RoiNormalize(FeatureTransformer):
"""
Normalize Roi to [0, 1]
"""
def __init__(self, bigdl_type="float"):
super(RoiNormalize, self).__init__(bigdl_type)
class MatToFloats(FeatureTransformer):
"""
Transform OpenCVMat to float array, note that in this transformer, the mat is released
:param valid_height valid height in case the mat is invalid
:param valid_width valid width in case the mat is invalid
:param valid_channel valid channel in case the mat is invalid
:param out_key key to store float array
:param share_buffer share buffer of output
"""
def __init__(self, valid_height=300, valid_width=300, valid_channel=300,
out_key = "floats", share_buffer=True, bigdl_type="float"):
super(MatToFloats, self).__init__(bigdl_type, valid_height, valid_width, valid_channel,
out_key, share_buffer)
class MatToTensor(FeatureTransformer):
"""
transform opencv mat to tensor
:param to_rgb BGR to RGB (default is BGR)
:param tensor_key key to store transformed tensor
"""
def __init__(self, to_rgb=False, tensor_key="imageTensor", bigdl_type="float"):
super(MatToTensor, self).__init__(bigdl_type, to_rgb, tensor_key)
class AspectScale(FeatureTransformer):
"""
Resize the image, keep the aspect ratio. scale according to the short edge
:param min_size scale size, apply to short edge
:param scale_multiple_of make the scaled size multiple of some value
:param max_size max size after scale
:param resize_mode if resizeMode = -1, random select a mode from
(Imgproc.INTER_LINEAR, Imgproc.INTER_CUBIC, Imgproc.INTER_AREA,
Imgproc.INTER_NEAREST, Imgproc.INTER_LANCZOS4)
:param use_scale_factor if true, scale factor fx and fy is used, fx = fy = 0
:aram min_scale control the minimum scale up for image
"""
def __init__(self, min_size, scale_multiple_of = 1, max_size = 1000,
resize_mode = 1, use_scale_factor=True, min_scale=-1.0,
bigdl_type="float"):
super(AspectScale, self).__init__(bigdl_type, min_size, scale_multiple_of, max_size,
resize_mode, use_scale_factor, min_scale)
class RandomAspectScale(FeatureTransformer):
"""
resize the image by randomly choosing a scale
:param scales array of scale options that for random choice
:param scaleMultipleOf Resize test images so that its width and height are multiples of
:param maxSize Max pixel size of the longest side of a scaled input image
"""
def __init__(self, scales, scale_multiple_of = 1, max_size = 1000, bigdl_type="float"):
super(RandomAspectScale, self).__init__(bigdl_type, scales, scale_multiple_of, max_size)
class BytesToMat(FeatureTransformer):
"""
Transform byte array(original image file in byte) to OpenCVMat
:param byte_key key that maps byte array
"""
def __init__(self, byte_key = "bytes", bigdl_type="float"):
super(BytesToMat, self).__init__(bigdl_type, byte_key)
class ImageFrameToSample(FeatureTransformer):
    """
    transform imageframe to samples
    :param input_keys keys that maps inputs (each input should be a tensor),
        defaults to ["imageTensor"]
    :param target_keys keys that maps targets (each target should be a tensor)
    :param sample_key key to store sample
    """

    def __init__(self, input_keys=None, target_keys=None,
                 sample_key="sample", bigdl_type="float"):
        # Avoid a mutable list as default argument; None preserves the old
        # default of ["imageTensor"] for callers that pass nothing.
        if input_keys is None:
            input_keys = ["imageTensor"]
        super(ImageFrameToSample, self).__init__(bigdl_type, input_keys, target_keys, sample_key)
class PixelBytesToMat(FeatureTransformer):
"""
Transform byte array(pixels in byte) to OpenCVMat
:param byte_key key that maps byte array
"""
def __init__(self, byte_key = "bytes", bigdl_type="float"):
super(PixelBytesToMat, self).__init__(bigdl_type, byte_key)
class FixExpand(FeatureTransformer):
"""
Expand image with given expandHeight and expandWidth,
put the original image to the center of expanded image
:param expand_height height expand to
:param expand_width width expand to
"""
def __init__(self, expand_height, expand_width, bigdl_type="float"):
super(FixExpand, self).__init__(bigdl_type, expand_height, expand_width)
class ChannelScaledNormalizer(FeatureTransformer):
"""
Scaled image at channel level with offset and scale
:param mean_r : offset for R channel
:param mean_g : offset for G channel
:param mean_b: offset for B channel
:param scale: scaling factor for all channels
"""
def __init__(self, mean_r, mean_g, mean_b, scale, bigdl_type="float"):
super(ChannelScaledNormalizer, self).__init__(bigdl_type, mean_r, mean_g, mean_b, scale)
class RandomAlterAspect(FeatureTransformer):
"""
Apply random crop based on area ratio and resize to cropLenth size
:param min_area_ratio min area ratio
:param max_area_ratio max area ratio
:param min_aspect_ratio_change factor applied to ratio area
:param interp_mode interp mode applied in resize
:param crop_length final size resized to
"""
def __init__(self, min_area_ratio,
max_area_ratio,
min_aspect_ratio_change,
interp_mode,
crop_length, bigdl_type="float"):
super(RandomAlterAspect, self).__init__(bigdl_type, min_area_ratio,
max_area_ratio,
min_aspect_ratio_change,
interp_mode,
crop_length)
class RandomCropper(FeatureTransformer):
"""
Random cropper on uniform distribution with fixed height & width
:param crop_w width cropped to
:param crop_h height cropped to
:param mirror whether mirror
:param cropper_method crop method
:param channels total channels
"""
def __init__(self, crop_w, crop_h, mirror, cropper_method, channels, bigdl_type="float"):
super(RandomCropper, self).__init__(bigdl_type, crop_w, crop_h, mirror, cropper_method, channels)
class RandomResize(FeatureTransformer):
"""
Random resize between minSize and maxSize and scale height and width to each other
:param min_size min size to resize to
:param max_size max size to resize to
"""
def __init__(self, min_size, max_size, bigdl_type="float"):
super(RandomResize, self).__init__(bigdl_type, min_size, max_size)
class SeqFileFolder(JavaValue):
@classmethod
def files_to_image_frame(cls,
url,
sc,
class_num,
partition_num=-1,
bigdl_type="float"):
"""
Extract hadoop sequence files from an HDFS path as ImageFrame
:param url: sequence files folder path
:param sc: spark context
:param class_num: class number of data
:param partition_num: partition number, default: Engine.nodeNumber() * Engine.coreNumber()
"""
jvalue = callBigDlFunc(bigdl_type,
"seqFilesToImageFrame",
url,
sc,
class_num,
partition_num)
return ImageFrame(jvalue=jvalue)
| 38.310253
| 135
| 0.652184
|
import sys
from bigdl.util.common import JavaValue
from bigdl.util.common import callBigDlFunc
from bigdl.util.common import *
if sys.version >= '3':
long = int
unicode = str
class FeatureTransformer(JavaValue):
    """
    Base class of image transformers; wraps the corresponding JVM-side
    transformer object created from this class's name.
    """

    def __init__(self, bigdl_type="float", *args):
        # Instantiate the JVM transformer matching this Python class.
        self.value = callBigDlFunc(
            bigdl_type, JavaValue.jvm_class_constructor(self), *args)

    def transform(self, image_feature, bigdl_type="float"):
        """
        Transform a single ImageFeature in place and return it.
        """
        callBigDlFunc(bigdl_type, "transformImageFeature", self.value, image_feature)
        return image_feature

    def __call__(self, image_frame, bigdl_type="float"):
        """
        Apply this transformer to an ImageFrame and return a new ImageFrame.
        """
        jframe = callBigDlFunc(bigdl_type,
                               "transformImageFrame", self.value, image_frame)
        return ImageFrame(jvalue=jframe)
class Pipeline(FeatureTransformer):
    """
    Chain several FeatureTransformers together; they are applied sequentially.

    :param transformers: list of FeatureTransformer instances to compose
    """

    def __init__(self, transformers, bigdl_type="float"):
        for transformer in transformers:
            # isinstance also accepts indirect subclasses, which the previous
            # `__bases__[0].__name__` string comparison wrongly rejected.
            assert isinstance(transformer, FeatureTransformer), \
                "the transformer should be subclass of FeatureTransformer"
        super(Pipeline, self).__init__(bigdl_type, transformers)
class ImageFeature(JavaValue):
    """
    ImageFeature stores an image together with optional label and path.
    """

    def __init__(self, image=None, label=None, path=None, bigdl_type="float"):
        # Convert ndarrays to JTensors before handing them to the JVM side.
        image_tensor = JTensor.from_ndarray(image) if image is not None else None
        label_tensor = JTensor.from_ndarray(label) if label is not None else None
        self.bigdl_type = bigdl_type
        self.value = callBigDlFunc(
            bigdl_type, JavaValue.jvm_class_constructor(self), image_tensor, label_tensor, path)

    def get_image(self, float_key="floats", to_chw=True):
        """
        get image as ndarray from ImageFeature
        """
        tensor = callBigDlFunc(self.bigdl_type, "imageFeatureToImageTensor", self.value,
                               float_key, to_chw)
        return tensor.to_ndarray()

    def get_label(self):
        """
        get label as ndarray from ImageFeature
        """
        label = callBigDlFunc(self.bigdl_type, "imageFeatureToLabelTensor", self.value)
        return label.to_ndarray()

    def keys(self):
        """
        get key set from ImageFeature
        """
        return callBigDlFunc(self.bigdl_type, "imageFeatureGetKeys", self.value)
class ImageFrame(JavaValue):
    """
    ImageFrame wraps a set of ImageFeature; it delegates to a local or
    distributed backing implementation depending on where the data lives.
    """

    def __init__(self, jvalue, bigdl_type="float"):
        self.value = jvalue
        self.bigdl_type = bigdl_type
        # Pick the backend wrapper matching the JVM-side representation.
        if self.is_local():
            self.image_frame = LocalImageFrame(jvalue=self.value)
        else:
            self.image_frame = DistributedImageFrame(jvalue=self.value)

    @classmethod
    def read(cls, path, sc=None, min_partitions=1, bigdl_type="float"):
        """
        Read images as an ImageFrame.
        If sc is defined, read as DistributedImageFrame from local file system
        or HDFS; if sc is None, read as LocalImageFrame from local file system.

        :param path: path to read images from; wildcard characters are supported
        :param sc: SparkContext (optional)
        :param min_partitions: suggestion for the minimal splitting number
        :return: ImageFrame
        """
        return ImageFrame(jvalue=callBigDlFunc(bigdl_type, "read", path, sc, min_partitions))

    @classmethod
    def read_parquet(cls, path, sc, bigdl_type="float"):
        """
        Read a parquet file as a DistributedImageFrame.
        """
        return DistributedImageFrame(jvalue=callBigDlFunc(bigdl_type, "readParquet", path, sc))

    @classmethod
    def write_parquet(cls, path, output, sc, partition_num = 1, bigdl_type="float"):
        """
        Write an ImageFrame as a parquet file.
        """
        return callBigDlFunc(bigdl_type, "writeParquet", path, output, sc, partition_num)

    def is_local(self):
        """
        Whether this is a LocalImageFrame.
        """
        return callBigDlFunc(self.bigdl_type, "isLocal", self.value)

    def is_distributed(self):
        """
        Whether this is a DistributedImageFrame.
        """
        return callBigDlFunc(self.bigdl_type, "isDistributed", self.value)

    def transform(self, transformer, bigdl_type="float"):
        """
        Transform this ImageFrame in place and return self.
        """
        self.value = callBigDlFunc(bigdl_type,
                                   "transformImageFrame", transformer, self.value)
        return self

    def get_image(self, float_key="floats", to_chw=True):
        """
        Get images from the ImageFrame (delegates to the backend wrapper).
        """
        return self.image_frame.get_image(float_key, to_chw)

    def get_label(self):
        """
        Get labels from the ImageFrame.
        """
        return self.image_frame.get_label()

    def get_predict(self, key="predict"):
        """
        Get predictions from the ImageFrame.
        """
        return self.image_frame.get_predict(key)

    def get_sample(self):
        """
        Get samples from the ImageFrame.
        """
        return self.image_frame.get_sample()

    def get_uri(self):
        """
        Get uris from the ImageFrame.
        """
        return self.image_frame.get_uri()

    def set_label(self, label, bigdl_type="float"):
        """
        Set label for the ImageFrame.
        """
        return callBigDlFunc(bigdl_type,
                             "setLabel", label, self.value)

    def random_split(self, weights):
        """
        Randomly split this ImageFrame according to weights.

        :param weights: weights for each resulting ImageFrame
        :return: list of ImageFrame
        """
        jvalues = self.image_frame.random_split(weights)
        return [ImageFrame(jvalue) for jvalue in jvalues]
class LocalImageFrame(ImageFrame):
    """
    LocalImageFrame wraps a list of ImageFeature held in local memory.
    """

    def __init__(self, image_list=None, label_list=None, jvalue=None, bigdl_type="float"):
        assert jvalue or image_list, "jvalue and image_list cannot be None in the same time"
        if jvalue:
            self.value = jvalue
        else:
            # init from image ndarray list and label list (optional)
            image_tensor_list = map(lambda image: JTensor.from_ndarray(image), image_list)
            label_tensor_list = map(lambda label: JTensor.from_ndarray(label), label_list) if label_list else None
            self.value = callBigDlFunc(bigdl_type, JavaValue.jvm_class_constructor(self),
                                       image_tensor_list, label_tensor_list)
        self.bigdl_type = bigdl_type

    def get_image(self, float_key="floats", to_chw=True):
        """
        get image list from ImageFrame
        """
        tensors = callBigDlFunc(self.bigdl_type,
                                "localImageFrameToImageTensor", self.value, float_key, to_chw)
        return map(lambda tensor: tensor.to_ndarray(), tensors)

    def get_label(self):
        """
        get label list from ImageFrame
        """
        labels = callBigDlFunc(self.bigdl_type, "localImageFrameToLabelTensor", self.value)
        return map(lambda tensor: tensor.to_ndarray(), labels)

    def get_predict(self, key="predict"):
        """
        get prediction list from ImageFrame
        """
        predicts = callBigDlFunc(self.bigdl_type, "localImageFrameToPredict", self.value, key)
        return map(lambda predict: (predict[0], predict[1].to_ndarray()) if predict[1] else (predict[0], None), predicts)

    def get_sample(self, key="sample"):
        """
        get sample list from ImageFrame
        """
        return callBigDlFunc(self.bigdl_type, "localImageFrameToSample", self.value, key)

    def get_uri(self, key = "uri"):
        """
        get uri list from ImageFrame
        """
        return callBigDlFunc(self.bigdl_type, "localImageFrameToUri", self.value, key)

    def random_split(self, weights):
        """
        Random split is only supported on DistributedImageFrame.

        :raise NotImplementedError: always
        """
        # Raising a plain string is itself a TypeError in Python 3
        # ("exceptions must derive from BaseException"); raise a real exception.
        raise NotImplementedError("random split not supported in LocalImageFrame")
class DistributedImageFrame(ImageFrame):
    """
    DistributedImageFrame wraps an RDD of ImageFeature.
    """

    def __init__(self, image_rdd=None, label_rdd=None, jvalue=None, bigdl_type="float"):
        assert jvalue or image_rdd, "jvalue and image_rdd cannot be None in the same time"
        if jvalue:
            self.value = jvalue
        else:
            # init from an image ndarray rdd and an optional label rdd
            image_tensor_rdd = image_rdd.map(lambda image: JTensor.from_ndarray(image))
            label_tensor_rdd = label_rdd.map(lambda label: JTensor.from_ndarray(label)) if label_rdd else None
            self.value = callBigDlFunc(bigdl_type, JavaValue.jvm_class_constructor(self),
                                       image_tensor_rdd, label_tensor_rdd)
        self.bigdl_type = bigdl_type

    def get_image(self, float_key="floats", to_chw=True):
        """
        get image rdd from ImageFrame
        """
        tensor_rdd = callBigDlFunc(self.bigdl_type,
                                   "distributedImageFrameToImageTensorRdd", self.value, float_key, to_chw)
        return tensor_rdd.map(lambda tensor: tensor.to_ndarray())

    def get_label(self):
        """
        get label rdd from ImageFrame
        """
        tensor_rdd = callBigDlFunc(self.bigdl_type, "distributedImageFrameToLabelTensorRdd", self.value)
        return tensor_rdd.map(lambda tensor: tensor.to_ndarray())

    def get_predict(self, key="predict"):
        """
        get prediction rdd from ImageFrame
        """
        predicts = callBigDlFunc(self.bigdl_type, "distributedImageFrameToPredict", self.value, key)
        return predicts.map(lambda predict: (predict[0], predict[1].to_ndarray()) if predict[1] else (predict[0], None))

    def get_sample(self, key="sample"):
        """
        get sample rdd from ImageFrame
        """
        return callBigDlFunc(self.bigdl_type, "distributedImageFrameToSample", self.value, key)

    def get_uri(self, key = "uri"):
        """
        get uri rdd from ImageFrame
        """
        return callBigDlFunc(self.bigdl_type, "distributedImageFrameToUri", self.value, key)

    def random_split(self, weights):
        """
        Randomly split this DistributedImageFrame according to weights.
        """
        return callBigDlFunc(self.bigdl_type, "distributedImageFrameRandomSplit", self.value, weights)
class HFlip(FeatureTransformer):
def __init__(self, bigdl_type="float"):
super(HFlip, self).__init__(bigdl_type)
class Resize(FeatureTransformer):
def __init__(self, resize_h, resize_w, resize_mode = 1, use_scale_factor=True,
bigdl_type="float"):
super(Resize, self).__init__(bigdl_type, resize_h, resize_w, resize_mode, use_scale_factor)
class Brightness(FeatureTransformer):
def __init__(self, delta_low, delta_high, bigdl_type="float"):
super(Brightness, self).__init__(bigdl_type, delta_low, delta_high)
class ChannelOrder(FeatureTransformer):
def __init__(self, bigdl_type="float"):
super(ChannelOrder, self).__init__(bigdl_type)
class Contrast(FeatureTransformer):
def __init__(self, delta_low, delta_high, bigdl_type="float"):
super(Contrast, self).__init__(bigdl_type, delta_low, delta_high)
class Saturation(FeatureTransformer):
def __init__(self, delta_low, delta_high, bigdl_type="float"):
super(Saturation, self).__init__(bigdl_type, delta_low, delta_high)
class Hue(FeatureTransformer):
def __init__(self, delta_low, delta_high, bigdl_type="float"):
super(Hue, self).__init__(bigdl_type, delta_low, delta_high)
class ChannelNormalize(FeatureTransformer):
    """
    Image channel normalize.

    :param mean_r: mean value in R channel
    :param mean_g: mean value in G channel
    :param mean_b: mean value in B channel
    :param std_r: std value in R channel
    :param std_g: std value in G channel
    :param std_b: std value in B channel
    """

    def __init__(self, mean_r, mean_g, mean_b, std_r=1.0, std_g=1.0, std_b=1.0, bigdl_type="float"):
        super(ChannelNormalize, self).__init__(bigdl_type, mean_r, mean_g, mean_b, std_r, std_g, std_b)
class PixelNormalize(FeatureTransformer):
def __init__(self, means, bigdl_type="float"):
super(PixelNormalize, self).__init__(bigdl_type, means)
class RandomCrop(FeatureTransformer):
def __init__(self, crop_width, crop_height, is_clip=True, bigdl_type="float"):
super(RandomCrop, self).__init__(bigdl_type, crop_width, crop_height, is_clip)
class CenterCrop(FeatureTransformer):
def __init__(self, crop_width, crop_height, is_clip=True, bigdl_type="float"):
super(CenterCrop, self).__init__(bigdl_type, crop_width, crop_height, is_clip)
class FixedCrop(FeatureTransformer):
def __init__(self, x1, y1, x2, y2, normalized=True, is_clip=True, bigdl_type="float"):
super(FixedCrop, self).__init__(bigdl_type, x1, y1, x2, y2, normalized, is_clip)
class DetectionCrop(FeatureTransformer):
def __init__(self, roi_key, normalized=True, bigdl_type="float"):
super(DetectionCrop, self).__init__(bigdl_type, roi_key, normalized)
class Expand(FeatureTransformer):
def __init__(self, means_r=123, means_g=117, means_b=104,
min_expand_ratio=1.0,
max_expand_ratio=4.0, bigdl_type="float"):
super(Expand, self).__init__(bigdl_type, means_r, means_g, means_b,
min_expand_ratio, max_expand_ratio)
class Filler(FeatureTransformer):
def __init__(self, start_x, start_y, end_x, end_y, value = 255, bigdl_type="float"):
super(Filler, self).__init__(bigdl_type, start_x,
start_y,
end_x,
end_y,
value)
class RandomTransformer(FeatureTransformer):
def __init__(self, transformer, prob, bigdl_type="float"):
super(RandomTransformer, self).__init__(bigdl_type, transformer, prob)
class ColorJitter(FeatureTransformer):
def __init__(self, brightness_prob = 0.5,
brightness_delta = 32.0,
contrast_prob = 0.5,
contrast_lower = 0.5,
contrast_upper = 1.5,
hue_prob = 0.5,
hue_delta = 18.0,
saturation_prob = 0.5,
saturation_lower = 0.5,
saturation_upper = 1.5,
random_order_prob = 0.0,
shuffle = False,
bigdl_type="float"):
super(ColorJitter, self).__init__(bigdl_type, brightness_prob,
brightness_delta,
contrast_prob,
contrast_lower,
contrast_upper,
hue_prob,
hue_delta,
saturation_prob,
saturation_lower,
saturation_upper,
random_order_prob,
shuffle)
class RandomSampler(FeatureTransformer):
    """
    Random sample a bounding box given some constraints and crop the image.
    This is used in SSD training augmentation.
    """

    def __init__(self, bigdl_type="float"):
        # `bigdl_type` was previously referenced without being a parameter,
        # so constructing RandomSampler() raised NameError.
        super(RandomSampler, self).__init__(bigdl_type)
class RoiProject(FeatureTransformer):
def __init__(self, need_meet_center_constraint, bigdl_type="float"):
super(RoiProject, self).__init__(bigdl_type, need_meet_center_constraint)
class RoiHFlip(FeatureTransformer):
def __init__(self, normalized=True, bigdl_type="float"):
super(RoiHFlip, self).__init__(bigdl_type, normalized)
class RoiResize(FeatureTransformer):
def __init__(self, normalized=True, bigdl_type="float"):
super(RoiResize, self).__init__(bigdl_type, normalized)
class RoiNormalize(FeatureTransformer):
def __init__(self, bigdl_type="float"):
super(RoiNormalize, self).__init__(bigdl_type)
class MatToFloats(FeatureTransformer):
def __init__(self, valid_height=300, valid_width=300, valid_channel=300,
out_key = "floats", share_buffer=True, bigdl_type="float"):
super(MatToFloats, self).__init__(bigdl_type, valid_height, valid_width, valid_channel,
out_key, share_buffer)
class MatToTensor(FeatureTransformer):
def __init__(self, to_rgb=False, tensor_key="imageTensor", bigdl_type="float"):
super(MatToTensor, self).__init__(bigdl_type, to_rgb, tensor_key)
class AspectScale(FeatureTransformer):
    """
    Resize the image keeping the aspect ratio; scale according to the short edge.

    :param min_size: scale size, applied to the short edge
    :param scale_multiple_of: make the scaled size a multiple of some value
    :param max_size: max size after scale
    :param resize_mode: if resize_mode = -1, a mode is selected at random
    :param use_scale_factor: if true, scale factor fx and fy is used, fx = fy = 0
    :param min_scale: controls the minimum scale up for the image
    """

    def __init__(self, min_size, scale_multiple_of = 1, max_size = 1000,
                 resize_mode = 1, use_scale_factor=True, min_scale=-1.0,
                 bigdl_type="float"):
        super(AspectScale, self).__init__(bigdl_type, min_size, scale_multiple_of, max_size,
                                          resize_mode, use_scale_factor, min_scale)
class RandomAspectScale(FeatureTransformer):
def __init__(self, scales, scale_multiple_of = 1, max_size = 1000, bigdl_type="float"):
super(RandomAspectScale, self).__init__(bigdl_type, scales, scale_multiple_of, max_size)
class BytesToMat(FeatureTransformer):
def __init__(self, byte_key = "bytes", bigdl_type="float"):
super(BytesToMat, self).__init__(bigdl_type, byte_key)
class ImageFrameToSample(FeatureTransformer):
    """
    Transform an ImageFrame to samples.

    :param input_keys: keys that map inputs (each input should be a tensor),
        defaults to ["imageTensor"]
    :param target_keys: keys that map targets (each target should be a tensor)
    :param sample_key: key to store the sample under
    """

    def __init__(self, input_keys=None, target_keys=None,
                 sample_key="sample", bigdl_type="float"):
        # Avoid a mutable list as default argument; None preserves the old
        # default of ["imageTensor"] for callers that pass nothing.
        if input_keys is None:
            input_keys = ["imageTensor"]
        super(ImageFrameToSample, self).__init__(bigdl_type, input_keys, target_keys, sample_key)
class PixelBytesToMat(FeatureTransformer):
def __init__(self, byte_key = "bytes", bigdl_type="float"):
super(PixelBytesToMat, self).__init__(bigdl_type, byte_key)
class FixExpand(FeatureTransformer):
def __init__(self, expand_height, expand_width, bigdl_type="float"):
super(FixExpand, self).__init__(bigdl_type, expand_height, expand_width)
class ChannelScaledNormalizer(FeatureTransformer):
def __init__(self, mean_r, mean_g, mean_b, scale, bigdl_type="float"):
super(ChannelScaledNormalizer, self).__init__(bigdl_type, mean_r, mean_g, mean_b, scale)
class RandomAlterAspect(FeatureTransformer):
def __init__(self, min_area_ratio,
max_area_ratio,
min_aspect_ratio_change,
interp_mode,
crop_length, bigdl_type="float"):
super(RandomAlterAspect, self).__init__(bigdl_type, min_area_ratio,
max_area_ratio,
min_aspect_ratio_change,
interp_mode,
crop_length)
class RandomCropper(FeatureTransformer):
    """Wrapper for the backend RandomCropper step; crop size, mirror flag,
    cropper method and channel count are forwarded unchanged."""
    def __init__(self, crop_w, crop_h, mirror, cropper_method, channels, bigdl_type="float"):
        super(RandomCropper, self).__init__(bigdl_type, crop_w, crop_h, mirror, cropper_method, channels)
class RandomResize(FeatureTransformer):
    """Wrapper for the backend RandomResize step; the [min_size, max_size]
    bounds are forwarded unchanged."""
    def __init__(self, min_size, max_size, bigdl_type="float"):
        super(RandomResize, self).__init__(bigdl_type, min_size, max_size)
class SeqFileFolder(JavaValue):
    """Helper exposing sequence-file loading into an ImageFrame."""
    @classmethod
    def files_to_image_frame(cls,
                             url,
                             sc,
                             class_num,
                             partition_num=-1,
                             bigdl_type="float"):
        """Load sequence files at ``url`` into an ImageFrame.

        :param url: location of the sequence files
        :param sc: presumably a SparkContext — forwarded to the backend; confirm
        :param class_num: number of classes, forwarded unchanged
        :param partition_num: partition count; -1 presumably lets the backend
            choose a default — confirm
        :return: ImageFrame wrapping the JVM value returned by the
            ``seqFilesToImageFrame`` backend call
        """
        jvalue = callBigDlFunc(bigdl_type,
                               "seqFilesToImageFrame",
                               url,
                               sc,
                               class_num,
                               partition_num)
        return ImageFrame(jvalue=jvalue)
| true
| true
|
f71793b555d90d054bf1905d45ea0235b077f075
| 2,312
|
py
|
Python
|
pyramid_debugtoolbar/panels/traceback.py
|
rollbar/pyramid_debugtoolbar
|
dab4278eb68b801b1d3e9679cf1308096c3f849f
|
[
"Apache-2.0"
] | null | null | null |
pyramid_debugtoolbar/panels/traceback.py
|
rollbar/pyramid_debugtoolbar
|
dab4278eb68b801b1d3e9679cf1308096c3f849f
|
[
"Apache-2.0"
] | null | null | null |
pyramid_debugtoolbar/panels/traceback.py
|
rollbar/pyramid_debugtoolbar
|
dab4278eb68b801b1d3e9679cf1308096c3f849f
|
[
"Apache-2.0"
] | 1
|
2021-02-21T12:18:04.000Z
|
2021-02-21T12:18:04.000Z
|
import re
from pyramid_debugtoolbar.tbtools import Traceback
from pyramid_debugtoolbar.panels import DebugPanel
from pyramid_debugtoolbar.utils import escape
from pyramid_debugtoolbar.utils import STATIC_PATH
from pyramid_debugtoolbar.utils import ROOT_ROUTE_NAME
from pyramid_debugtoolbar.utils import EXC_ROUTE_NAME
_ = lambda x: x
class TracebackPanel(DebugPanel):
    """Debug-toolbar panel that renders the traceback of a captured exception.

    Content is available only when the tbtools machinery attached a
    ``pdbt_tb`` traceback object to the request.
    """
    name = 'Traceback'
    template = 'pyramid_debugtoolbar.panels:templates/traceback.dbtmako'

    def __init__(self, request):
        self.request = request
        self.exc_history = request.exc_history

    def nav_title(self):
        return _("Traceback")

    def nav_subtitle(self):
        return ""

    def title(self):
        return _("Traceback")

    def url(self):
        return ""

    @property
    def has_content(self):
        # A traceback was captured for this request iff ``pdbt_tb`` is set.
        return hasattr(self.request, 'pdbt_tb')

    def process_response(self, response):
        """Collect the template data for the traceback view, if any."""
        if self.has_content:
            traceback = self.request.pdbt_tb
            exc = escape(traceback.exception)
            summary = Traceback.render_summary(traceback, include_title=False, request=self.request)
            token = self.request.registry.pdtb_token
            url = ''  # self.request.route_url(EXC_ROUTE_NAME, _query=qs)
            evalex = self.exc_history.eval_exc
            self.data = {
                # Idiom fix: conditional expression instead of ``and/or`` trick.
                'evalex': 'true' if evalex else 'false',
                'console': 'false',
                'lodgeit_url': None,
                'title': exc,
                'exception': exc,
                'exception_type': escape(traceback.exception_type),
                'summary': summary,
                'plaintext': traceback.plaintext,
                # Collapse runs of dashes for the "copy & share" plaintext view.
                'plaintext_cs': re.sub('-{2,}', '-', traceback.plaintext),
                'traceback_id': traceback.id,
                'token': token,
                'url': url,
            }

    def render_content(self, request):
        return super(TracebackPanel, self).render_content(request)

    def render_vars(self, request):
        """Extra variables handed to the panel template."""
        return {
            'static_path': request.static_url(STATIC_PATH),
            'root_path': request.route_url(ROOT_ROUTE_NAME)
        }
| 32.111111
| 100
| 0.588668
|
import re
from pyramid_debugtoolbar.tbtools import Traceback
from pyramid_debugtoolbar.panels import DebugPanel
from pyramid_debugtoolbar.utils import escape
from pyramid_debugtoolbar.utils import STATIC_PATH
from pyramid_debugtoolbar.utils import ROOT_ROUTE_NAME
from pyramid_debugtoolbar.utils import EXC_ROUTE_NAME
# Translation stub: identity passthrough standing in for gettext.
_ = lambda x: x
class TracebackPanel(DebugPanel):
    """Debug-toolbar panel rendering the traceback of a captured exception."""
    name = 'Traceback'
    template = 'pyramid_debugtoolbar.panels:templates/traceback.dbtmako'
    def __init__(self, request):
        self.request = request
        self.exc_history = request.exc_history
    def nav_title(self):
        return _("Traceback")
    def nav_subtitle(self):
        return ""
    def title(self):
        return _("Traceback")
    def url(self):
        return ""
    @property
    def has_content(self):
        # A traceback was captured for this request iff ``pdbt_tb`` is set.
        if hasattr(self.request, 'pdbt_tb'):
            return True
        else:
            return False
    def process_response(self, response):
        # Populate the template data only when a traceback was attached.
        if self.has_content:
            traceback = self.request.pdbt_tb
            exc = escape(traceback.exception)
            summary = Traceback.render_summary(traceback, include_title=False, request=self.request)
            token = self.request.registry.pdtb_token
            url = ''  # NOTE(review): exception route URL left blank — confirm intended
            evalex = self.exc_history.eval_exc
            self.data = {
                'evalex': evalex and 'true' or 'false',
                'console': 'false',
                'lodgeit_url': None,
                'title': exc,
                'exception': exc,
                'exception_type': escape(traceback.exception_type),
                'summary': summary,
                # Collapse runs of dashes in the plaintext rendering.
                'plaintext_cs': re.sub('-{2,}', '-', traceback.plaintext),
                'plaintext': traceback.plaintext,
                'traceback_id': traceback.id,
                'token': token,
                'url': url,
            }
    def render_content(self, request):
        return super(TracebackPanel, self).render_content(request)
    def render_vars(self, request):
        # Extra variables handed to the panel template.
        return {
            'static_path': request.static_url(STATIC_PATH),
            'root_path': request.route_url(ROOT_ROUTE_NAME)
        }
| true
| true
|
f717942ddd2886f2dcfba64b6e7601b1de639677
| 571
|
py
|
Python
|
test/api_test.py
|
kuro2a/kiku
|
d4e6500970a20d1955f1773e0e2cfb8e2db819ba
|
[
"MIT"
] | 2
|
2019-08-14T14:32:36.000Z
|
2019-08-15T08:28:15.000Z
|
test/api_test.py
|
kuro2a/kiku
|
d4e6500970a20d1955f1773e0e2cfb8e2db819ba
|
[
"MIT"
] | 1
|
2019-10-02T16:35:05.000Z
|
2019-10-02T16:35:05.000Z
|
test/api_test.py
|
kuro2a/kiku
|
d4e6500970a20d1955f1773e0e2cfb8e2db819ba
|
[
"MIT"
] | 1
|
2019-08-14T14:33:01.000Z
|
2019-08-14T14:33:01.000Z
|
#!/usr/bin/python3
import sys
import pathlib
from datetime import datetime
import pytest
from falcon import testing
sys.path.append( str(pathlib.Path(__file__).resolve().parent) + '/../' )
import main
@pytest.fixture()
def client():
    """Falcon test client wrapping the service built by ``main.create_service``."""
    return testing.TestClient(main.create_service())
def test_api_version(client):
    """The version endpoint returns the canonical OK envelope.

    Fix: the original expected-document embedded ``datetime.now()`` — a
    nondeterministic ``datetime`` object that can never equal the
    JSON-decoded (string) timestamp in the response, so the equality
    assertion could not hold. Compare the stable fields instead and only
    check that a timestamp is present.
    """
    result = client.simulate_get('/api/v1/version')
    meta = result.json[u'meta']
    assert meta[u'status'] == u'OK'
    assert meta[u'message'] == u'OK'
    assert u'timestamp' in meta
    assert result.json[u'data'] is None
| 18.419355
| 72
| 0.623468
|
import sys
import pathlib
from datetime import datetime
import pytest
from falcon import testing
sys.path.append( str(pathlib.Path(__file__).resolve().parent) + '/../' )
import main
@pytest.fixture()
def client():
    """Falcon test client wrapping the service built by ``main.create_service``."""
    return testing.TestClient(main.create_service())
def test_api_version(client):
    """Version endpoint should return the canonical OK envelope."""
    doc = {
        u'meta': {
            u'status': u'OK',
            u'message': u'OK',
            # NOTE(review): datetime.now() is nondeterministic and stays a
            # datetime object, while result.json holds JSON-decoded values —
            # this equality likely can never hold; confirm and compare the
            # stable fields instead.
            u'timestamp': datetime.now()
        }, u'data': None
    }
    result = client.simulate_get('/api/v1/version')
    assert result.json == doc
| true
| true
|
f7179491275351836a0592d841bc9b9fe7d43c7d
| 151
|
py
|
Python
|
src/GracefulKiller/__init__.py
|
MaxMaxoff/GracefulKiller
|
dab06ecc7573211ae7acf90e5f889e37d48a88d2
|
[
"MIT"
] | 1
|
2021-10-04T09:09:12.000Z
|
2021-10-04T09:09:12.000Z
|
src/GracefulKiller/__init__.py
|
MaxMaxoff/GracefulKiller
|
dab06ecc7573211ae7acf90e5f889e37d48a88d2
|
[
"MIT"
] | null | null | null |
src/GracefulKiller/__init__.py
|
MaxMaxoff/GracefulKiller
|
dab06ecc7573211ae7acf90e5f889e37d48a88d2
|
[
"MIT"
] | null | null | null |
# Prefer the installed package layout; fall back to the in-repo ``src`` layout
# (e.g. when running from a source checkout).
try:
    from GracefulKiller.GracefulKiller import GracefulKiller, Loop
except ImportError:
    # Fix: narrowed from a bare ``except:`` so unrelated errors (SyntaxError
    # in the module, KeyboardInterrupt, ...) are no longer swallowed.
    from src.GracefulKiller.GracefulKiller import GracefulKiller, Loop
| 30.2
| 70
| 0.821192
|
try:
from GracefulKiller.GracefulKiller import GracefulKiller, Loop
except:
from src.GracefulKiller.GracefulKiller import GracefulKiller, Loop
| true
| true
|
f71794ea091735736fa90a59266f6bb2161ac032
| 1,897
|
py
|
Python
|
tests/test_test.py
|
yehzhang/RapidTest
|
2302fc10ddafba1d16ef1d7448d46c66f5a05da2
|
[
"MIT"
] | null | null | null |
tests/test_test.py
|
yehzhang/RapidTest
|
2302fc10ddafba1d16ef1d7448d46c66f5a05da2
|
[
"MIT"
] | null | null | null |
tests/test_test.py
|
yehzhang/RapidTest
|
2302fc10ddafba1d16ef1d7448d46c66f5a05da2
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from rapidtest import Result, Test, Case, TreeNode
class TestTest(TestCase):
    """Exercise rapidtest's Test/Case plumbing against ``list`` operations."""

    def test_check_result(self):
        """Operation-mode cases compare per-call Results or the final object."""
        t = Test(list, operation=True)
        t.add_case(Case('append', [1],
                        'pop', Result(1),
                        'append', [2],
                        'append', [3],
                        'pop',
                        'pop', Result(2)))
        t.add_case(Case('append', [1],
                        'pop',
                        'append', [2],
                        'append', [3],
                        'pop',
                        'pop', result=list))
        t.run()
        # Removed: a block of commented-out cases sketching an
        # "output difference detected" scenario — dead code.

    def test_summary(self):
        """summary() reports the empty/pass/fail/generator-error exit codes."""
        def assert_sum_code(c):
            # Helper: the first element of summary() is the exit code.
            code, _ = t.summary()
            self.assertEqual(code, c)

        t = Test(list, operation=True)
        assert_sum_code(t.EXIT_EMPTY)

        t.add_case(Case('append', [1], 'pop', Result(1)))
        t.run()
        assert_sum_code(t.EXIT_PASS)

        # Popping an empty list raises IndexError inside the case run.
        t.add_case(Case('pop', Result(None)))
        with self.assertRaises(IndexError):
            t.run()
        assert_sum_code(t.EXIT_FAIL)

        t.add_case(Case('append', [1], Result(None)))
        t.run()
        assert_sum_code(t.EXIT_FAIL)

        def f(i):
            # Case generator that blows up after the first case.
            if i == 0:
                return Case('append', [1], Result(None))
            raise ValueError
        t.add_func(f)
        with self.assertRaises(ValueError):
            t.run()
        assert_sum_code(t.EXIT_GEN_ERR)
| 29.640625
| 62
| 0.470216
|
from unittest import TestCase
from rapidtest import Result, Test, Case, TreeNode
class TestTest(TestCase):
    """Exercise rapidtest's Test/Case plumbing against ``list`` operations."""
    def test_check_result(self):
        """Operation-mode cases compare per-call Results or the final object."""
        t = Test(list, operation=True)
        t.add_case(Case('append', [1],
                        'pop', Result(1),
                        'append', [2],
                        'append', [3],
                        'pop',
                        'pop', Result(2)))
        t.add_case(Case('append', [1],
                        'pop',
                        'append', [2],
                        'append', [3],
                        'pop',
                        'pop', result=list))
        t.run()
    def test_summary(self):
        """summary() reports the empty/pass/fail/generator-error exit codes."""
        def assert_sum_code(c):
            # Helper: the first element of summary() is the exit code.
            code, _ = t.summary()
            self.assertEqual(code, c)
        t = Test(list, operation=True)
        assert_sum_code(t.EXIT_EMPTY)
        t.add_case(Case('append', [1], 'pop', Result(1)))
        t.run()
        assert_sum_code(t.EXIT_PASS)
        # Popping an empty list raises IndexError inside the case run.
        t.add_case(Case('pop', Result(None)))
        with self.assertRaises(IndexError):
            t.run()
        assert_sum_code(t.EXIT_FAIL)
        t.add_case(Case('append', [1], Result(None)))
        t.run()
        assert_sum_code(t.EXIT_FAIL)
        def f(i):
            # Case generator that blows up after the first case.
            if i == 0:
                return Case('append', [1], Result(None))
            raise ValueError
        t.add_func(f)
        with self.assertRaises(ValueError):
            t.run()
        assert_sum_code(t.EXIT_GEN_ERR)
| true
| true
|
f7179508810234674ea1f3cf934a800af733803f
| 794
|
py
|
Python
|
gaphor/RAAML/modelinglanguage.py
|
mrmonkington/gaphor
|
f0fcd4deb90d24b14723840a689fac901f645a43
|
[
"Apache-2.0"
] | 867
|
2018-01-09T00:19:09.000Z
|
2022-03-31T02:49:23.000Z
|
gaphor/RAAML/modelinglanguage.py
|
mrmonkington/gaphor
|
f0fcd4deb90d24b14723840a689fac901f645a43
|
[
"Apache-2.0"
] | 790
|
2018-01-13T23:47:07.000Z
|
2022-03-31T16:04:27.000Z
|
gaphor/RAAML/modelinglanguage.py
|
sitedata/gaphor
|
c83eff0bd595d1a8e766a157f0268e5206eed22c
|
[
"Apache-2.0"
] | 117
|
2018-01-09T02:24:49.000Z
|
2022-03-23T08:07:42.000Z
|
"""The RAAML Modeling Language module is the entrypoint for RAAML related
assets."""
import gaphor.SysML.propertypages # noqa
from gaphor.abc import ModelingLanguage
from gaphor.core import gettext
from gaphor.diagram.diagramtoolbox import ToolboxDefinition
from gaphor.RAAML import diagramitems, raaml
from gaphor.RAAML.toolbox import raaml_toolbox_actions
class RAAMLModelingLanguage(ModelingLanguage):
    """Entry point for the RAAML language: its display name, toolbox, and
    element lookup."""

    @property
    def name(self) -> str:
        return gettext("RAAML")

    @property
    def toolbox_definition(self) -> ToolboxDefinition:
        return raaml_toolbox_actions

    def lookup_element(self, name):
        """Resolve *name* in the raaml namespace first, then diagram items."""
        found = getattr(raaml, name, None)
        return found if found else getattr(diagramitems, name, None)
| 30.538462
| 73
| 0.746851
|
import gaphor.SysML.propertypages
from gaphor.abc import ModelingLanguage
from gaphor.core import gettext
from gaphor.diagram.diagramtoolbox import ToolboxDefinition
from gaphor.RAAML import diagramitems, raaml
from gaphor.RAAML.toolbox import raaml_toolbox_actions
class RAAMLModelingLanguage(ModelingLanguage):
    """Entry point for the RAAML language: display name, toolbox, elements."""
    @property
    def name(self) -> str:
        return gettext("RAAML")
    @property
    def toolbox_definition(self) -> ToolboxDefinition:
        return raaml_toolbox_actions
    def lookup_element(self, name):
        # Prefer the raaml model namespace; fall back to diagram items.
        element_type = getattr(raaml, name, None)
        if not element_type:
            element_type = getattr(diagramitems, name, None)
        return element_type
| true
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.