hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f720e6032cfc7932950462b55a729037d787591f | 404 | py | Python | AboutModel/migrations/0006_person_upload.py | jinjinanan/HelloDjango1 | d1174b72341946f0575df37236d85983facc1bc6 | [
"MIT"
] | null | null | null | AboutModel/migrations/0006_person_upload.py | jinjinanan/HelloDjango1 | d1174b72341946f0575df37236d85983facc1bc6 | [
"MIT"
] | null | null | null | AboutModel/migrations/0006_person_upload.py | jinjinanan/HelloDjango1 | d1174b72341946f0575df37236d85983facc1bc6 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.1 on 2018-09-26 09:08
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: adds a file-upload field to ``person``."""

    # Must be applied after migration 0005 of the AboutModel app.
    dependencies = [
        ('AboutModel', '0005_auto_20180926_1639'),
    ]

    operations = [
        migrations.AddField(
            model_name='person',
            name='upload',
            # default='' keeps existing rows valid without prompting for a
            # one-off default during makemigrations.
            field=models.FileField(default='', upload_to='media/'),
        ),
    ]
| 21.263158 | 67 | 0.596535 |
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: adds a file-upload field to ``person``."""

    # Must be applied after migration 0005 of the AboutModel app.
    dependencies = [
        ('AboutModel', '0005_auto_20180926_1639'),
    ]

    operations = [
        migrations.AddField(
            model_name='person',
            name='upload',
            # default='' keeps existing rows valid without prompting for a
            # one-off default during makemigrations.
            field=models.FileField(default='', upload_to='media/'),
        ),
    ]
| true | true |
f720e775b9e53621d7ef0b929530a0e01f683291 | 216 | py | Python | display/display/handlers/calendar/calendar.py | owlsn/h_crawl | c0431ee6484e61d9339553c3350962ea517749d6 | [
"MIT"
] | null | null | null | display/display/handlers/calendar/calendar.py | owlsn/h_crawl | c0431ee6484e61d9339553c3350962ea517749d6 | [
"MIT"
] | 8 | 2021-03-18T20:33:29.000Z | 2022-03-11T23:21:04.000Z | display/display/handlers/calendar/calendar.py | owlsn/h_crawl | c0431ee6484e61d9339553c3350962ea517749d6 | [
"MIT"
] | null | null | null | from display.handlers.base import BaseHandler
class CalendarHandler(BaseHandler):
def get(self):
title = 'CalendarHandler'
self.render('calendar/calendar.html', title = title, **self.render_dict) | 36 | 80 | 0.722222 | from display.handlers.base import BaseHandler
class CalendarHandler(BaseHandler):
def get(self):
title = 'CalendarHandler'
self.render('calendar/calendar.html', title = title, **self.render_dict) | true | true |
f720e782756412b8e32b05c6b3b8cd42bb215506 | 298 | py | Python | 1.py | lorenaEscobar0014/TALLER-DE-FOR | a448358b336d6e240ff3017a9c44d7df67bf173e | [
"MIT"
] | null | null | null | 1.py | lorenaEscobar0014/TALLER-DE-FOR | a448358b336d6e240ff3017a9c44d7df67bf173e | [
"MIT"
] | null | null | null | 1.py | lorenaEscobar0014/TALLER-DE-FOR | a448358b336d6e240ff3017a9c44d7df67bf173e | [
"MIT"
] | null | null | null | archivo = open('paises.txt', 'r')
# `archivo` (paises.txt) is opened on the preceding line and closed on the
# following one.  Each line looks like "<pais>: <ciudad>"; this block collects
# every city, then prints the ones starting with "M" and how many there were.
ciudad = []
for linea in archivo:
    # Everything after ": " is the city name.  The trailing newline is kept,
    # matching the original character-by-character copy.
    sep = linea.index(":")
    ciudad.append(linea[sep + 2:])
lista = []
for nombre in ciudad:
    # startswith() also tolerates an empty name instead of raising IndexError.
    if nombre.startswith("M"):
        print(nombre)
        lista.append(nombre)
print(len(lista))
archivo.close() | 18.625 | 33 | 0.57047 | archivo = open('paises.txt', 'r')
# `archivo` (paises.txt) is opened on the preceding line and closed on the
# following one.  Each line looks like "<pais>: <ciudad>"; this block collects
# every city, then prints the ones starting with "M" and how many there were.
ciudad = []
for linea in archivo:
    # Everything after ": " is the city name (trailing newline kept).
    sep = linea.index(":")
    ciudad.append(linea[sep + 2:])
lista = []
for nombre in ciudad:
    # startswith() also tolerates an empty name instead of raising IndexError.
    if nombre.startswith("M"):
        print(nombre)
        lista.append(nombre)
print(len(lista))
archivo.close() | true | true |
f720e79407295f9aac9a3426d1cae24917442d5c | 2,720 | py | Python | src/pipelines/vaccinations/se_authority.py | chrismayemba/covid-19-open-data | cacecb05cd8277f8e61b6e7932915826f41af24b | [
"Apache-2.0"
] | 1 | 2021-10-21T15:24:08.000Z | 2021-10-21T15:24:08.000Z | src/pipelines/vaccinations/se_authority.py | chrismayemba/covid-19-open-data | cacecb05cd8277f8e61b6e7932915826f41af24b | [
"Apache-2.0"
] | null | null | null | src/pipelines/vaccinations/se_authority.py | chrismayemba/covid-19-open-data | cacecb05cd8277f8e61b6e7932915826f41af24b | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from typing import Any, Dict
from pandas import DataFrame, concat
from lib.data_source import DataSource
from lib.time import datetime_isoformat
from lib.utils import aggregate_admin_level, table_merge, table_rename
from pipelines.epidemiology.it_authority import _subregion1_code_converter
# Maps column names of the Swedish source table to the pipeline's canonical
# names.  Passed to table_rename with drop=True (presumably discarding
# unlisted columns — confirm against lib.utils.table_rename).
_column_adapter = {
    "Vecka": "week",
    "År": "year",
    "Region": "match_string",
    "Antal vaccinerade": "_total_doses",
    # "Andel vaccinerade": "",
    "Dosnummer": "_dose_type",
}
class SwedenDataSource(DataSource):
    """Parses Sweden's weekly vaccination table into the canonical schema."""

    def parse_dataframes(
        self, dataframes: Dict[Any, DataFrame], aux: Dict[str, DataFrame], **parse_opts
    ) -> DataFrame:
        df = table_rename(dataframes[0], _column_adapter, drop=True)

        # Derive an ISO date from the (year, week) pair: January 1st of the
        # year plus the given number of weeks.
        df["date"] = [
            (datetime.datetime.strptime(str(year), "%Y")
             + datetime.timedelta(weeks=week)).date().isoformat()
            for year, week in zip(df["year"], df["week"])
        ]
        df = df.drop(columns=["week", "year"])

        # Split first- and second-dose records on the last character of the
        # dose label, then merge them back as two separate count columns.
        last_char = df["_dose_type"].str.slice(-1)
        first = df.loc[last_char == "1"].drop(columns=["_dose_type"]).rename(
            columns={"_total_doses": "total_persons_vaccinated"})
        second = df.loc[last_char == "2"].drop(columns=["_dose_type"]).rename(
            columns={"_total_doses": "total_persons_fully_vaccinated"})
        df = table_merge([first, second], how="outer")

        # Only subregion1-level records should be matched downstream.
        df["key"] = None
        df["country_code"] = "SE"
        df["subregion2_code"] = None
        df["locality_code"] = None

        # Country-level totals use a special label in the source table.
        df.loc[df["match_string"] == "| Sverige |", "key"] = "SE"

        # Estimate total doses from the two per-person counts.
        df["total_vaccine_doses_administered"] = (
            df["total_persons_vaccinated"] + df["total_persons_fully_vaccinated"]
        )
        return df
| 40 | 100 | 0.683088 |
import datetime
from typing import Any, Dict
from pandas import DataFrame, concat
from lib.data_source import DataSource
from lib.time import datetime_isoformat
from lib.utils import aggregate_admin_level, table_merge, table_rename
from pipelines.epidemiology.it_authority import _subregion1_code_converter
# Maps column names of the Swedish source table to the pipeline's canonical
# names.  Passed to table_rename with drop=True (presumably discarding
# unlisted columns — confirm against lib.utils.table_rename).
_column_adapter = {
    "Vecka": "week",
    "År": "year",
    "Region": "match_string",
    "Antal vaccinerade": "_total_doses",
    "Dosnummer": "_dose_type",
}
class SwedenDataSource(DataSource):
    """Parses Sweden's weekly vaccination table into the canonical schema."""

    def parse_dataframes(
        self, dataframes: Dict[Any, DataFrame], aux: Dict[str, DataFrame], **parse_opts
    ) -> DataFrame:
        df = table_rename(dataframes[0], _column_adapter, drop=True)

        # Derive an ISO date from the (year, week) pair: January 1st of the
        # year plus the given number of weeks.
        df["date"] = [
            (datetime.datetime.strptime(str(year), "%Y")
             + datetime.timedelta(weeks=week)).date().isoformat()
            for year, week in zip(df["year"], df["week"])
        ]
        df = df.drop(columns=["week", "year"])

        # Split first- and second-dose records on the last character of the
        # dose label, then merge them back as two separate count columns.
        last_char = df["_dose_type"].str.slice(-1)
        first = df.loc[last_char == "1"].drop(columns=["_dose_type"]).rename(
            columns={"_total_doses": "total_persons_vaccinated"})
        second = df.loc[last_char == "2"].drop(columns=["_dose_type"]).rename(
            columns={"_total_doses": "total_persons_fully_vaccinated"})
        df = table_merge([first, second], how="outer")

        # Only subregion1-level records should be matched downstream.
        df["key"] = None
        df["country_code"] = "SE"
        df["subregion2_code"] = None
        df["locality_code"] = None

        # Country-level totals use a special label in the source table.
        df.loc[df["match_string"] == "| Sverige |", "key"] = "SE"

        # Estimate total doses from the two per-person counts.
        df["total_vaccine_doses_administered"] = (
            df["total_persons_vaccinated"] + df["total_persons_fully_vaccinated"]
        )
        return df
| true | true |
f720e7b3881bb7f2ca7c123f52d4f902222b4dac | 2,385 | py | Python | imblearn/under_sampling/_prototype_selection/tests/test_instance_hardness_threshold.py | laurallu/imbalanced-learn | 321b751f90ef8faaec6b39218f8c531893e9e79f | [
"MIT"
] | null | null | null | imblearn/under_sampling/_prototype_selection/tests/test_instance_hardness_threshold.py | laurallu/imbalanced-learn | 321b751f90ef8faaec6b39218f8c531893e9e79f | [
"MIT"
] | null | null | null | imblearn/under_sampling/_prototype_selection/tests/test_instance_hardness_threshold.py | laurallu/imbalanced-learn | 321b751f90ef8faaec6b39218f8c531893e9e79f | [
"MIT"
] | null | null | null | """Test the module ."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Christos Aridas
# License: MIT
import pytest
import numpy as np
from sklearn.ensemble import GradientBoostingClassifier
from imblearn.under_sampling import InstanceHardnessThreshold
RND_SEED = 0
X = np.array(
[
[-0.3879569, 0.6894251],
[-0.09322739, 1.28177189],
[-0.77740357, 0.74097941],
[0.91542919, -0.65453327],
[-0.03852113, 0.40910479],
[-0.43877303, 1.07366684],
[-0.85795321, 0.82980738],
[-0.18430329, 0.52328473],
[-0.30126957, -0.66268378],
[-0.65571327, 0.42412021],
[-0.28305528, 0.30284991],
[0.20246714, -0.34727125],
[1.06446472, -1.09279772],
[0.30543283, -0.02589502],
[-0.00717161, 0.00318087],
]
)
Y = np.array([0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0])
ESTIMATOR = GradientBoostingClassifier(random_state=RND_SEED)
def test_iht_init():
sampling_strategy = "auto"
iht = InstanceHardnessThreshold(
ESTIMATOR, sampling_strategy=sampling_strategy, random_state=RND_SEED
)
assert iht.sampling_strategy == sampling_strategy
assert iht.random_state == RND_SEED
def test_iht_fit_resample():
iht = InstanceHardnessThreshold(ESTIMATOR, random_state=RND_SEED)
X_resampled, y_resampled = iht.fit_resample(X, Y)
assert X_resampled.shape == (12, 2)
assert y_resampled.shape == (12,)
def test_iht_fit_resample_half():
sampling_strategy = {0: 6, 1: 8}
iht = InstanceHardnessThreshold(
ESTIMATOR, sampling_strategy=sampling_strategy, random_state=RND_SEED
)
X_resampled, y_resampled = iht.fit_resample(X, Y)
assert X_resampled.shape == (14, 2)
assert y_resampled.shape == (14,)
def test_iht_fit_resample_class_obj():
est = GradientBoostingClassifier(random_state=RND_SEED)
iht = InstanceHardnessThreshold(estimator=est, random_state=RND_SEED)
X_resampled, y_resampled = iht.fit_resample(X, Y)
assert X_resampled.shape == (12, 2)
assert y_resampled.shape == (12,)
def test_iht_fit_resample_wrong_class_obj():
from sklearn.cluster import KMeans
est = KMeans()
iht = InstanceHardnessThreshold(estimator=est, random_state=RND_SEED)
with pytest.raises(ValueError, match="Invalid parameter `estimator`"):
iht.fit_resample(X, Y)
| 30.189873 | 77 | 0.678826 |
import pytest
import numpy as np
from sklearn.ensemble import GradientBoostingClassifier
from imblearn.under_sampling import InstanceHardnessThreshold
RND_SEED = 0
X = np.array(
[
[-0.3879569, 0.6894251],
[-0.09322739, 1.28177189],
[-0.77740357, 0.74097941],
[0.91542919, -0.65453327],
[-0.03852113, 0.40910479],
[-0.43877303, 1.07366684],
[-0.85795321, 0.82980738],
[-0.18430329, 0.52328473],
[-0.30126957, -0.66268378],
[-0.65571327, 0.42412021],
[-0.28305528, 0.30284991],
[0.20246714, -0.34727125],
[1.06446472, -1.09279772],
[0.30543283, -0.02589502],
[-0.00717161, 0.00318087],
]
)
Y = np.array([0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0])
ESTIMATOR = GradientBoostingClassifier(random_state=RND_SEED)
def test_iht_init():
sampling_strategy = "auto"
iht = InstanceHardnessThreshold(
ESTIMATOR, sampling_strategy=sampling_strategy, random_state=RND_SEED
)
assert iht.sampling_strategy == sampling_strategy
assert iht.random_state == RND_SEED
def test_iht_fit_resample():
iht = InstanceHardnessThreshold(ESTIMATOR, random_state=RND_SEED)
X_resampled, y_resampled = iht.fit_resample(X, Y)
assert X_resampled.shape == (12, 2)
assert y_resampled.shape == (12,)
def test_iht_fit_resample_half():
sampling_strategy = {0: 6, 1: 8}
iht = InstanceHardnessThreshold(
ESTIMATOR, sampling_strategy=sampling_strategy, random_state=RND_SEED
)
X_resampled, y_resampled = iht.fit_resample(X, Y)
assert X_resampled.shape == (14, 2)
assert y_resampled.shape == (14,)
def test_iht_fit_resample_class_obj():
est = GradientBoostingClassifier(random_state=RND_SEED)
iht = InstanceHardnessThreshold(estimator=est, random_state=RND_SEED)
X_resampled, y_resampled = iht.fit_resample(X, Y)
assert X_resampled.shape == (12, 2)
assert y_resampled.shape == (12,)
def test_iht_fit_resample_wrong_class_obj():
from sklearn.cluster import KMeans
est = KMeans()
iht = InstanceHardnessThreshold(estimator=est, random_state=RND_SEED)
with pytest.raises(ValueError, match="Invalid parameter `estimator`"):
iht.fit_resample(X, Y)
| true | true |
f720e859b033940aead6b8c6f677e377794adbc7 | 798 | py | Python | piton/lib/readchar/readchar.py | piton-package-manager/PPM | 19015b76184befe1e2daa63189a13b039787868d | [
"MIT"
] | 19 | 2016-04-08T04:00:07.000Z | 2021-11-12T19:36:56.000Z | piton/lib/readchar/readchar.py | LookLikeAPro/PPM | 19015b76184befe1e2daa63189a13b039787868d | [
"MIT"
] | 9 | 2017-01-03T13:39:47.000Z | 2022-01-15T20:38:20.000Z | piton/lib/readchar/readchar.py | LookLikeAPro/PPM | 19015b76184befe1e2daa63189a13b039787868d | [
"MIT"
] | 6 | 2017-04-01T03:38:45.000Z | 2021-05-06T11:25:31.000Z | # -*- coding: utf-8 -*-
# This file is based on this gist:
# http://code.activestate.com/recipes/134892/
# So real authors are DannyYoo and company.
import sys
# Select the platform-specific single-character reader.  macOS (darwin) uses
# the same termios-based implementation as Linux.
if sys.platform.startswith('linux') or sys.platform == 'darwin':
    from .readchar_linux import readchar
elif sys.platform in ('win32', 'cygwin'):
    from .readchar_windows import readchar
else:
    # Bug fix: `NotImplemented` is a constant, not an exception type; raising
    # it fails with "TypeError: exceptions must derive from BaseException".
    raise NotImplementedError('The platform %s is not supported yet' % sys.platform)
def readkey(getchar_fn=None):
    """Read one key press, returning the complete escape sequence for
    special keys (e.g. '\\x1b[A' for arrow keys, '\\x1b[3~' for delete).

    ``getchar_fn`` may override the platform reader, mainly for testing.
    """
    read = getchar_fn or readchar
    seq = read()
    if ord(seq) != 0x1b:        # ordinary key: a single character
        return seq
    seq += read()
    if ord(seq[1]) != 0x5b:     # ESC not followed by '[': two-byte sequence
        return seq
    seq += read()
    if ord(seq[2]) != 0x33:     # three-byte CSI sequences (arrows, etc.)
        return seq
    seq += read()               # four-byte sequences such as ESC [ 3 ~
    return seq
| 25.741935 | 79 | 0.645363 |
import sys
# Select the platform-specific single-character reader.  macOS (darwin) uses
# the same termios-based implementation as Linux.
if sys.platform.startswith('linux') or sys.platform == 'darwin':
    from .readchar_linux import readchar
elif sys.platform in ('win32', 'cygwin'):
    from .readchar_windows import readchar
else:
    # Bug fix: `NotImplemented` is a constant, not an exception type; raising
    # it fails with "TypeError: exceptions must derive from BaseException".
    raise NotImplementedError('The platform %s is not supported yet' % sys.platform)
def readkey(getchar_fn=None):
    """Read one key press, returning the complete escape sequence for
    special keys (e.g. '\\x1b[A' for arrow keys, '\\x1b[3~' for delete).

    ``getchar_fn`` may override the platform reader, mainly for testing.
    """
    read = getchar_fn or readchar
    seq = read()
    if ord(seq) != 0x1b:        # ordinary key: a single character
        return seq
    seq += read()
    if ord(seq[1]) != 0x5b:     # ESC not followed by '[': two-byte sequence
        return seq
    seq += read()
    if ord(seq[2]) != 0x33:     # three-byte CSI sequences (arrows, etc.)
        return seq
    seq += read()               # four-byte sequences such as ESC [ 3 ~
    return seq
| true | true |
f720e8b77258c01a05c510ec80e3283dcdbe46b3 | 1,698 | py | Python | leetcode/combination_sum_III.py | sci-c0/python-misc-problems | a0827cc9cd290ca142bba3b7dda307234da63c3c | [
"BSD-3-Clause"
] | null | null | null | leetcode/combination_sum_III.py | sci-c0/python-misc-problems | a0827cc9cd290ca142bba3b7dda307234da63c3c | [
"BSD-3-Clause"
] | null | null | null | leetcode/combination_sum_III.py | sci-c0/python-misc-problems | a0827cc9cd290ca142bba3b7dda307234da63c3c | [
"BSD-3-Clause"
] | null | null | null | """
https://leetcode.com/problems/combination-sum-iii/
Tags: Practice; Concepts; Algorithms; Recursion/BackTracking; Medium
"""
from typing import List
class Solution:
    """LeetCode 216 — Combination Sum III, solved with backtracking."""

    def combinationSum3(self, k: int, n: int) -> List[List[int]]:
        """Return every k-element combination of 1..9 whose members sum to n."""
        return self.combinations(list(range(1, 10)), [], n, k)

    def combinations(self, nums: List[int], combi: List[int], s: int, k: int):
        """Extend ``combi`` with numbers drawn (in order) from the sorted pool
        ``nums`` until it holds k elements and the drawn numbers sum to ``s``.
        Returns the list of completed combinations, or None when the single
        remaining slot cannot be filled."""
        # Exactly one slot left: it must be filled by the remaining sum itself.
        if len(combi) == k - 1:
            return [combi + [s]] if s in nums else None

        results = []
        for idx, val in enumerate(nums):
            rest = s - val
            # The pool is sorted, so once the remaining sum is no longer
            # strictly larger than the current value, no later value fits.
            if rest <= val:
                break
            # Recurse only on the numbers after `val`, so duplicates and
            # permutations of already-chosen combinations are impossible.
            deeper = self.combinations(nums[idx + 1:], combi + [val], rest, k)
            if deeper is not None:
                results.extend(deeper)
        return results
| 32.653846 | 114 | 0.579505 |
from typing import List
class Solution:
    """LeetCode 216 — Combination Sum III, solved with backtracking."""

    def combinationSum3(self, k: int, n: int) -> List[List[int]]:
        """Return every k-element combination of 1..9 whose members sum to n."""
        return self.combinations(list(range(1, 10)), [], n, k)

    def combinations(self, nums: List[int], combi: List[int], s: int, k: int):
        """Extend ``combi`` with numbers drawn (in order) from the sorted pool
        ``nums`` until it holds k elements and the drawn numbers sum to ``s``.
        Returns the list of completed combinations, or None when the single
        remaining slot cannot be filled."""
        # Exactly one slot left: it must be filled by the remaining sum itself.
        if len(combi) == k - 1:
            return [combi + [s]] if s in nums else None

        results = []
        for idx, val in enumerate(nums):
            rest = s - val
            # The pool is sorted, so once the remaining sum is no longer
            # strictly larger than the current value, no later value fits.
            if rest <= val:
                break
            # Recurse only on the numbers after `val`, so duplicates and
            # permutations of already-chosen combinations are impossible.
            deeper = self.combinations(nums[idx + 1:], combi + [val], rest, k)
            if deeper is not None:
                results.extend(deeper)
        return results
| true | true |
f720e94c7b98eefd4db2a78ffdc2366c09186edd | 942 | py | Python | hypernet/src/thermophysicalModels/chemistry/reactions/reactionRate/arrhenius.py | christian-jacobsen/hypernet | 9f62e1531eb152cc08af0b0c6b09d6fde8d42400 | [
"Apache-2.0"
] | null | null | null | hypernet/src/thermophysicalModels/chemistry/reactions/reactionRate/arrhenius.py | christian-jacobsen/hypernet | 9f62e1531eb152cc08af0b0c6b09d6fde8d42400 | [
"Apache-2.0"
] | null | null | null | hypernet/src/thermophysicalModels/chemistry/reactions/reactionRate/arrhenius.py | christian-jacobsen/hypernet | 9f62e1531eb152cc08af0b0c6b09d6fde8d42400 | [
"Apache-2.0"
] | null | null | null | import numpy as np
from hypernet.src.thermophysicalModels.chemistry.reactions.reactionRate import Basic
class Arrhenius(Basic):
    """Modified Arrhenius rate law: k(T) = A * T**beta * exp(-Ta / T)."""

    # Initialization
    ###########################################################################
    def __init__(
        self,
        reactionsDatabase,
        *args,
        **kwargs
    ):
        super().__init__(reactionsDatabase, *args, **kwargs)
        # Per-reaction coefficient arrays pulled from the reactions database.
        for coeff in ("A", "beta", "Ta"):
            setattr(self, coeff, self.reacDB[coeff].to_numpy())

    # Methods
    ###########################################################################
    # Forward reaction rates --------------------------------------------------
    def k_(self, T):
        """Forward rate coefficients at temperature(s) T."""
        return self.A * T ** self.beta * np.exp(-self.Ta / T)

    def dkdT_(self, T):
        """Temperature derivative of the forward rate coefficients.

        NOTE(review): reads ``self.k`` (not ``self.k_``) — presumably provided
        by the ``Basic`` base class; confirm.
        """
        return (self.beta + self.Ta / T) * self.k / T
| 28.545455 | 84 | 0.440552 | import numpy as np
from hypernet.src.thermophysicalModels.chemistry.reactions.reactionRate import Basic
class Arrhenius(Basic):
| true | true |
f720eaa230ec470ea6eabf1b1bc884458772e552 | 9,670 | py | Python | qpth/qp.py | lopa23/flim_optcrf | 2d9a1dba37a7e5e6beae66c536b07bb7ae4bdfe9 | [
"Apache-2.0"
] | null | null | null | qpth/qp.py | lopa23/flim_optcrf | 2d9a1dba37a7e5e6beae66c536b07bb7ae4bdfe9 | [
"Apache-2.0"
] | null | null | null | qpth/qp.py | lopa23/flim_optcrf | 2d9a1dba37a7e5e6beae66c536b07bb7ae4bdfe9 | [
"Apache-2.0"
] | null | null | null | import torch
from torch.autograd import Function
from .util import bger, expandParam, extract_nBatch
from . import solvers
from .solvers.pdipm import batch as pdipm_b
from .solvers.pdipm import spbatch as pdipm_spb
# from .solvers.pdipm import single as pdipm_s
from enum import Enum
class QPSolvers(Enum):
    """Available backends for solving the batched QPs."""
    # Batched primal-dual interior-point method (solvers.pdipm.batch).
    PDIPM_BATCHED = 1
    # Per-instance solve via cvxpy on numpy arrays (solvers.cvxpy).
    CVXPY = 2
def QPFunction(eps=1e-12, verbose=1, notImprovedLim=3,
               maxIter=20, solver=QPSolvers.PDIPM_BATCHED,
               check_Q_spd=False):
    """Build an autograd ``Function`` that solves batches of QPs.

    ``solver`` picks the backend (batched PDIPM or per-instance cvxpy);
    ``eps``, ``notImprovedLim`` and ``maxIter`` tune the interior-point
    iteration, and ``check_Q_spd`` enables an eigenvalue check on Q.
    """
    class QPFunctionFn(Function):
        @staticmethod
        def forward(ctx, Q_, p_, G_, h_, A_, b_):
            """Solve a batch of QPs.

            Each instance optimizes over nz variables with nineq inequality
            and neq equality constraints:

                zhat = argmin_z 1/2 z^T Q z + p^T z
                       subject to  Gz <= h,  Az = b

            where Q is positive semi-definite.  Shapes:
                Q: (nBatch, nz, nz) or (nz, nz)
                p: (nBatch, nz) or (nz)
                G: (nBatch, nineq, nz) or (nineq, nz)
                h: (nBatch, nineq) or (nineq)
                A: (nBatch, neq, nz) or (neq, nz)
                b: (nBatch, neq) or (neq)

            Coefficients without a leading batch dimension are shared across
            the minibatch; this is inferred per input.  To drop the equality
            constraints entirely, pass empty tensors for A and b.

            Returns zhat of shape (nBatch, nz).
            """
            nBatch = extract_nBatch(Q_, p_, G_, h_, A_, b_)
            # Broadcast shared (un-batched) coefficients; the *_e flags from
            # expandParam are only needed in backward.
            Q, _ = expandParam(Q_, nBatch, 3)
            p, _ = expandParam(p_, nBatch, 2)
            G, _ = expandParam(G_, nBatch, 3)
            h, _ = expandParam(h_, nBatch, 2)
            A, _ = expandParam(A_, nBatch, 3)
            b, _ = expandParam(b_, nBatch, 2)

            if check_Q_spd:
                # Optional sanity check that every Q is positive definite.
                # NOTE(review): torch.eig was removed in recent PyTorch
                # releases (torch.linalg.eigvals is the replacement).
                for i in range(nBatch):
                    e, _ = torch.eig(Q[i])
                    if not torch.all(e[:, 0] > 0):
                        raise RuntimeError('Q is not SPD.')

            _, nineq, nz = G.size()
            neq = A.size(1) if A.nelement() > 0 else 0
            assert neq > 0 or nineq > 0
            ctx.neq, ctx.nineq, ctx.nz = neq, nineq, nz

            if solver == QPSolvers.PDIPM_BATCHED:
                # Pre-factorize the KKT system, then run the batched solver.
                ctx.Q_LU, ctx.S_LU, ctx.R = pdipm_b.pre_factor_kkt(Q, G, A)
                zhats, ctx.nus, ctx.lams, ctx.slacks = pdipm_b.forward(
                    Q, p, G, h, A, b, ctx.Q_LU, ctx.S_LU, ctx.R,
                    eps, verbose, notImprovedLim, maxIter)
            elif solver == QPSolvers.CVXPY:
                # Solve each instance independently on the CPU via cvxpy.
                vals = torch.Tensor(nBatch).type_as(Q)
                zhats = torch.Tensor(nBatch, ctx.nz).type_as(Q)
                lams = torch.Tensor(nBatch, ctx.nineq).type_as(Q)
                nus = torch.Tensor(nBatch, ctx.neq).type_as(Q) \
                    if ctx.neq > 0 else torch.Tensor()
                slacks = torch.Tensor(nBatch, ctx.nineq).type_as(Q)
                for i in range(nBatch):
                    Ai, bi = (A[i], b[i]) if neq > 0 else (None, None)
                    vals[i], zhati, nui, lami, si = solvers.cvxpy.forward_single_np(
                        *[x.cpu().numpy() if x is not None else None
                          for x in (Q[i], p[i], G[i], h[i], Ai, bi)])
                    zhats[i] = torch.Tensor(zhati)
                    lams[i] = torch.Tensor(lami)
                    slacks[i] = torch.Tensor(si)
                    if neq > 0:
                        nus[i] = torch.Tensor(nui)

                ctx.vals = vals
                ctx.lams = lams
                ctx.nus = nus
                ctx.slacks = slacks
            else:
                assert False

            ctx.save_for_backward(zhats, Q_, p_, G_, h_, A_, b_)
            return zhats

        @staticmethod
        def backward(ctx, dl_dzhat):
            """Differentiate through the KKT conditions at the solution."""
            zhats, Q, p, G, h, A, b = ctx.saved_tensors
            nBatch = extract_nBatch(Q, p, G, h, A, b)
            # Re-expand shared coefficients; *_e remembers which inputs were
            # shared so their gradients can be averaged over the batch.
            Q, Q_e = expandParam(Q, nBatch, 3)
            p, p_e = expandParam(p, nBatch, 2)
            G, G_e = expandParam(G, nBatch, 3)
            h, h_e = expandParam(h, nBatch, 2)
            A, A_e = expandParam(A, nBatch, 3)
            b, b_e = expandParam(b, nBatch, 2)

            neq, nineq = ctx.neq, ctx.nineq

            if solver == QPSolvers.CVXPY:
                # The cvxpy path never factorized the KKT system; do it now.
                ctx.Q_LU, ctx.S_LU, ctx.R = pdipm_b.pre_factor_kkt(Q, G, A)

            # Clamp to avoid issues coming up when the slacks are too small.
            # TODO: A better fix would be to get lams and slacks from the
            # solver that don't have this issue.
            d = torch.clamp(ctx.lams, min=1e-8) / torch.clamp(ctx.slacks, min=1e-8)

            pdipm_b.factor_kkt(ctx.S_LU, ctx.R, d)
            dx, _, dlam, dnu = pdipm_b.solve_kkt(
                ctx.Q_LU, d, G, A, ctx.S_LU,
                dl_dzhat, torch.zeros(nBatch, nineq).type_as(G),
                torch.zeros(nBatch, nineq).type_as(G),
                torch.zeros(nBatch, neq).type_as(G) if neq > 0 else torch.Tensor())

            # Assemble gradients; shared inputs get the batch mean.
            dps = dx
            dGs = bger(dlam, zhats) + bger(ctx.lams, dx)
            if G_e:
                dGs = dGs.mean(0)
            dhs = -dlam
            if h_e:
                dhs = dhs.mean(0)
            if neq > 0:
                dAs = bger(dnu, zhats) + bger(ctx.nus, dx)
                dbs = -dnu
                if A_e:
                    dAs = dAs.mean(0)
                if b_e:
                    dbs = dbs.mean(0)
            else:
                dAs, dbs = None, None
            dQs = 0.5 * (bger(dx, zhats) + bger(zhats, dx))
            if Q_e:
                dQs = dQs.mean(0)
            if p_e:
                dps = dps.mean(0)
            grads = (dQs, dps, dGs, dhs, dAs, dbs)

            return grads
    return QPFunctionFn.apply
class SpQPFunction(Function):
    """Sparse-matrix variant of the batched QP solver.

    The sparsity patterns (index tensors ``*i`` and sizes ``*sz``) are fixed
    at construction; ``forward``/``backward`` receive only the value tensors.
    Uses the old-style autograd Function API (instance state shared between
    forward and backward), and backward hard-codes
    ``torch.cuda.sparse.DoubleTensor`` for the sparsity masks.
    """
    def __init__(self, Qi, Qsz, Gi, Gsz, Ai, Asz,
                 eps=1e-12, verbose=0, notImprovedLim=3, maxIter=20):
        # Sparse index/size descriptors for Q, G and A.
        self.Qi, self.Qsz = Qi, Qsz
        self.Gi, self.Gsz = Gi, Gsz
        self.Ai, self.Asz = Ai, Asz
        # Interior-point solver settings.
        self.eps = eps
        self.verbose = verbose
        self.notImprovedLim = notImprovedLim
        self.maxIter = maxIter
        # Problem dimensions derived from the constraint-matrix sizes.
        self.nineq, self.nz = Gsz
        self.neq, _ = Asz
    def forward(self, Qv, p, Gv, h, Av, b):
        """Solve the batch of sparse QPs; returns the optimal z."""
        self.nBatch = Qv.size(0)
        # Duals (nus, lams) and slacks are kept for the backward pass.
        zhats, self.nus, self.lams, self.slacks = pdipm_spb.forward(
            self.Qi, Qv, self.Qsz, p, self.Gi, Gv, self.Gsz, h,
            self.Ai, Av, self.Asz, b, self.eps, self.verbose,
            self.notImprovedLim, self.maxIter)
        self.save_for_backward(zhats, Qv, p, Gv, h, Av, b)
        return zhats
    def backward(self, dl_dzhat):
        """Differentiate through the KKT conditions at the solution."""
        zhats, Qv, p, Gv, h, Av, b = self.saved_tensors
        # Diagonal matrix with entries lams/slacks on a fixed diagonal pattern.
        Di = type(self.Qi)([range(self.nineq), range(self.nineq)])
        Dv = self.lams / self.slacks
        Dsz = torch.Size([self.nineq, self.nineq])
        dx, _, dlam, dnu = pdipm_spb.solve_kkt(
            self.Qi, Qv, self.Qsz, Di, Dv, Dsz,
            self.Gi, Gv, self.Gsz,
            self.Ai, Av, self.Asz, dl_dzhat,
            type(p)(self.nBatch, self.nineq).zero_(),
            type(p)(self.nBatch, self.nineq).zero_(),
            type(p)(self.nBatch, self.neq).zero_())
        dps = dx
        # Gradients w.r.t. the sparse values only: mask each dense outer
        # product back down to the fixed sparsity pattern.
        dGs = bger(dlam, zhats) + bger(self.lams, dx)
        GM = torch.cuda.sparse.DoubleTensor(
            self.Gi, Gv[0].clone().fill_(1.0), self.Gsz
        ).to_dense().byte().expand_as(dGs)
        dGs = dGs[GM].view_as(Gv)
        dhs = -dlam
        dAs = bger(dnu, zhats) + bger(self.nus, dx)
        AM = torch.cuda.sparse.DoubleTensor(
            self.Ai, Av[0].clone().fill_(1.0), self.Asz
        ).to_dense().byte().expand_as(dAs)
        dAs = dAs[AM].view_as(Av)
        dbs = -dnu
        dQs = 0.5 * (bger(dx, zhats) + bger(zhats, dx))
        QM = torch.cuda.sparse.DoubleTensor(
            self.Qi, Qv[0].clone().fill_(1.0), self.Qsz
        ).to_dense().byte().expand_as(dQs)
        dQs = dQs[QM].view_as(Qv)
        grads = (dQs, dps, dGs, dhs, dAs, dbs)
        return grads
| 37.773438 | 84 | 0.512099 | import torch
from torch.autograd import Function
from .util import bger, expandParam, extract_nBatch
from . import solvers
from .solvers.pdipm import batch as pdipm_b
from .solvers.pdipm import spbatch as pdipm_spb
from enum import Enum
class QPSolvers(Enum):
    """Available backends for solving the batched QPs."""
    # Batched primal-dual interior-point method (solvers.pdipm.batch).
    PDIPM_BATCHED = 1
    # Per-instance solve via cvxpy on numpy arrays (solvers.cvxpy).
    CVXPY = 2
def QPFunction(eps=1e-12, verbose=1, notImprovedLim=3,
               maxIter=20, solver=QPSolvers.PDIPM_BATCHED,
               check_Q_spd=False):
    """Build an autograd ``Function`` that solves batches of QPs.

    Each instance optimizes over nz variables with nineq inequality and neq
    equality constraints:

        zhat = argmin_z 1/2 z^T Q z + p^T z  subject to  Gz <= h,  Az = b

    ``solver`` picks the backend (batched PDIPM or per-instance cvxpy);
    ``eps``, ``notImprovedLim`` and ``maxIter`` tune the interior-point
    iteration, and ``check_Q_spd`` enables an eigenvalue check on Q.
    """
    class QPFunctionFn(Function):
        @staticmethod
        def forward(ctx, Q_, p_, G_, h_, A_, b_):
            nBatch = extract_nBatch(Q_, p_, G_, h_, A_, b_)
            # Broadcast shared (un-batched) coefficients; the *_e flags from
            # expandParam are only needed in backward.
            Q, _ = expandParam(Q_, nBatch, 3)
            p, _ = expandParam(p_, nBatch, 2)
            G, _ = expandParam(G_, nBatch, 3)
            h, _ = expandParam(h_, nBatch, 2)
            A, _ = expandParam(A_, nBatch, 3)
            b, _ = expandParam(b_, nBatch, 2)

            if check_Q_spd:
                # Optional sanity check that every Q is positive definite.
                # NOTE(review): torch.eig was removed in recent PyTorch
                # releases (torch.linalg.eigvals is the replacement).
                for i in range(nBatch):
                    e, _ = torch.eig(Q[i])
                    if not torch.all(e[:, 0] > 0):
                        raise RuntimeError('Q is not SPD.')

            _, nineq, nz = G.size()
            neq = A.size(1) if A.nelement() > 0 else 0
            assert neq > 0 or nineq > 0
            ctx.neq, ctx.nineq, ctx.nz = neq, nineq, nz

            if solver == QPSolvers.PDIPM_BATCHED:
                # Pre-factorize the KKT system, then run the batched solver.
                ctx.Q_LU, ctx.S_LU, ctx.R = pdipm_b.pre_factor_kkt(Q, G, A)
                zhats, ctx.nus, ctx.lams, ctx.slacks = pdipm_b.forward(
                    Q, p, G, h, A, b, ctx.Q_LU, ctx.S_LU, ctx.R,
                    eps, verbose, notImprovedLim, maxIter)
            elif solver == QPSolvers.CVXPY:
                # Solve each instance independently on the CPU via cvxpy.
                vals = torch.Tensor(nBatch).type_as(Q)
                zhats = torch.Tensor(nBatch, ctx.nz).type_as(Q)
                lams = torch.Tensor(nBatch, ctx.nineq).type_as(Q)
                nus = torch.Tensor(nBatch, ctx.neq).type_as(Q) \
                    if ctx.neq > 0 else torch.Tensor()
                slacks = torch.Tensor(nBatch, ctx.nineq).type_as(Q)
                for i in range(nBatch):
                    Ai, bi = (A[i], b[i]) if neq > 0 else (None, None)
                    vals[i], zhati, nui, lami, si = solvers.cvxpy.forward_single_np(
                        *[x.cpu().numpy() if x is not None else None
                          for x in (Q[i], p[i], G[i], h[i], Ai, bi)])
                    zhats[i] = torch.Tensor(zhati)
                    lams[i] = torch.Tensor(lami)
                    slacks[i] = torch.Tensor(si)
                    if neq > 0:
                        nus[i] = torch.Tensor(nui)

                ctx.vals = vals
                ctx.lams = lams
                ctx.nus = nus
                ctx.slacks = slacks
            else:
                assert False

            ctx.save_for_backward(zhats, Q_, p_, G_, h_, A_, b_)
            return zhats

        @staticmethod
        def backward(ctx, dl_dzhat):
            """Differentiate through the KKT conditions at the solution."""
            zhats, Q, p, G, h, A, b = ctx.saved_tensors
            nBatch = extract_nBatch(Q, p, G, h, A, b)
            # Re-expand shared coefficients; *_e remembers which inputs were
            # shared so their gradients can be averaged over the batch.
            Q, Q_e = expandParam(Q, nBatch, 3)
            p, p_e = expandParam(p, nBatch, 2)
            G, G_e = expandParam(G, nBatch, 3)
            h, h_e = expandParam(h, nBatch, 2)
            A, A_e = expandParam(A, nBatch, 3)
            b, b_e = expandParam(b, nBatch, 2)

            neq, nineq = ctx.neq, ctx.nineq

            if solver == QPSolvers.CVXPY:
                # The cvxpy path never factorized the KKT system; do it now.
                ctx.Q_LU, ctx.S_LU, ctx.R = pdipm_b.pre_factor_kkt(Q, G, A)

            # Clamp to avoid issues coming up when the slacks are too small.
            d = torch.clamp(ctx.lams, min=1e-8) / torch.clamp(ctx.slacks, min=1e-8)

            pdipm_b.factor_kkt(ctx.S_LU, ctx.R, d)
            dx, _, dlam, dnu = pdipm_b.solve_kkt(
                ctx.Q_LU, d, G, A, ctx.S_LU,
                dl_dzhat, torch.zeros(nBatch, nineq).type_as(G),
                torch.zeros(nBatch, nineq).type_as(G),
                torch.zeros(nBatch, neq).type_as(G) if neq > 0 else torch.Tensor())

            # Assemble gradients; shared inputs get the batch mean.
            dps = dx
            dGs = bger(dlam, zhats) + bger(ctx.lams, dx)
            if G_e:
                dGs = dGs.mean(0)
            dhs = -dlam
            if h_e:
                dhs = dhs.mean(0)
            if neq > 0:
                dAs = bger(dnu, zhats) + bger(ctx.nus, dx)
                dbs = -dnu
                if A_e:
                    dAs = dAs.mean(0)
                if b_e:
                    dbs = dbs.mean(0)
            else:
                dAs, dbs = None, None
            dQs = 0.5 * (bger(dx, zhats) + bger(zhats, dx))
            if Q_e:
                dQs = dQs.mean(0)
            if p_e:
                dps = dps.mean(0)
            grads = (dQs, dps, dGs, dhs, dAs, dbs)

            return grads
    return QPFunctionFn.apply
class SpQPFunction(Function):
    """Sparse-matrix variant of the batched QP solver.

    The sparsity patterns (index tensors ``*i`` and sizes ``*sz``) are fixed
    at construction; ``forward``/``backward`` receive only the value tensors.
    Uses the old-style autograd Function API (instance state shared between
    forward and backward), and backward hard-codes
    ``torch.cuda.sparse.DoubleTensor`` for the sparsity masks.
    """
    def __init__(self, Qi, Qsz, Gi, Gsz, Ai, Asz,
                 eps=1e-12, verbose=0, notImprovedLim=3, maxIter=20):
        # Sparse index/size descriptors for Q, G and A.
        self.Qi, self.Qsz = Qi, Qsz
        self.Gi, self.Gsz = Gi, Gsz
        self.Ai, self.Asz = Ai, Asz
        # Interior-point solver settings.
        self.eps = eps
        self.verbose = verbose
        self.notImprovedLim = notImprovedLim
        self.maxIter = maxIter
        # Problem dimensions derived from the constraint-matrix sizes.
        self.nineq, self.nz = Gsz
        self.neq, _ = Asz
    def forward(self, Qv, p, Gv, h, Av, b):
        """Solve the batch of sparse QPs; returns the optimal z."""
        self.nBatch = Qv.size(0)
        # Duals (nus, lams) and slacks are kept for the backward pass.
        zhats, self.nus, self.lams, self.slacks = pdipm_spb.forward(
            self.Qi, Qv, self.Qsz, p, self.Gi, Gv, self.Gsz, h,
            self.Ai, Av, self.Asz, b, self.eps, self.verbose,
            self.notImprovedLim, self.maxIter)
        self.save_for_backward(zhats, Qv, p, Gv, h, Av, b)
        return zhats
    def backward(self, dl_dzhat):
        """Differentiate through the KKT conditions at the solution."""
        zhats, Qv, p, Gv, h, Av, b = self.saved_tensors
        # Diagonal matrix with entries lams/slacks on a fixed diagonal pattern.
        Di = type(self.Qi)([range(self.nineq), range(self.nineq)])
        Dv = self.lams / self.slacks
        Dsz = torch.Size([self.nineq, self.nineq])
        dx, _, dlam, dnu = pdipm_spb.solve_kkt(
            self.Qi, Qv, self.Qsz, Di, Dv, Dsz,
            self.Gi, Gv, self.Gsz,
            self.Ai, Av, self.Asz, dl_dzhat,
            type(p)(self.nBatch, self.nineq).zero_(),
            type(p)(self.nBatch, self.nineq).zero_(),
            type(p)(self.nBatch, self.neq).zero_())
        dps = dx
        # Gradients w.r.t. the sparse values only: mask each dense outer
        # product back down to the fixed sparsity pattern.
        dGs = bger(dlam, zhats) + bger(self.lams, dx)
        GM = torch.cuda.sparse.DoubleTensor(
            self.Gi, Gv[0].clone().fill_(1.0), self.Gsz
        ).to_dense().byte().expand_as(dGs)
        dGs = dGs[GM].view_as(Gv)
        dhs = -dlam
        dAs = bger(dnu, zhats) + bger(self.nus, dx)
        AM = torch.cuda.sparse.DoubleTensor(
            self.Ai, Av[0].clone().fill_(1.0), self.Asz
        ).to_dense().byte().expand_as(dAs)
        dAs = dAs[AM].view_as(Av)
        dbs = -dnu
        dQs = 0.5 * (bger(dx, zhats) + bger(zhats, dx))
        QM = torch.cuda.sparse.DoubleTensor(
            self.Qi, Qv[0].clone().fill_(1.0), self.Qsz
        ).to_dense().byte().expand_as(dQs)
        dQs = dQs[QM].view_as(Qv)
        grads = (dQs, dps, dGs, dhs, dAs, dbs)
        return grads
| true | true |
f720ef19782f7092c0e07d4d635eb810543e0ea4 | 9,608 | py | Python | tests/functional/tests/management/test_add_remove.py | beef9999/ocf | 4d1b086956e3019456fa86c33954eeb53cfeab9e | [
"BSD-3-Clause-Clear"
] | null | null | null | tests/functional/tests/management/test_add_remove.py | beef9999/ocf | 4d1b086956e3019456fa86c33954eeb53cfeab9e | [
"BSD-3-Clause-Clear"
] | null | null | null | tests/functional/tests/management/test_add_remove.py | beef9999/ocf | 4d1b086956e3019456fa86c33954eeb53cfeab9e | [
"BSD-3-Clause-Clear"
] | null | null | null | # Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
import pytest
from ctypes import c_int
from random import randint
from pyocf.types.cache import Cache, CacheMode
from pyocf.types.core import Core
from pyocf.types.volume import Volume
from pyocf.types.data import Data
from pyocf.types.io import IoDir
from pyocf.utils import Size as S
from pyocf.types.shared import OcfError, OcfCompletion, CacheLineSize
@pytest.mark.parametrize("cache_mode", CacheMode)
@pytest.mark.parametrize("cls", CacheLineSize)
def test_adding_core(pyocf_ctx, cache_mode, cls):
# Start cache device
cache_device = Volume(S.from_MiB(30))
cache = Cache.start_on_device(
cache_device, cache_mode=cache_mode, cache_line_size=cls
)
# Create core device
core_device = Volume(S.from_MiB(10))
core = Core.using_device(core_device)
# Check statistics before adding core
stats = cache.get_stats()
assert stats["conf"]["core_count"] == 0
# Add core to cache
cache.add_core(core)
# Check statistics after adding core
stats = cache.get_stats()
assert stats["conf"]["core_count"] == 1
@pytest.mark.parametrize("cache_mode", CacheMode)
@pytest.mark.parametrize("cls", CacheLineSize)
def test_removing_core(pyocf_ctx, cache_mode, cls):
# Start cache device
cache_device = Volume(S.from_MiB(30))
cache = Cache.start_on_device(
cache_device, cache_mode=cache_mode, cache_line_size=cls
)
# Create core device
core_device = Volume(S.from_MiB(10))
core = Core.using_device(core_device)
# Add core to cache
cache.add_core(core)
# Remove core from cache
cache.remove_core(core)
# Check statistics after removing core
stats = cache.get_stats()
assert stats["conf"]["core_count"] == 0
@pytest.mark.parametrize("cache_mode", [CacheMode.WB])
@pytest.mark.parametrize("cls", CacheLineSize)
def test_remove_dirty_no_flush(pyocf_ctx, cache_mode, cls):
# Start cache device
cache_device = Volume(S.from_MiB(30))
cache = Cache.start_on_device(
cache_device, cache_mode=cache_mode, cache_line_size=cls
)
# Create core device
core_device = Volume(S.from_MiB(10))
core = Core.using_device(core_device)
cache.add_core(core)
# Prepare data
core_size = core.get_stats()["size"]
data = Data(core_size.B)
_io_to_core(core, data)
# Remove core from cache
cache.remove_core(core)
def test_30add_remove(pyocf_ctx):
# Start cache device
cache_device = Volume(S.from_MiB(30))
cache = Cache.start_on_device(cache_device)
# Create core device
core_device = Volume(S.from_MiB(10))
core = Core.using_device(core_device)
# Add and remove core device in a loop 100 times
# Check statistics after every operation
for i in range(0, 30):
cache.add_core(core)
stats = cache.get_stats()
assert stats["conf"]["core_count"] == 1
cache.remove_core(core)
stats = cache.get_stats()
assert stats["conf"]["core_count"] == 0
def test_10add_remove_with_io(pyocf_ctx):
# Start cache device
cache_device = Volume(S.from_MiB(30))
cache = Cache.start_on_device(cache_device)
# Create core device
core_device = Volume(S.from_MiB(10))
core = Core.using_device(core_device)
# Add and remove core 10 times in a loop with io in between
for i in range(0, 10):
cache.add_core(core)
stats = cache.get_stats()
assert stats["conf"]["core_count"] == 1
write_data = Data.from_string("Test data")
io = core.new_io(
cache.get_default_queue(), S.from_sector(1).B, write_data.size,
IoDir.WRITE, 0, 0
)
io.set_data(write_data)
cmpl = OcfCompletion([("err", c_int)])
io.callback = cmpl.callback
io.submit()
cmpl.wait()
cache.remove_core(core)
stats = cache.get_stats()
assert stats["conf"]["core_count"] == 0
def test_add_remove_30core(pyocf_ctx):
# Start cache device
cache_device = Volume(S.from_MiB(30))
cache = Cache.start_on_device(cache_device)
core_devices = []
core_amount = 30
# Add 50 cores and check stats after each addition
for i in range(0, core_amount):
stats = cache.get_stats()
assert stats["conf"]["core_count"] == i
core_device = Volume(S.from_MiB(10))
core = Core.using_device(core_device, name=f"core{i}")
core_devices.append(core)
cache.add_core(core)
# Remove 50 cores and check stats before each removal
for i in range(0, core_amount):
stats = cache.get_stats()
assert stats["conf"]["core_count"] == core_amount - i
cache.remove_core(core_devices[i])
# Check statistics
stats = cache.get_stats()
assert stats["conf"]["core_count"] == 0
def test_adding_to_random_cache(pyocf_ctx):
cache_devices = []
core_devices = {}
cache_amount = 5
core_amount = 30
# Create 5 cache devices
for i in range(0, cache_amount):
cache_device = Volume(S.from_MiB(30))
cache = Cache.start_on_device(cache_device, name=f"cache{i}")
cache_devices.append(cache)
# Create 50 core devices and add to random cache
for i in range(0, core_amount):
core_device = Volume(S.from_MiB(10))
core = Core.using_device(core_device, name=f"core{i}")
core_devices[core] = randint(0, cache_amount - 1)
cache_devices[core_devices[core]].add_core(core)
# Count expected number of cores per cache
count_dict = {}
for i in range(0, cache_amount):
count_dict[i] = sum(k == i for k in core_devices.values())
# Check if cache statistics are as expected
for i in range(0, cache_amount):
stats = cache_devices[i].get_stats()
assert stats["conf"]["core_count"] == count_dict[i]
@pytest.mark.parametrize("cache_mode", CacheMode)
@pytest.mark.parametrize("cls", CacheLineSize)
def test_adding_core_twice(pyocf_ctx, cache_mode, cls):
# Start cache device
cache_device = Volume(S.from_MiB(30))
cache = Cache.start_on_device(
cache_device, cache_mode=cache_mode, cache_line_size=cls
)
# Create core device
core_device = Volume(S.from_MiB(10))
core = Core.using_device(core_device)
# Add core
cache.add_core(core)
# Check that it is not possible to add the same core again
with pytest.raises(OcfError):
cache.add_core(core)
# Check that core count is still equal to one
stats = cache.get_stats()
assert stats["conf"]["core_count"] == 1
@pytest.mark.parametrize("cache_mode", CacheMode)
@pytest.mark.parametrize("cls", CacheLineSize)
def test_adding_core_already_used(pyocf_ctx, cache_mode, cls):
# Start first cache device
cache_device1 = Volume(S.from_MiB(30))
cache1 = Cache.start_on_device(
cache_device1, cache_mode=cache_mode, cache_line_size=cls, name="cache1"
)
# Start second cache device
cache_device2 = Volume(S.from_MiB(30))
cache2 = Cache.start_on_device(
cache_device2, cache_mode=cache_mode, cache_line_size=cls, name="cache2"
)
# Create core device
core_device = Volume(S.from_MiB(10))
core = Core.using_device(core_device)
# Add core to first cache
cache1.add_core(core)
# Check that it is not possible to add core to second cache
with pytest.raises(OcfError):
cache2.add_core(core)
# Check that core count is as expected
stats = cache1.get_stats()
assert stats["conf"]["core_count"] == 1
stats = cache2.get_stats()
assert stats["conf"]["core_count"] == 0
@pytest.mark.parametrize("cache_mode", CacheMode)
@pytest.mark.parametrize("cls", CacheLineSize)
def test_add_remove_incrementally(pyocf_ctx, cache_mode, cls):
# Start cache device
cache_device = Volume(S.from_MiB(30))
cache = Cache.start_on_device(
cache_device, cache_mode=cache_mode, cache_line_size=cls
)
core_devices = []
core_amount = 5
# Create 5 core devices and add to cache
for i in range(0, core_amount):
core_device = Volume(S.from_MiB(10))
core = Core.using_device(core_device, name=f"core{i}")
core_devices.append(core)
cache.add_core(core)
# Check that core count is as expected
stats = cache.get_stats()
assert stats["conf"]["core_count"] == core_amount
# Remove 3 cores
cache.remove_core(core_devices[0])
cache.remove_core(core_devices[1])
cache.remove_core(core_devices[2])
# Add 2 cores and check if core count is as expected
cache.add_core(core_devices[0])
cache.add_core(core_devices[1])
stats = cache.get_stats()
assert stats["conf"]["core_count"] == core_amount - 1
# Remove 1 core and check if core count is as expected
cache.remove_core(core_devices[1])
stats = cache.get_stats()
assert stats["conf"]["core_count"] == core_amount - 2
# Add 2 cores and check if core count is as expected
cache.add_core(core_devices[1])
cache.add_core(core_devices[2])
stats = cache.get_stats()
assert stats["conf"]["core_count"] == core_amount
def _io_to_core(exported_obj: Core, data: Data):
io = exported_obj.new_io(exported_obj.cache.get_default_queue(), 0, data.size,
IoDir.WRITE, 0, 0)
io.set_data(data)
completion = OcfCompletion([("err", c_int)])
io.callback = completion.callback
io.submit()
completion.wait()
assert completion.results["err"] == 0, "IO to exported object completion"
| 30.405063 | 82 | 0.679954 |
import pytest
from ctypes import c_int
from random import randint
from pyocf.types.cache import Cache, CacheMode
from pyocf.types.core import Core
from pyocf.types.volume import Volume
from pyocf.types.data import Data
from pyocf.types.io import IoDir
from pyocf.utils import Size as S
from pyocf.types.shared import OcfError, OcfCompletion, CacheLineSize
@pytest.mark.parametrize("cache_mode", CacheMode)
@pytest.mark.parametrize("cls", CacheLineSize)
def test_adding_core(pyocf_ctx, cache_mode, cls):
cache_device = Volume(S.from_MiB(30))
cache = Cache.start_on_device(
cache_device, cache_mode=cache_mode, cache_line_size=cls
)
core_device = Volume(S.from_MiB(10))
core = Core.using_device(core_device)
stats = cache.get_stats()
assert stats["conf"]["core_count"] == 0
cache.add_core(core)
stats = cache.get_stats()
assert stats["conf"]["core_count"] == 1
@pytest.mark.parametrize("cache_mode", CacheMode)
@pytest.mark.parametrize("cls", CacheLineSize)
def test_removing_core(pyocf_ctx, cache_mode, cls):
cache_device = Volume(S.from_MiB(30))
cache = Cache.start_on_device(
cache_device, cache_mode=cache_mode, cache_line_size=cls
)
core_device = Volume(S.from_MiB(10))
core = Core.using_device(core_device)
cache.add_core(core)
cache.remove_core(core)
stats = cache.get_stats()
assert stats["conf"]["core_count"] == 0
@pytest.mark.parametrize("cache_mode", [CacheMode.WB])
@pytest.mark.parametrize("cls", CacheLineSize)
def test_remove_dirty_no_flush(pyocf_ctx, cache_mode, cls):
cache_device = Volume(S.from_MiB(30))
cache = Cache.start_on_device(
cache_device, cache_mode=cache_mode, cache_line_size=cls
)
core_device = Volume(S.from_MiB(10))
core = Core.using_device(core_device)
cache.add_core(core)
core_size = core.get_stats()["size"]
data = Data(core_size.B)
_io_to_core(core, data)
cache.remove_core(core)
def test_30add_remove(pyocf_ctx):
cache_device = Volume(S.from_MiB(30))
cache = Cache.start_on_device(cache_device)
core_device = Volume(S.from_MiB(10))
core = Core.using_device(core_device)
for i in range(0, 30):
cache.add_core(core)
stats = cache.get_stats()
assert stats["conf"]["core_count"] == 1
cache.remove_core(core)
stats = cache.get_stats()
assert stats["conf"]["core_count"] == 0
def test_10add_remove_with_io(pyocf_ctx):
cache_device = Volume(S.from_MiB(30))
cache = Cache.start_on_device(cache_device)
core_device = Volume(S.from_MiB(10))
core = Core.using_device(core_device)
for i in range(0, 10):
cache.add_core(core)
stats = cache.get_stats()
assert stats["conf"]["core_count"] == 1
write_data = Data.from_string("Test data")
io = core.new_io(
cache.get_default_queue(), S.from_sector(1).B, write_data.size,
IoDir.WRITE, 0, 0
)
io.set_data(write_data)
cmpl = OcfCompletion([("err", c_int)])
io.callback = cmpl.callback
io.submit()
cmpl.wait()
cache.remove_core(core)
stats = cache.get_stats()
assert stats["conf"]["core_count"] == 0
def test_add_remove_30core(pyocf_ctx):
cache_device = Volume(S.from_MiB(30))
cache = Cache.start_on_device(cache_device)
core_devices = []
core_amount = 30
for i in range(0, core_amount):
stats = cache.get_stats()
assert stats["conf"]["core_count"] == i
core_device = Volume(S.from_MiB(10))
core = Core.using_device(core_device, name=f"core{i}")
core_devices.append(core)
cache.add_core(core)
for i in range(0, core_amount):
stats = cache.get_stats()
assert stats["conf"]["core_count"] == core_amount - i
cache.remove_core(core_devices[i])
stats = cache.get_stats()
assert stats["conf"]["core_count"] == 0
def test_adding_to_random_cache(pyocf_ctx):
cache_devices = []
core_devices = {}
cache_amount = 5
core_amount = 30
for i in range(0, cache_amount):
cache_device = Volume(S.from_MiB(30))
cache = Cache.start_on_device(cache_device, name=f"cache{i}")
cache_devices.append(cache)
for i in range(0, core_amount):
core_device = Volume(S.from_MiB(10))
core = Core.using_device(core_device, name=f"core{i}")
core_devices[core] = randint(0, cache_amount - 1)
cache_devices[core_devices[core]].add_core(core)
count_dict = {}
for i in range(0, cache_amount):
count_dict[i] = sum(k == i for k in core_devices.values())
for i in range(0, cache_amount):
stats = cache_devices[i].get_stats()
assert stats["conf"]["core_count"] == count_dict[i]
@pytest.mark.parametrize("cache_mode", CacheMode)
@pytest.mark.parametrize("cls", CacheLineSize)
def test_adding_core_twice(pyocf_ctx, cache_mode, cls):
cache_device = Volume(S.from_MiB(30))
cache = Cache.start_on_device(
cache_device, cache_mode=cache_mode, cache_line_size=cls
)
core_device = Volume(S.from_MiB(10))
core = Core.using_device(core_device)
cache.add_core(core)
with pytest.raises(OcfError):
cache.add_core(core)
stats = cache.get_stats()
assert stats["conf"]["core_count"] == 1
@pytest.mark.parametrize("cache_mode", CacheMode)
@pytest.mark.parametrize("cls", CacheLineSize)
def test_adding_core_already_used(pyocf_ctx, cache_mode, cls):
cache_device1 = Volume(S.from_MiB(30))
cache1 = Cache.start_on_device(
cache_device1, cache_mode=cache_mode, cache_line_size=cls, name="cache1"
)
cache_device2 = Volume(S.from_MiB(30))
cache2 = Cache.start_on_device(
cache_device2, cache_mode=cache_mode, cache_line_size=cls, name="cache2"
)
core_device = Volume(S.from_MiB(10))
core = Core.using_device(core_device)
cache1.add_core(core)
with pytest.raises(OcfError):
cache2.add_core(core)
stats = cache1.get_stats()
assert stats["conf"]["core_count"] == 1
stats = cache2.get_stats()
assert stats["conf"]["core_count"] == 0
@pytest.mark.parametrize("cache_mode", CacheMode)
@pytest.mark.parametrize("cls", CacheLineSize)
def test_add_remove_incrementally(pyocf_ctx, cache_mode, cls):
cache_device = Volume(S.from_MiB(30))
cache = Cache.start_on_device(
cache_device, cache_mode=cache_mode, cache_line_size=cls
)
core_devices = []
core_amount = 5
for i in range(0, core_amount):
core_device = Volume(S.from_MiB(10))
core = Core.using_device(core_device, name=f"core{i}")
core_devices.append(core)
cache.add_core(core)
stats = cache.get_stats()
assert stats["conf"]["core_count"] == core_amount
cache.remove_core(core_devices[0])
cache.remove_core(core_devices[1])
cache.remove_core(core_devices[2])
cache.add_core(core_devices[0])
cache.add_core(core_devices[1])
stats = cache.get_stats()
assert stats["conf"]["core_count"] == core_amount - 1
cache.remove_core(core_devices[1])
stats = cache.get_stats()
assert stats["conf"]["core_count"] == core_amount - 2
cache.add_core(core_devices[1])
cache.add_core(core_devices[2])
stats = cache.get_stats()
assert stats["conf"]["core_count"] == core_amount
def _io_to_core(exported_obj: Core, data: Data):
io = exported_obj.new_io(exported_obj.cache.get_default_queue(), 0, data.size,
IoDir.WRITE, 0, 0)
io.set_data(data)
completion = OcfCompletion([("err", c_int)])
io.callback = completion.callback
io.submit()
completion.wait()
assert completion.results["err"] == 0, "IO to exported object completion"
| true | true |
f720efc3c7a943431ee1490b8c525586b3496e7e | 98 | py | Python | game/forms.py | mingaleg/yakubovich | 95398c78eaffbd6ff69f8fdbedfc847531219d8a | [
"MIT"
] | 5 | 2018-12-12T16:24:42.000Z | 2020-02-29T18:45:30.000Z | game/forms.py | mingaleg/yakubovich | 95398c78eaffbd6ff69f8fdbedfc847531219d8a | [
"MIT"
] | 3 | 2020-06-05T17:47:13.000Z | 2022-02-11T03:39:54.000Z | game/forms.py | mingaleg/yakubovich | 95398c78eaffbd6ff69f8fdbedfc847531219d8a | [
"MIT"
] | null | null | null | from django import forms
class GuessForm(forms.Form):
guess = forms.CharField(max_length=32) | 19.6 | 42 | 0.765306 | from django import forms
class GuessForm(forms.Form):
guess = forms.CharField(max_length=32) | true | true |
f720f0cdfccab7e5f9e79ca3a814fc670b37f244 | 7,403 | py | Python | packages/syft/src/syft/core/node/network.py | Noob-can-Compile/PySyft | 156cf93489b16dd0205b0058d4d23d56b3a91ab8 | [
"Apache-2.0"
] | null | null | null | packages/syft/src/syft/core/node/network.py | Noob-can-Compile/PySyft | 156cf93489b16dd0205b0058d4d23d56b3a91ab8 | [
"Apache-2.0"
] | null | null | null | packages/syft/src/syft/core/node/network.py | Noob-can-Compile/PySyft | 156cf93489b16dd0205b0058d4d23d56b3a91ab8 | [
"Apache-2.0"
] | null | null | null | # future
from __future__ import annotations
# stdlib
import os
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Union
# third party
import ascii_magic
from nacl.signing import SigningKey
from nacl.signing import VerifyKey
from pydantic import BaseSettings
# relative
from ...lib.python import String
from ...logger import error
from ..common.message import SignedImmediateSyftMessageWithReply
from ..common.message import SignedMessage
from ..common.message import SyftMessage
from ..common.uid import UID
from ..io.location import Location
from ..io.location import SpecificLocation
from .common.node import Node
from .common.node_manager.association_request_manager import AssociationRequestManager
from .common.node_manager.node_manager import NodeManager
from .common.node_manager.node_route_manager import NodeRouteManager
from .common.node_manager.role_manager import RoleManager
from .common.node_manager.user_manager import UserManager
from .common.node_service.association_request.association_request_service import (
AssociationRequestService,
)
from .common.node_service.association_request.association_request_service import (
AssociationRequestWithoutReplyService,
)
from .common.node_service.network_search.network_search_service import (
NetworkSearchService,
)
from .common.node_service.node_setup.node_setup_messages import (
CreateInitialSetUpMessage,
)
from .common.node_service.node_setup.node_setup_service import NodeSetupService
from .common.node_service.peer_discovery.peer_discovery_service import (
PeerDiscoveryService,
)
from .common.node_service.ping.ping_service import PingService
from .common.node_service.request_receiver.request_receiver_messages import (
RequestMessage,
)
from .common.node_service.role_manager.role_manager_service import RoleManagerService
from .common.node_service.user_manager.user_manager_service import UserManagerService
from .common.node_service.vpn.vpn_service import VPNConnectService
from .common.node_service.vpn.vpn_service import VPNJoinSelfService
from .common.node_service.vpn.vpn_service import VPNJoinService
from .common.node_service.vpn.vpn_service import VPNRegisterService
from .common.node_service.vpn.vpn_service import VPNStatusService
from .domain import Domain
from .domain_client import DomainClient
from .network_client import NetworkClient
class Network(Node):
network: SpecificLocation
child_type = Domain
client_type = NetworkClient
child_type_client_type = DomainClient
def __init__(
self,
name: Optional[str],
network: SpecificLocation = SpecificLocation(),
domain: Optional[Location] = None,
device: Optional[Location] = None,
vm: Optional[Location] = None,
signing_key: Optional[SigningKey] = None,
verify_key: Optional[VerifyKey] = None,
root_key: Optional[VerifyKey] = None,
db_engine: Any = None,
settings: Optional[BaseSettings] = None,
):
super().__init__(
name=name,
network=network,
domain=domain,
device=device,
vm=vm,
signing_key=signing_key,
verify_key=verify_key,
db_engine=db_engine,
settings=settings,
)
# share settings with the FastAPI application level
self.settings = settings
# specific location with name
self.network = SpecificLocation(name=self.name)
self.root_key = root_key
# Database Management Instances
self.users = UserManager(db_engine)
self.roles = RoleManager(db_engine)
self.node = NodeManager(db_engine)
self.node_route = NodeRouteManager(db_engine)
self.association_requests = AssociationRequestManager(db_engine)
# Grid Network Services
self.immediate_services_with_reply.append(AssociationRequestService)
self.immediate_services_with_reply.append(NodeSetupService)
self.immediate_services_with_reply.append(RoleManagerService)
self.immediate_services_with_reply.append(UserManagerService)
self.immediate_services_with_reply.append(VPNConnectService)
self.immediate_services_with_reply.append(VPNJoinService)
self.immediate_services_with_reply.append(VPNRegisterService)
self.immediate_services_with_reply.append(VPNStatusService)
self.immediate_services_with_reply.append(VPNJoinSelfService)
self.immediate_services_with_reply.append(PingService)
self.immediate_services_with_reply.append(NetworkSearchService)
self.immediate_services_with_reply.append(PeerDiscoveryService)
self.immediate_services_without_reply.append(
AssociationRequestWithoutReplyService
)
self.requests: List[RequestMessage] = list()
# available_device_types = set()
# TODO: add available compute types
# default_device = None
# TODO: add default compute type
self._register_services()
self.request_handlers: List[Dict[Union[str, String], Any]] = []
self.handled_requests: Dict[Any, float] = {}
self.post_init()
def initial_setup( # nosec
self,
first_superuser_name: str = "Jane Doe",
first_superuser_email: str = "info@openmined.org",
first_superuser_password: str = "changethis",
first_superuser_budget: float = 5.55,
domain_name: str = "BigHospital",
) -> Network:
# Build Syft Message
msg: SignedImmediateSyftMessageWithReply = CreateInitialSetUpMessage(
address=self.address,
name=first_superuser_name,
email=first_superuser_email,
password=first_superuser_password,
domain_name=domain_name,
budget=first_superuser_budget,
reply_to=self.address,
).sign(signing_key=self.signing_key)
# Process syft message
_ = self.recv_immediate_msg_with_reply(msg=msg).message
return self
def post_init(self) -> None:
super().post_init()
self.set_node_uid()
def loud_print(self) -> None:
try:
install_path = os.path.abspath(
os.path.join(os.path.realpath(__file__), "../../../img/")
)
ascii_magic.to_terminal(
ascii_magic.from_image_file(
img_path=install_path + "/pygrid.png", columns=83
)
)
print(
r"""
|\ | _ |_ _ _ |
| \| (- |_ \)/ (_) | |(
"""
)
except Exception:
print("NETOWRK NODE (print fail backup)")
@property
def icon(self) -> str:
return "🔗"
@property
def id(self) -> UID:
return self.network.id
def message_is_for_me(self, msg: Union[SyftMessage, SignedMessage]) -> bool:
# this needs to be defensive by checking network_id NOT network.id or it breaks
try:
return msg.address.network_id == self.id and msg.address.domain is None
except Exception as e:
error(f"Error checking if {msg.pprint} is for me on {self.pprint}. {e}")
return False
| 35.763285 | 87 | 0.694178 |
from __future__ import annotations
import os
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Union
import ascii_magic
from nacl.signing import SigningKey
from nacl.signing import VerifyKey
from pydantic import BaseSettings
from ...lib.python import String
from ...logger import error
from ..common.message import SignedImmediateSyftMessageWithReply
from ..common.message import SignedMessage
from ..common.message import SyftMessage
from ..common.uid import UID
from ..io.location import Location
from ..io.location import SpecificLocation
from .common.node import Node
from .common.node_manager.association_request_manager import AssociationRequestManager
from .common.node_manager.node_manager import NodeManager
from .common.node_manager.node_route_manager import NodeRouteManager
from .common.node_manager.role_manager import RoleManager
from .common.node_manager.user_manager import UserManager
from .common.node_service.association_request.association_request_service import (
AssociationRequestService,
)
from .common.node_service.association_request.association_request_service import (
AssociationRequestWithoutReplyService,
)
from .common.node_service.network_search.network_search_service import (
NetworkSearchService,
)
from .common.node_service.node_setup.node_setup_messages import (
CreateInitialSetUpMessage,
)
from .common.node_service.node_setup.node_setup_service import NodeSetupService
from .common.node_service.peer_discovery.peer_discovery_service import (
PeerDiscoveryService,
)
from .common.node_service.ping.ping_service import PingService
from .common.node_service.request_receiver.request_receiver_messages import (
RequestMessage,
)
from .common.node_service.role_manager.role_manager_service import RoleManagerService
from .common.node_service.user_manager.user_manager_service import UserManagerService
from .common.node_service.vpn.vpn_service import VPNConnectService
from .common.node_service.vpn.vpn_service import VPNJoinSelfService
from .common.node_service.vpn.vpn_service import VPNJoinService
from .common.node_service.vpn.vpn_service import VPNRegisterService
from .common.node_service.vpn.vpn_service import VPNStatusService
from .domain import Domain
from .domain_client import DomainClient
from .network_client import NetworkClient
class Network(Node):
network: SpecificLocation
child_type = Domain
client_type = NetworkClient
child_type_client_type = DomainClient
def __init__(
self,
name: Optional[str],
network: SpecificLocation = SpecificLocation(),
domain: Optional[Location] = None,
device: Optional[Location] = None,
vm: Optional[Location] = None,
signing_key: Optional[SigningKey] = None,
verify_key: Optional[VerifyKey] = None,
root_key: Optional[VerifyKey] = None,
db_engine: Any = None,
settings: Optional[BaseSettings] = None,
):
super().__init__(
name=name,
network=network,
domain=domain,
device=device,
vm=vm,
signing_key=signing_key,
verify_key=verify_key,
db_engine=db_engine,
settings=settings,
)
self.settings = settings
self.network = SpecificLocation(name=self.name)
self.root_key = root_key
self.users = UserManager(db_engine)
self.roles = RoleManager(db_engine)
self.node = NodeManager(db_engine)
self.node_route = NodeRouteManager(db_engine)
self.association_requests = AssociationRequestManager(db_engine)
self.immediate_services_with_reply.append(AssociationRequestService)
self.immediate_services_with_reply.append(NodeSetupService)
self.immediate_services_with_reply.append(RoleManagerService)
self.immediate_services_with_reply.append(UserManagerService)
self.immediate_services_with_reply.append(VPNConnectService)
self.immediate_services_with_reply.append(VPNJoinService)
self.immediate_services_with_reply.append(VPNRegisterService)
self.immediate_services_with_reply.append(VPNStatusService)
self.immediate_services_with_reply.append(VPNJoinSelfService)
self.immediate_services_with_reply.append(PingService)
self.immediate_services_with_reply.append(NetworkSearchService)
self.immediate_services_with_reply.append(PeerDiscoveryService)
self.immediate_services_without_reply.append(
AssociationRequestWithoutReplyService
)
self.requests: List[RequestMessage] = list()
self._register_services()
self.request_handlers: List[Dict[Union[str, String], Any]] = []
self.handled_requests: Dict[Any, float] = {}
self.post_init()
def initial_setup(
self,
first_superuser_name: str = "Jane Doe",
first_superuser_email: str = "info@openmined.org",
first_superuser_password: str = "changethis",
first_superuser_budget: float = 5.55,
domain_name: str = "BigHospital",
) -> Network:
msg: SignedImmediateSyftMessageWithReply = CreateInitialSetUpMessage(
address=self.address,
name=first_superuser_name,
email=first_superuser_email,
password=first_superuser_password,
domain_name=domain_name,
budget=first_superuser_budget,
reply_to=self.address,
).sign(signing_key=self.signing_key)
_ = self.recv_immediate_msg_with_reply(msg=msg).message
return self
def post_init(self) -> None:
super().post_init()
self.set_node_uid()
def loud_print(self) -> None:
try:
install_path = os.path.abspath(
os.path.join(os.path.realpath(__file__), "../../../img/")
)
ascii_magic.to_terminal(
ascii_magic.from_image_file(
img_path=install_path + "/pygrid.png", columns=83
)
)
print(
r"""
|\ | _ |_ _ _ |
| \| (- |_ \)/ (_) | |(
"""
)
except Exception:
print("NETOWRK NODE (print fail backup)")
@property
def icon(self) -> str:
return "🔗"
@property
def id(self) -> UID:
return self.network.id
def message_is_for_me(self, msg: Union[SyftMessage, SignedMessage]) -> bool:
try:
return msg.address.network_id == self.id and msg.address.domain is None
except Exception as e:
error(f"Error checking if {msg.pprint} is for me on {self.pprint}. {e}")
return False
| true | true |
f720f0e6e33f0328fc6c7ca0e2c409dffe494e2d | 469 | py | Python | rest/taskrouter/activities/list/get/example-1/example-1.5.x.py | azaddeveloper/api-snippets | f88b153cd7186fa70b33733b205886502db0d1f2 | [
"MIT"
] | 2 | 2017-11-23T11:31:20.000Z | 2018-01-22T04:14:02.000Z | rest/taskrouter/activities/list/get/example-1/example-1.5.x.py | azaddeveloper/api-snippets | f88b153cd7186fa70b33733b205886502db0d1f2 | [
"MIT"
] | null | null | null | rest/taskrouter/activities/list/get/example-1/example-1.5.x.py | azaddeveloper/api-snippets | f88b153cd7186fa70b33733b205886502db0d1f2 | [
"MIT"
] | 2 | 2020-05-22T23:31:21.000Z | 2021-06-10T18:33:45.000Z | # Download the Python helper library from twilio.com/docs/python/install
from twilio.rest import TwilioTaskRouterClient
# Your Account Sid and Auth Token from twilio.com/user/account
account_sid = "ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
auth_token = "your_auth_token"
workspace_sid = "WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
client = TwilioTaskRouterClient(account_sid, auth_token)
for activity in client.activities(workspace_sid).list():
print(activity.friendly_name)
| 36.076923 | 72 | 0.831557 |
from twilio.rest import TwilioTaskRouterClient
account_sid = "ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
auth_token = "your_auth_token"
workspace_sid = "WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
client = TwilioTaskRouterClient(account_sid, auth_token)
for activity in client.activities(workspace_sid).list():
print(activity.friendly_name)
| true | true |
f720f0e9572244aa93d948eff6a96fb8c4142ebe | 26,980 | py | Python | lang/python/github/com/metaprov/modelaapi/services/modelautobuilder/v1/modelautobuilder_pb2.py | metaprov/modelaapi | 64ab493dd73329196235e15776e5177c72281990 | [
"Apache-2.0"
] | 5 | 2022-02-18T03:40:10.000Z | 2022-03-01T16:11:24.000Z | lang/python/github/com/metaprov/modelaapi/services/modelautobuilder/v1/modelautobuilder_pb2.py | metaprov/modelaapi | 64ab493dd73329196235e15776e5177c72281990 | [
"Apache-2.0"
] | 1 | 2022-01-07T19:59:25.000Z | 2022-02-04T01:21:14.000Z | lang/python/github/com/metaprov/modelaapi/services/modelautobuilder/v1/modelautobuilder_pb2.py | metaprov/modelaapi | 64ab493dd73329196235e15776e5177c72281990 | [
"Apache-2.0"
] | 1 | 2022-03-25T10:21:43.000Z | 2022-03-25T10:21:43.000Z | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: github.com/metaprov/modelaapi/services/modelautobuilder/v1/modelautobuilder.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from github.com.metaprov.modelaapi.pkg.apis.training.v1alpha1 import generated_pb2 as github_dot_com_dot_metaprov_dot_modelaapi_dot_pkg_dot_apis_dot_training_dot_v1alpha1_dot_generated__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='github.com/metaprov/modelaapi/services/modelautobuilder/v1/modelautobuilder.proto',
package='github.com.metaprov.modelaapi.services.modelautobuilder.v1',
syntax='proto3',
serialized_options=b'Z:github.com/metaprov/modelaapi/services/modelautobuilder/v1',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\nQgithub.com/metaprov/modelaapi/services/modelautobuilder/v1/modelautobuilder.proto\x12:github.com.metaprov.modelaapi.services.modelautobuilder.v1\x1a\x1cgoogle/api/annotations.proto\x1aHgithub.com/metaprov/modelaapi/pkg/apis/training/v1alpha1/generated.proto\"\xd6\x01\n\x1cListModelAutobuildersRequest\x12\x11\n\tnamespace\x18\x01 \x01(\t\x12t\n\x06labels\x18\x02 \x03(\x0b\x32\x64.github.com.metaprov.modelaapi.services.modelautobuilder.v1.ListModelAutobuildersRequest.LabelsEntry\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"~\n\x1dListModelAutobuildersResponse\x12]\n\x05items\x18\x01 \x01(\x0b\x32N.github.com.metaprov.modelaapi.pkg.apis.training.v1alpha1.ModelAutobuilderList\"y\n\x1d\x43reateModelAutobuilderRequest\x12X\n\x04item\x18\x01 \x01(\x0b\x32J.github.com.metaprov.modelaapi.pkg.apis.training.v1alpha1.ModelAutobuilder\" \n\x1e\x43reateModelAutobuilderResponse\"y\n\x1dUpdateModelAutobuilderRequest\x12X\n\x04item\x18\x01 \x01(\x0b\x32J.github.com.metaprov.modelaapi.pkg.apis.training.v1alpha1.ModelAutobuilder\" \n\x1eUpdateModelAutobuilderResponse\"=\n\x1aGetModelAutobuilderRequest\x12\x11\n\tnamespace\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\"\x85\x01\n\x1bGetModelAutobuilderResponse\x12X\n\x04item\x18\x01 \x01(\x0b\x32J.github.com.metaprov.modelaapi.pkg.apis.training.v1alpha1.ModelAutobuilder\x12\x0c\n\x04yaml\x18\x02 \x01(\t\"@\n\x1d\x44\x65leteModelAutobuilderRequest\x12\x11\n\tnamespace\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\" 
\n\x1e\x44\x65leteModelAutobuilderResponse2\xa9\n\n\x17ModelAutobuilderService\x12\xf7\x01\n\x15ListModelAutobuilders\x12X.github.com.metaprov.modelaapi.services.modelautobuilder.v1.ListModelAutobuildersRequest\x1aY.github.com.metaprov.modelaapi.services.modelautobuilder.v1.ListModelAutobuildersResponse\")\x82\xd3\xe4\x93\x02#\x12!/v1/modelautobuilders/{namespace}\x12\xf1\x01\n\x16\x43reateModelAutobuilder\x12Y.github.com.metaprov.modelaapi.services.modelautobuilder.v1.CreateModelAutobuilderRequest\x1aZ.github.com.metaprov.modelaapi.services.modelautobuilder.v1.CreateModelAutobuilderResponse\" \x82\xd3\xe4\x93\x02\x1a\"\x15/v1/modelautobuilders:\x01*\x12\xf8\x01\n\x13GetModelAutobuilder\x12V.github.com.metaprov.modelaapi.services.modelautobuilder.v1.GetModelAutobuilderRequest\x1aW.github.com.metaprov.modelaapi.services.modelautobuilder.v1.GetModelAutobuilderResponse\"0\x82\xd3\xe4\x93\x02*\x12(/v1/modelautobuilders/{namespace}/{name}\x12\xa0\x02\n\x16UpdateModelAutobuilder\x12Y.github.com.metaprov.modelaapi.services.modelautobuilder.v1.UpdateModelAutobuilderRequest\x1aZ.github.com.metaprov.modelaapi.services.modelautobuilder.v1.UpdateModelAutobuilderResponse\"O\x82\xd3\xe4\x93\x02I\x1a\x44/v1/modelautobuilders/{item.metadata.namespace}/{item.metadata.name}:\x01*\x12\x81\x02\n\x16\x44\x65leteModelAutobuilder\x12Y.github.com.metaprov.modelaapi.services.modelautobuilder.v1.DeleteModelAutobuilderRequest\x1aZ.github.com.metaprov.modelaapi.services.modelautobuilder.v1.DeleteModelAutobuilderResponse\"0\x82\xd3\xe4\x93\x02**(/v1/modelautobuilders/{namespace}/{name}B<Z:github.com/metaprov/modelaapi/services/modelautobuilder/v1b\x06proto3'
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,github_dot_com_dot_metaprov_dot_modelaapi_dot_pkg_dot_apis_dot_training_dot_v1alpha1_dot_generated__pb2.DESCRIPTOR,])
_LISTMODELAUTOBUILDERSREQUEST_LABELSENTRY = _descriptor.Descriptor(
name='LabelsEntry',
full_name='github.com.metaprov.modelaapi.services.modelautobuilder.v1.ListModelAutobuildersRequest.LabelsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='github.com.metaprov.modelaapi.services.modelautobuilder.v1.ListModelAutobuildersRequest.LabelsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='github.com.metaprov.modelaapi.services.modelautobuilder.v1.ListModelAutobuildersRequest.LabelsEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=419,
serialized_end=464,
)
_LISTMODELAUTOBUILDERSREQUEST = _descriptor.Descriptor(
name='ListModelAutobuildersRequest',
full_name='github.com.metaprov.modelaapi.services.modelautobuilder.v1.ListModelAutobuildersRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='namespace', full_name='github.com.metaprov.modelaapi.services.modelautobuilder.v1.ListModelAutobuildersRequest.namespace', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='labels', full_name='github.com.metaprov.modelaapi.services.modelautobuilder.v1.ListModelAutobuildersRequest.labels', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_LISTMODELAUTOBUILDERSREQUEST_LABELSENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=250,
serialized_end=464,
)
_LISTMODELAUTOBUILDERSRESPONSE = _descriptor.Descriptor(
name='ListModelAutobuildersResponse',
full_name='github.com.metaprov.modelaapi.services.modelautobuilder.v1.ListModelAutobuildersResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='items', full_name='github.com.metaprov.modelaapi.services.modelautobuilder.v1.ListModelAutobuildersResponse.items', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=466,
serialized_end=592,
)
_CREATEMODELAUTOBUILDERREQUEST = _descriptor.Descriptor(
name='CreateModelAutobuilderRequest',
full_name='github.com.metaprov.modelaapi.services.modelautobuilder.v1.CreateModelAutobuilderRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='item', full_name='github.com.metaprov.modelaapi.services.modelautobuilder.v1.CreateModelAutobuilderRequest.item', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=594,
serialized_end=715,
)
_CREATEMODELAUTOBUILDERRESPONSE = _descriptor.Descriptor(
name='CreateModelAutobuilderResponse',
full_name='github.com.metaprov.modelaapi.services.modelautobuilder.v1.CreateModelAutobuilderResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=717,
serialized_end=749,
)
_UPDATEMODELAUTOBUILDERREQUEST = _descriptor.Descriptor(
name='UpdateModelAutobuilderRequest',
full_name='github.com.metaprov.modelaapi.services.modelautobuilder.v1.UpdateModelAutobuilderRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='item', full_name='github.com.metaprov.modelaapi.services.modelautobuilder.v1.UpdateModelAutobuilderRequest.item', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=751,
serialized_end=872,
)
_UPDATEMODELAUTOBUILDERRESPONSE = _descriptor.Descriptor(
name='UpdateModelAutobuilderResponse',
full_name='github.com.metaprov.modelaapi.services.modelautobuilder.v1.UpdateModelAutobuilderResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=874,
serialized_end=906,
)
_GETMODELAUTOBUILDERREQUEST = _descriptor.Descriptor(
name='GetModelAutobuilderRequest',
full_name='github.com.metaprov.modelaapi.services.modelautobuilder.v1.GetModelAutobuilderRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='namespace', full_name='github.com.metaprov.modelaapi.services.modelautobuilder.v1.GetModelAutobuilderRequest.namespace', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='github.com.metaprov.modelaapi.services.modelautobuilder.v1.GetModelAutobuilderRequest.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=908,
serialized_end=969,
)
_GETMODELAUTOBUILDERRESPONSE = _descriptor.Descriptor(
name='GetModelAutobuilderResponse',
full_name='github.com.metaprov.modelaapi.services.modelautobuilder.v1.GetModelAutobuilderResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='item', full_name='github.com.metaprov.modelaapi.services.modelautobuilder.v1.GetModelAutobuilderResponse.item', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='yaml', full_name='github.com.metaprov.modelaapi.services.modelautobuilder.v1.GetModelAutobuilderResponse.yaml', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=972,
serialized_end=1105,
)
_DELETEMODELAUTOBUILDERREQUEST = _descriptor.Descriptor(
name='DeleteModelAutobuilderRequest',
full_name='github.com.metaprov.modelaapi.services.modelautobuilder.v1.DeleteModelAutobuilderRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='namespace', full_name='github.com.metaprov.modelaapi.services.modelautobuilder.v1.DeleteModelAutobuilderRequest.namespace', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='github.com.metaprov.modelaapi.services.modelautobuilder.v1.DeleteModelAutobuilderRequest.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1107,
serialized_end=1171,
)
_DELETEMODELAUTOBUILDERRESPONSE = _descriptor.Descriptor(
name='DeleteModelAutobuilderResponse',
full_name='github.com.metaprov.modelaapi.services.modelautobuilder.v1.DeleteModelAutobuilderResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1173,
serialized_end=1205,
)
_LISTMODELAUTOBUILDERSREQUEST_LABELSENTRY.containing_type = _LISTMODELAUTOBUILDERSREQUEST
_LISTMODELAUTOBUILDERSREQUEST.fields_by_name['labels'].message_type = _LISTMODELAUTOBUILDERSREQUEST_LABELSENTRY
_LISTMODELAUTOBUILDERSRESPONSE.fields_by_name['items'].message_type = github_dot_com_dot_metaprov_dot_modelaapi_dot_pkg_dot_apis_dot_training_dot_v1alpha1_dot_generated__pb2._MODELAUTOBUILDERLIST
_CREATEMODELAUTOBUILDERREQUEST.fields_by_name['item'].message_type = github_dot_com_dot_metaprov_dot_modelaapi_dot_pkg_dot_apis_dot_training_dot_v1alpha1_dot_generated__pb2._MODELAUTOBUILDER
_UPDATEMODELAUTOBUILDERREQUEST.fields_by_name['item'].message_type = github_dot_com_dot_metaprov_dot_modelaapi_dot_pkg_dot_apis_dot_training_dot_v1alpha1_dot_generated__pb2._MODELAUTOBUILDER
_GETMODELAUTOBUILDERRESPONSE.fields_by_name['item'].message_type = github_dot_com_dot_metaprov_dot_modelaapi_dot_pkg_dot_apis_dot_training_dot_v1alpha1_dot_generated__pb2._MODELAUTOBUILDER
DESCRIPTOR.message_types_by_name['ListModelAutobuildersRequest'] = _LISTMODELAUTOBUILDERSREQUEST
DESCRIPTOR.message_types_by_name['ListModelAutobuildersResponse'] = _LISTMODELAUTOBUILDERSRESPONSE
DESCRIPTOR.message_types_by_name['CreateModelAutobuilderRequest'] = _CREATEMODELAUTOBUILDERREQUEST
DESCRIPTOR.message_types_by_name['CreateModelAutobuilderResponse'] = _CREATEMODELAUTOBUILDERRESPONSE
DESCRIPTOR.message_types_by_name['UpdateModelAutobuilderRequest'] = _UPDATEMODELAUTOBUILDERREQUEST
DESCRIPTOR.message_types_by_name['UpdateModelAutobuilderResponse'] = _UPDATEMODELAUTOBUILDERRESPONSE
DESCRIPTOR.message_types_by_name['GetModelAutobuilderRequest'] = _GETMODELAUTOBUILDERREQUEST
DESCRIPTOR.message_types_by_name['GetModelAutobuilderResponse'] = _GETMODELAUTOBUILDERRESPONSE
DESCRIPTOR.message_types_by_name['DeleteModelAutobuilderRequest'] = _DELETEMODELAUTOBUILDERREQUEST
DESCRIPTOR.message_types_by_name['DeleteModelAutobuilderResponse'] = _DELETEMODELAUTOBUILDERRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ListModelAutobuildersRequest = _reflection.GeneratedProtocolMessageType('ListModelAutobuildersRequest', (_message.Message,), {
'LabelsEntry' : _reflection.GeneratedProtocolMessageType('LabelsEntry', (_message.Message,), {
'DESCRIPTOR' : _LISTMODELAUTOBUILDERSREQUEST_LABELSENTRY,
'__module__' : 'github.com.metaprov.modelaapi.services.modelautobuilder.v1.modelautobuilder_pb2'
# @@protoc_insertion_point(class_scope:github.com.metaprov.modelaapi.services.modelautobuilder.v1.ListModelAutobuildersRequest.LabelsEntry)
})
,
'DESCRIPTOR' : _LISTMODELAUTOBUILDERSREQUEST,
'__module__' : 'github.com.metaprov.modelaapi.services.modelautobuilder.v1.modelautobuilder_pb2'
# @@protoc_insertion_point(class_scope:github.com.metaprov.modelaapi.services.modelautobuilder.v1.ListModelAutobuildersRequest)
})
_sym_db.RegisterMessage(ListModelAutobuildersRequest)
_sym_db.RegisterMessage(ListModelAutobuildersRequest.LabelsEntry)
ListModelAutobuildersResponse = _reflection.GeneratedProtocolMessageType('ListModelAutobuildersResponse', (_message.Message,), {
'DESCRIPTOR' : _LISTMODELAUTOBUILDERSRESPONSE,
'__module__' : 'github.com.metaprov.modelaapi.services.modelautobuilder.v1.modelautobuilder_pb2'
# @@protoc_insertion_point(class_scope:github.com.metaprov.modelaapi.services.modelautobuilder.v1.ListModelAutobuildersResponse)
})
_sym_db.RegisterMessage(ListModelAutobuildersResponse)
CreateModelAutobuilderRequest = _reflection.GeneratedProtocolMessageType('CreateModelAutobuilderRequest', (_message.Message,), {
'DESCRIPTOR' : _CREATEMODELAUTOBUILDERREQUEST,
'__module__' : 'github.com.metaprov.modelaapi.services.modelautobuilder.v1.modelautobuilder_pb2'
# @@protoc_insertion_point(class_scope:github.com.metaprov.modelaapi.services.modelautobuilder.v1.CreateModelAutobuilderRequest)
})
_sym_db.RegisterMessage(CreateModelAutobuilderRequest)
CreateModelAutobuilderResponse = _reflection.GeneratedProtocolMessageType('CreateModelAutobuilderResponse', (_message.Message,), {
'DESCRIPTOR' : _CREATEMODELAUTOBUILDERRESPONSE,
'__module__' : 'github.com.metaprov.modelaapi.services.modelautobuilder.v1.modelautobuilder_pb2'
# @@protoc_insertion_point(class_scope:github.com.metaprov.modelaapi.services.modelautobuilder.v1.CreateModelAutobuilderResponse)
})
_sym_db.RegisterMessage(CreateModelAutobuilderResponse)
UpdateModelAutobuilderRequest = _reflection.GeneratedProtocolMessageType('UpdateModelAutobuilderRequest', (_message.Message,), {
'DESCRIPTOR' : _UPDATEMODELAUTOBUILDERREQUEST,
'__module__' : 'github.com.metaprov.modelaapi.services.modelautobuilder.v1.modelautobuilder_pb2'
# @@protoc_insertion_point(class_scope:github.com.metaprov.modelaapi.services.modelautobuilder.v1.UpdateModelAutobuilderRequest)
})
_sym_db.RegisterMessage(UpdateModelAutobuilderRequest)
UpdateModelAutobuilderResponse = _reflection.GeneratedProtocolMessageType('UpdateModelAutobuilderResponse', (_message.Message,), {
'DESCRIPTOR' : _UPDATEMODELAUTOBUILDERRESPONSE,
'__module__' : 'github.com.metaprov.modelaapi.services.modelautobuilder.v1.modelautobuilder_pb2'
# @@protoc_insertion_point(class_scope:github.com.metaprov.modelaapi.services.modelautobuilder.v1.UpdateModelAutobuilderResponse)
})
_sym_db.RegisterMessage(UpdateModelAutobuilderResponse)
GetModelAutobuilderRequest = _reflection.GeneratedProtocolMessageType('GetModelAutobuilderRequest', (_message.Message,), {
'DESCRIPTOR' : _GETMODELAUTOBUILDERREQUEST,
'__module__' : 'github.com.metaprov.modelaapi.services.modelautobuilder.v1.modelautobuilder_pb2'
# @@protoc_insertion_point(class_scope:github.com.metaprov.modelaapi.services.modelautobuilder.v1.GetModelAutobuilderRequest)
})
_sym_db.RegisterMessage(GetModelAutobuilderRequest)
GetModelAutobuilderResponse = _reflection.GeneratedProtocolMessageType('GetModelAutobuilderResponse', (_message.Message,), {
'DESCRIPTOR' : _GETMODELAUTOBUILDERRESPONSE,
'__module__' : 'github.com.metaprov.modelaapi.services.modelautobuilder.v1.modelautobuilder_pb2'
# @@protoc_insertion_point(class_scope:github.com.metaprov.modelaapi.services.modelautobuilder.v1.GetModelAutobuilderResponse)
})
_sym_db.RegisterMessage(GetModelAutobuilderResponse)
DeleteModelAutobuilderRequest = _reflection.GeneratedProtocolMessageType('DeleteModelAutobuilderRequest', (_message.Message,), {
'DESCRIPTOR' : _DELETEMODELAUTOBUILDERREQUEST,
'__module__' : 'github.com.metaprov.modelaapi.services.modelautobuilder.v1.modelautobuilder_pb2'
# @@protoc_insertion_point(class_scope:github.com.metaprov.modelaapi.services.modelautobuilder.v1.DeleteModelAutobuilderRequest)
})
_sym_db.RegisterMessage(DeleteModelAutobuilderRequest)
DeleteModelAutobuilderResponse = _reflection.GeneratedProtocolMessageType('DeleteModelAutobuilderResponse', (_message.Message,), {
'DESCRIPTOR' : _DELETEMODELAUTOBUILDERRESPONSE,
'__module__' : 'github.com.metaprov.modelaapi.services.modelautobuilder.v1.modelautobuilder_pb2'
# @@protoc_insertion_point(class_scope:github.com.metaprov.modelaapi.services.modelautobuilder.v1.DeleteModelAutobuilderResponse)
})
_sym_db.RegisterMessage(DeleteModelAutobuilderResponse)
DESCRIPTOR._options = None
_LISTMODELAUTOBUILDERSREQUEST_LABELSENTRY._options = None
_MODELAUTOBUILDERSERVICE = _descriptor.ServiceDescriptor(
name='ModelAutobuilderService',
full_name='github.com.metaprov.modelaapi.services.modelautobuilder.v1.ModelAutobuilderService',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=1208,
serialized_end=2529,
methods=[
_descriptor.MethodDescriptor(
name='ListModelAutobuilders',
full_name='github.com.metaprov.modelaapi.services.modelautobuilder.v1.ModelAutobuilderService.ListModelAutobuilders',
index=0,
containing_service=None,
input_type=_LISTMODELAUTOBUILDERSREQUEST,
output_type=_LISTMODELAUTOBUILDERSRESPONSE,
serialized_options=b'\202\323\344\223\002#\022!/v1/modelautobuilders/{namespace}',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='CreateModelAutobuilder',
full_name='github.com.metaprov.modelaapi.services.modelautobuilder.v1.ModelAutobuilderService.CreateModelAutobuilder',
index=1,
containing_service=None,
input_type=_CREATEMODELAUTOBUILDERREQUEST,
output_type=_CREATEMODELAUTOBUILDERRESPONSE,
serialized_options=b'\202\323\344\223\002\032\"\025/v1/modelautobuilders:\001*',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='GetModelAutobuilder',
full_name='github.com.metaprov.modelaapi.services.modelautobuilder.v1.ModelAutobuilderService.GetModelAutobuilder',
index=2,
containing_service=None,
input_type=_GETMODELAUTOBUILDERREQUEST,
output_type=_GETMODELAUTOBUILDERRESPONSE,
serialized_options=b'\202\323\344\223\002*\022(/v1/modelautobuilders/{namespace}/{name}',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='UpdateModelAutobuilder',
full_name='github.com.metaprov.modelaapi.services.modelautobuilder.v1.ModelAutobuilderService.UpdateModelAutobuilder',
index=3,
containing_service=None,
input_type=_UPDATEMODELAUTOBUILDERREQUEST,
output_type=_UPDATEMODELAUTOBUILDERRESPONSE,
serialized_options=b'\202\323\344\223\002I\032D/v1/modelautobuilders/{item.metadata.namespace}/{item.metadata.name}:\001*',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='DeleteModelAutobuilder',
full_name='github.com.metaprov.modelaapi.services.modelautobuilder.v1.ModelAutobuilderService.DeleteModelAutobuilder',
index=4,
containing_service=None,
input_type=_DELETEMODELAUTOBUILDERREQUEST,
output_type=_DELETEMODELAUTOBUILDERRESPONSE,
serialized_options=b'\202\323\344\223\002**(/v1/modelautobuilders/{namespace}/{name}',
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_MODELAUTOBUILDERSERVICE)
DESCRIPTOR.services_by_name['ModelAutobuilderService'] = _MODELAUTOBUILDERSERVICE
# @@protoc_insertion_point(module_scope)
| 48.092692 | 3,212 | 0.807969 |
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from github.com.metaprov.modelaapi.pkg.apis.training.v1alpha1 import generated_pb2 as github_dot_com_dot_metaprov_dot_modelaapi_dot_pkg_dot_apis_dot_training_dot_v1alpha1_dot_generated__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='github.com/metaprov/modelaapi/services/modelautobuilder/v1/modelautobuilder.proto',
package='github.com.metaprov.modelaapi.services.modelautobuilder.v1',
syntax='proto3',
serialized_options=b'Z:github.com/metaprov/modelaapi/services/modelautobuilder/v1',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\nQgithub.com/metaprov/modelaapi/services/modelautobuilder/v1/modelautobuilder.proto\x12:github.com.metaprov.modelaapi.services.modelautobuilder.v1\x1a\x1cgoogle/api/annotations.proto\x1aHgithub.com/metaprov/modelaapi/pkg/apis/training/v1alpha1/generated.proto\"\xd6\x01\n\x1cListModelAutobuildersRequest\x12\x11\n\tnamespace\x18\x01 \x01(\t\x12t\n\x06labels\x18\x02 \x03(\x0b\x32\x64.github.com.metaprov.modelaapi.services.modelautobuilder.v1.ListModelAutobuildersRequest.LabelsEntry\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"~\n\x1dListModelAutobuildersResponse\x12]\n\x05items\x18\x01 \x01(\x0b\x32N.github.com.metaprov.modelaapi.pkg.apis.training.v1alpha1.ModelAutobuilderList\"y\n\x1d\x43reateModelAutobuilderRequest\x12X\n\x04item\x18\x01 \x01(\x0b\x32J.github.com.metaprov.modelaapi.pkg.apis.training.v1alpha1.ModelAutobuilder\" \n\x1e\x43reateModelAutobuilderResponse\"y\n\x1dUpdateModelAutobuilderRequest\x12X\n\x04item\x18\x01 \x01(\x0b\x32J.github.com.metaprov.modelaapi.pkg.apis.training.v1alpha1.ModelAutobuilder\" \n\x1eUpdateModelAutobuilderResponse\"=\n\x1aGetModelAutobuilderRequest\x12\x11\n\tnamespace\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\"\x85\x01\n\x1bGetModelAutobuilderResponse\x12X\n\x04item\x18\x01 \x01(\x0b\x32J.github.com.metaprov.modelaapi.pkg.apis.training.v1alpha1.ModelAutobuilder\x12\x0c\n\x04yaml\x18\x02 \x01(\t\"@\n\x1d\x44\x65leteModelAutobuilderRequest\x12\x11\n\tnamespace\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\" 
\n\x1e\x44\x65leteModelAutobuilderResponse2\xa9\n\n\x17ModelAutobuilderService\x12\xf7\x01\n\x15ListModelAutobuilders\x12X.github.com.metaprov.modelaapi.services.modelautobuilder.v1.ListModelAutobuildersRequest\x1aY.github.com.metaprov.modelaapi.services.modelautobuilder.v1.ListModelAutobuildersResponse\")\x82\xd3\xe4\x93\x02#\x12!/v1/modelautobuilders/{namespace}\x12\xf1\x01\n\x16\x43reateModelAutobuilder\x12Y.github.com.metaprov.modelaapi.services.modelautobuilder.v1.CreateModelAutobuilderRequest\x1aZ.github.com.metaprov.modelaapi.services.modelautobuilder.v1.CreateModelAutobuilderResponse\" \x82\xd3\xe4\x93\x02\x1a\"\x15/v1/modelautobuilders:\x01*\x12\xf8\x01\n\x13GetModelAutobuilder\x12V.github.com.metaprov.modelaapi.services.modelautobuilder.v1.GetModelAutobuilderRequest\x1aW.github.com.metaprov.modelaapi.services.modelautobuilder.v1.GetModelAutobuilderResponse\"0\x82\xd3\xe4\x93\x02*\x12(/v1/modelautobuilders/{namespace}/{name}\x12\xa0\x02\n\x16UpdateModelAutobuilder\x12Y.github.com.metaprov.modelaapi.services.modelautobuilder.v1.UpdateModelAutobuilderRequest\x1aZ.github.com.metaprov.modelaapi.services.modelautobuilder.v1.UpdateModelAutobuilderResponse\"O\x82\xd3\xe4\x93\x02I\x1a\x44/v1/modelautobuilders/{item.metadata.namespace}/{item.metadata.name}:\x01*\x12\x81\x02\n\x16\x44\x65leteModelAutobuilder\x12Y.github.com.metaprov.modelaapi.services.modelautobuilder.v1.DeleteModelAutobuilderRequest\x1aZ.github.com.metaprov.modelaapi.services.modelautobuilder.v1.DeleteModelAutobuilderResponse\"0\x82\xd3\xe4\x93\x02**(/v1/modelautobuilders/{namespace}/{name}B<Z:github.com/metaprov/modelaapi/services/modelautobuilder/v1b\x06proto3'
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,github_dot_com_dot_metaprov_dot_modelaapi_dot_pkg_dot_apis_dot_training_dot_v1alpha1_dot_generated__pb2.DESCRIPTOR,])
_LISTMODELAUTOBUILDERSREQUEST_LABELSENTRY = _descriptor.Descriptor(
name='LabelsEntry',
full_name='github.com.metaprov.modelaapi.services.modelautobuilder.v1.ListModelAutobuildersRequest.LabelsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='github.com.metaprov.modelaapi.services.modelautobuilder.v1.ListModelAutobuildersRequest.LabelsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='github.com.metaprov.modelaapi.services.modelautobuilder.v1.ListModelAutobuildersRequest.LabelsEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=419,
serialized_end=464,
)
_LISTMODELAUTOBUILDERSREQUEST = _descriptor.Descriptor(
name='ListModelAutobuildersRequest',
full_name='github.com.metaprov.modelaapi.services.modelautobuilder.v1.ListModelAutobuildersRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='namespace', full_name='github.com.metaprov.modelaapi.services.modelautobuilder.v1.ListModelAutobuildersRequest.namespace', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='labels', full_name='github.com.metaprov.modelaapi.services.modelautobuilder.v1.ListModelAutobuildersRequest.labels', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_LISTMODELAUTOBUILDERSREQUEST_LABELSENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=250,
serialized_end=464,
)
_LISTMODELAUTOBUILDERSRESPONSE = _descriptor.Descriptor(
name='ListModelAutobuildersResponse',
full_name='github.com.metaprov.modelaapi.services.modelautobuilder.v1.ListModelAutobuildersResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='items', full_name='github.com.metaprov.modelaapi.services.modelautobuilder.v1.ListModelAutobuildersResponse.items', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=466,
serialized_end=592,
)
_CREATEMODELAUTOBUILDERREQUEST = _descriptor.Descriptor(
name='CreateModelAutobuilderRequest',
full_name='github.com.metaprov.modelaapi.services.modelautobuilder.v1.CreateModelAutobuilderRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='item', full_name='github.com.metaprov.modelaapi.services.modelautobuilder.v1.CreateModelAutobuilderRequest.item', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=594,
serialized_end=715,
)
_CREATEMODELAUTOBUILDERRESPONSE = _descriptor.Descriptor(
name='CreateModelAutobuilderResponse',
full_name='github.com.metaprov.modelaapi.services.modelautobuilder.v1.CreateModelAutobuilderResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=717,
serialized_end=749,
)
_UPDATEMODELAUTOBUILDERREQUEST = _descriptor.Descriptor(
name='UpdateModelAutobuilderRequest',
full_name='github.com.metaprov.modelaapi.services.modelautobuilder.v1.UpdateModelAutobuilderRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='item', full_name='github.com.metaprov.modelaapi.services.modelautobuilder.v1.UpdateModelAutobuilderRequest.item', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=751,
serialized_end=872,
)
_UPDATEMODELAUTOBUILDERRESPONSE = _descriptor.Descriptor(
name='UpdateModelAutobuilderResponse',
full_name='github.com.metaprov.modelaapi.services.modelautobuilder.v1.UpdateModelAutobuilderResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=874,
serialized_end=906,
)
_GETMODELAUTOBUILDERREQUEST = _descriptor.Descriptor(
name='GetModelAutobuilderRequest',
full_name='github.com.metaprov.modelaapi.services.modelautobuilder.v1.GetModelAutobuilderRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='namespace', full_name='github.com.metaprov.modelaapi.services.modelautobuilder.v1.GetModelAutobuilderRequest.namespace', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='github.com.metaprov.modelaapi.services.modelautobuilder.v1.GetModelAutobuilderRequest.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=908,
serialized_end=969,
)
_GETMODELAUTOBUILDERRESPONSE = _descriptor.Descriptor(
name='GetModelAutobuilderResponse',
full_name='github.com.metaprov.modelaapi.services.modelautobuilder.v1.GetModelAutobuilderResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='item', full_name='github.com.metaprov.modelaapi.services.modelautobuilder.v1.GetModelAutobuilderResponse.item', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='yaml', full_name='github.com.metaprov.modelaapi.services.modelautobuilder.v1.GetModelAutobuilderResponse.yaml', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=972,
serialized_end=1105,
)
_DELETEMODELAUTOBUILDERREQUEST = _descriptor.Descriptor(
name='DeleteModelAutobuilderRequest',
full_name='github.com.metaprov.modelaapi.services.modelautobuilder.v1.DeleteModelAutobuilderRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='namespace', full_name='github.com.metaprov.modelaapi.services.modelautobuilder.v1.DeleteModelAutobuilderRequest.namespace', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='github.com.metaprov.modelaapi.services.modelautobuilder.v1.DeleteModelAutobuilderRequest.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1107,
serialized_end=1171,
)
_DELETEMODELAUTOBUILDERRESPONSE = _descriptor.Descriptor(
name='DeleteModelAutobuilderResponse',
full_name='github.com.metaprov.modelaapi.services.modelautobuilder.v1.DeleteModelAutobuilderResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1173,
serialized_end=1205,
)
_LISTMODELAUTOBUILDERSREQUEST_LABELSENTRY.containing_type = _LISTMODELAUTOBUILDERSREQUEST
_LISTMODELAUTOBUILDERSREQUEST.fields_by_name['labels'].message_type = _LISTMODELAUTOBUILDERSREQUEST_LABELSENTRY
_LISTMODELAUTOBUILDERSRESPONSE.fields_by_name['items'].message_type = github_dot_com_dot_metaprov_dot_modelaapi_dot_pkg_dot_apis_dot_training_dot_v1alpha1_dot_generated__pb2._MODELAUTOBUILDERLIST
_CREATEMODELAUTOBUILDERREQUEST.fields_by_name['item'].message_type = github_dot_com_dot_metaprov_dot_modelaapi_dot_pkg_dot_apis_dot_training_dot_v1alpha1_dot_generated__pb2._MODELAUTOBUILDER
_UPDATEMODELAUTOBUILDERREQUEST.fields_by_name['item'].message_type = github_dot_com_dot_metaprov_dot_modelaapi_dot_pkg_dot_apis_dot_training_dot_v1alpha1_dot_generated__pb2._MODELAUTOBUILDER
_GETMODELAUTOBUILDERRESPONSE.fields_by_name['item'].message_type = github_dot_com_dot_metaprov_dot_modelaapi_dot_pkg_dot_apis_dot_training_dot_v1alpha1_dot_generated__pb2._MODELAUTOBUILDER
DESCRIPTOR.message_types_by_name['ListModelAutobuildersRequest'] = _LISTMODELAUTOBUILDERSREQUEST
DESCRIPTOR.message_types_by_name['ListModelAutobuildersResponse'] = _LISTMODELAUTOBUILDERSRESPONSE
DESCRIPTOR.message_types_by_name['CreateModelAutobuilderRequest'] = _CREATEMODELAUTOBUILDERREQUEST
DESCRIPTOR.message_types_by_name['CreateModelAutobuilderResponse'] = _CREATEMODELAUTOBUILDERRESPONSE
DESCRIPTOR.message_types_by_name['UpdateModelAutobuilderRequest'] = _UPDATEMODELAUTOBUILDERREQUEST
DESCRIPTOR.message_types_by_name['UpdateModelAutobuilderResponse'] = _UPDATEMODELAUTOBUILDERRESPONSE
DESCRIPTOR.message_types_by_name['GetModelAutobuilderRequest'] = _GETMODELAUTOBUILDERREQUEST
DESCRIPTOR.message_types_by_name['GetModelAutobuilderResponse'] = _GETMODELAUTOBUILDERRESPONSE
DESCRIPTOR.message_types_by_name['DeleteModelAutobuilderRequest'] = _DELETEMODELAUTOBUILDERREQUEST
DESCRIPTOR.message_types_by_name['DeleteModelAutobuilderResponse'] = _DELETEMODELAUTOBUILDERRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ListModelAutobuildersRequest = _reflection.GeneratedProtocolMessageType('ListModelAutobuildersRequest', (_message.Message,), {
'LabelsEntry' : _reflection.GeneratedProtocolMessageType('LabelsEntry', (_message.Message,), {
'DESCRIPTOR' : _LISTMODELAUTOBUILDERSREQUEST_LABELSENTRY,
'__module__' : 'github.com.metaprov.modelaapi.services.modelautobuilder.v1.modelautobuilder_pb2'
})
,
'DESCRIPTOR' : _LISTMODELAUTOBUILDERSREQUEST,
'__module__' : 'github.com.metaprov.modelaapi.services.modelautobuilder.v1.modelautobuilder_pb2'
})
_sym_db.RegisterMessage(ListModelAutobuildersRequest)
_sym_db.RegisterMessage(ListModelAutobuildersRequest.LabelsEntry)
ListModelAutobuildersResponse = _reflection.GeneratedProtocolMessageType('ListModelAutobuildersResponse', (_message.Message,), {
'DESCRIPTOR' : _LISTMODELAUTOBUILDERSRESPONSE,
'__module__' : 'github.com.metaprov.modelaapi.services.modelautobuilder.v1.modelautobuilder_pb2'
})
_sym_db.RegisterMessage(ListModelAutobuildersResponse)
CreateModelAutobuilderRequest = _reflection.GeneratedProtocolMessageType('CreateModelAutobuilderRequest', (_message.Message,), {
'DESCRIPTOR' : _CREATEMODELAUTOBUILDERREQUEST,
'__module__' : 'github.com.metaprov.modelaapi.services.modelautobuilder.v1.modelautobuilder_pb2'
})
_sym_db.RegisterMessage(CreateModelAutobuilderRequest)
CreateModelAutobuilderResponse = _reflection.GeneratedProtocolMessageType('CreateModelAutobuilderResponse', (_message.Message,), {
'DESCRIPTOR' : _CREATEMODELAUTOBUILDERRESPONSE,
'__module__' : 'github.com.metaprov.modelaapi.services.modelautobuilder.v1.modelautobuilder_pb2'
})
_sym_db.RegisterMessage(CreateModelAutobuilderResponse)
UpdateModelAutobuilderRequest = _reflection.GeneratedProtocolMessageType('UpdateModelAutobuilderRequest', (_message.Message,), {
'DESCRIPTOR' : _UPDATEMODELAUTOBUILDERREQUEST,
'__module__' : 'github.com.metaprov.modelaapi.services.modelautobuilder.v1.modelautobuilder_pb2'
})
_sym_db.RegisterMessage(UpdateModelAutobuilderRequest)
UpdateModelAutobuilderResponse = _reflection.GeneratedProtocolMessageType('UpdateModelAutobuilderResponse', (_message.Message,), {
'DESCRIPTOR' : _UPDATEMODELAUTOBUILDERRESPONSE,
'__module__' : 'github.com.metaprov.modelaapi.services.modelautobuilder.v1.modelautobuilder_pb2'
})
_sym_db.RegisterMessage(UpdateModelAutobuilderResponse)
GetModelAutobuilderRequest = _reflection.GeneratedProtocolMessageType('GetModelAutobuilderRequest', (_message.Message,), {
'DESCRIPTOR' : _GETMODELAUTOBUILDERREQUEST,
'__module__' : 'github.com.metaprov.modelaapi.services.modelautobuilder.v1.modelautobuilder_pb2'
})
_sym_db.RegisterMessage(GetModelAutobuilderRequest)
GetModelAutobuilderResponse = _reflection.GeneratedProtocolMessageType('GetModelAutobuilderResponse', (_message.Message,), {
'DESCRIPTOR' : _GETMODELAUTOBUILDERRESPONSE,
'__module__' : 'github.com.metaprov.modelaapi.services.modelautobuilder.v1.modelautobuilder_pb2'
})
_sym_db.RegisterMessage(GetModelAutobuilderResponse)
DeleteModelAutobuilderRequest = _reflection.GeneratedProtocolMessageType('DeleteModelAutobuilderRequest', (_message.Message,), {
'DESCRIPTOR' : _DELETEMODELAUTOBUILDERREQUEST,
'__module__' : 'github.com.metaprov.modelaapi.services.modelautobuilder.v1.modelautobuilder_pb2'
})
_sym_db.RegisterMessage(DeleteModelAutobuilderRequest)
DeleteModelAutobuilderResponse = _reflection.GeneratedProtocolMessageType('DeleteModelAutobuilderResponse', (_message.Message,), {
'DESCRIPTOR' : _DELETEMODELAUTOBUILDERRESPONSE,
'__module__' : 'github.com.metaprov.modelaapi.services.modelautobuilder.v1.modelautobuilder_pb2'
})
_sym_db.RegisterMessage(DeleteModelAutobuilderResponse)
DESCRIPTOR._options = None
_LISTMODELAUTOBUILDERSREQUEST_LABELSENTRY._options = None
_MODELAUTOBUILDERSERVICE = _descriptor.ServiceDescriptor(
name='ModelAutobuilderService',
full_name='github.com.metaprov.modelaapi.services.modelautobuilder.v1.ModelAutobuilderService',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=1208,
serialized_end=2529,
methods=[
_descriptor.MethodDescriptor(
name='ListModelAutobuilders',
full_name='github.com.metaprov.modelaapi.services.modelautobuilder.v1.ModelAutobuilderService.ListModelAutobuilders',
index=0,
containing_service=None,
input_type=_LISTMODELAUTOBUILDERSREQUEST,
output_type=_LISTMODELAUTOBUILDERSRESPONSE,
serialized_options=b'\202\323\344\223\002#\022!/v1/modelautobuilders/{namespace}',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='CreateModelAutobuilder',
full_name='github.com.metaprov.modelaapi.services.modelautobuilder.v1.ModelAutobuilderService.CreateModelAutobuilder',
index=1,
containing_service=None,
input_type=_CREATEMODELAUTOBUILDERREQUEST,
output_type=_CREATEMODELAUTOBUILDERRESPONSE,
serialized_options=b'\202\323\344\223\002\032\"\025/v1/modelautobuilders:\001*',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='GetModelAutobuilder',
full_name='github.com.metaprov.modelaapi.services.modelautobuilder.v1.ModelAutobuilderService.GetModelAutobuilder',
index=2,
containing_service=None,
input_type=_GETMODELAUTOBUILDERREQUEST,
output_type=_GETMODELAUTOBUILDERRESPONSE,
serialized_options=b'\202\323\344\223\002*\022(/v1/modelautobuilders/{namespace}/{name}',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='UpdateModelAutobuilder',
full_name='github.com.metaprov.modelaapi.services.modelautobuilder.v1.ModelAutobuilderService.UpdateModelAutobuilder',
index=3,
containing_service=None,
input_type=_UPDATEMODELAUTOBUILDERREQUEST,
output_type=_UPDATEMODELAUTOBUILDERRESPONSE,
serialized_options=b'\202\323\344\223\002I\032D/v1/modelautobuilders/{item.metadata.namespace}/{item.metadata.name}:\001*',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='DeleteModelAutobuilder',
full_name='github.com.metaprov.modelaapi.services.modelautobuilder.v1.ModelAutobuilderService.DeleteModelAutobuilder',
index=4,
containing_service=None,
input_type=_DELETEMODELAUTOBUILDERREQUEST,
output_type=_DELETEMODELAUTOBUILDERRESPONSE,
serialized_options=b'\202\323\344\223\002**(/v1/modelautobuilders/{namespace}/{name}',
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_MODELAUTOBUILDERSERVICE)
DESCRIPTOR.services_by_name['ModelAutobuilderService'] = _MODELAUTOBUILDERSERVICE
# @@protoc_insertion_point(module_scope)
| true | true |
f720f1e95b326e40c9aeac42acdf9e1f3addaa58 | 753 | py | Python | tests/instructions/test_tfr.py | rob-smallshire/asm68 | a9bbb99e7a7fbbe7656815df488c74606d08b252 | [
"X11"
] | null | null | null | tests/instructions/test_tfr.py | rob-smallshire/asm68 | a9bbb99e7a7fbbe7656815df488c74606d08b252 | [
"X11"
] | null | null | null | tests/instructions/test_tfr.py | rob-smallshire/asm68 | a9bbb99e7a7fbbe7656815df488c74606d08b252 | [
"X11"
] | 1 | 2018-05-08T11:03:22.000Z | 2018-05-08T11:03:22.000Z | from asm68.registers import *
from asm68.mnemonics import TFR
from asm68.asmdsl import AsmDsl, statements
from asm68.assembler import assemble, InterRegisterError
from helpers.code import check_object_code
from pytest import raises
def test_tfr_a_a():
check_object_code('1F 88', TFR, (A, A))
def test_tfr_a_b():
check_object_code('1F 89', TFR, (A, B))
def test_tfr_x_y():
check_object_code('1F 12', TFR, (X, Y))
def test_tfr_md_a_raises_inter_register_error():
asm = AsmDsl()
asm(TFR, (MD, A))
with raises(InterRegisterError):
assemble(statements(asm))
def test_tfr_s_z_raises_inter_register_error():
asm = AsmDsl()
asm(TFR, (S, Q))
with raises(InterRegisterError):
assemble(statements(asm)) | 23.53125 | 56 | 0.718459 | from asm68.registers import *
from asm68.mnemonics import TFR
from asm68.asmdsl import AsmDsl, statements
from asm68.assembler import assemble, InterRegisterError
from helpers.code import check_object_code
from pytest import raises
def test_tfr_a_a():
check_object_code('1F 88', TFR, (A, A))
def test_tfr_a_b():
check_object_code('1F 89', TFR, (A, B))
def test_tfr_x_y():
check_object_code('1F 12', TFR, (X, Y))
def test_tfr_md_a_raises_inter_register_error():
asm = AsmDsl()
asm(TFR, (MD, A))
with raises(InterRegisterError):
assemble(statements(asm))
def test_tfr_s_z_raises_inter_register_error():
asm = AsmDsl()
asm(TFR, (S, Q))
with raises(InterRegisterError):
assemble(statements(asm)) | true | true |
f720f269f987186e910ee271a51453fc316eb7d7 | 4,231 | py | Python | tests/sender/cli.py | OvidiuMM/python-sdk | 8e5c4e5b00de1269f75d44e7614d2d8d5c934b3b | [
"MIT"
] | 2 | 2020-07-20T09:07:12.000Z | 2020-07-20T09:56:21.000Z | tests/sender/cli.py | OvidiuMM/python-sdk | 8e5c4e5b00de1269f75d44e7614d2d8d5c934b3b | [
"MIT"
] | null | null | null | tests/sender/cli.py | OvidiuMM/python-sdk | 8e5c4e5b00de1269f75d44e7614d2d8d5c934b3b | [
"MIT"
] | null | null | null | import unittest
import socket
from click.testing import CliRunner
from devo.common import Configuration
from devo.sender.scripts.sender_cli import data
from devo.sender import DevoSenderException
try:
from .load_certs import *
except ImportError:
from load_certs import *
class TestSender(unittest.TestCase):
def setUp(self):
self.address = os.getenv('DEVO_SENDER_SERVER', "127.0.0.1")
self.port = int(os.getenv('DEVO_SENDER_PORT', 4488))
self.tcp_address = os.getenv('DEVO_SENDER_TCP_SERVER', "127.0.0.1")
self.tcp_port = int(os.getenv('DEVO_SENDER_TCP_PORT', 4489))
self.key = os.getenv('DEVO_SENDER_KEY', CLIENT_KEY)
self.cert = os.getenv('DEVO_SENDER_CERT', CLIENT_CERT)
self.chain = os.getenv('DEVO_SENDER_CHAIN', CLIENT_CHAIN)
self.local_key = os.getenv(CLIENT_KEY)
self.test_tcp = os.getenv('DEVO_TEST_TCP', "True")
self.my_app = 'test.drop.free'
self.my_bapp = b'test.drop.free'
self.my_date = 'my.date.test.sender'
self.test_file = "".join((os.path.dirname(os.path.abspath(__file__)),
os.sep, "testfile_multiline.txt"))
self.test_msg = 'Test send msg\n'
self.localhost = socket.gethostname()
# change this value if you want to send another number of test string
self.default_numbers_sendings = 10
configuration = Configuration()
configuration.set("sender", {
"key": self.key, "cert": self.cert, "chain": self.chain,
"address": self.address, "port": self.port,
"verify_mode": 0, "check_hostname": False
})
self.config_path = "/tmp/devo_sender_tests_config.json"
configuration.save(path=self.config_path)
def test_sender_args(self):
runner = CliRunner()
result = runner.invoke(data, [])
self.assertIn('No address', result.stdout)
def test_bad_address(self):
runner = CliRunner()
result = runner.invoke(data, ["--debug",
"--address", self.address + "asd"])
self.assertIsInstance(result.exception, DevoSenderException)
self.assertIn("Name or service not known",
result.exception.args[0])
def test_bad_certs(self):
runner = CliRunner()
result = runner.invoke(data, ["--debug",
"--address",
"collector-us.devo.io",
"--port", "443",
"--key", self.local_key,
"--cert", self.cert,
"--chain", self.chain,
"--verify_mode", 0,
'--check_hostname', False])
self.assertIsInstance(result.exception, DevoSenderException)
self.assertIn("SSL conn establishment socket error",
result.exception.args[0])
def test_normal_send(self):
runner = CliRunner()
result = runner.invoke(data, ["--debug",
"--address", self.address,
"--port", self.port,
"--key", self.key,
"--cert", self.cert,
"--chain", self.chain,
"--tag", self.my_app,
"--verify_mode", 0,
'--check_hostname', False,
"--line", "Test line"])
self.assertIsNone(result.exception)
self.assertGreater(int(result.output.split("Sended: ")[-1]), 0)
def test_with_config_file(self):
if self.config_path:
runner = CliRunner()
result = runner.invoke(data, ["--debug",
"--config", self.config_path])
self.assertIsNone(result.exception)
self.assertGreater(int(result.output.split("Sended: ")[-1]), 0)
if __name__ == '__main__':
    # Allow running this test module directly, outside a pytest/CI runner.
    unittest.main()
| 40.295238 | 77 | 0.523044 | import unittest
import socket
from click.testing import CliRunner
from devo.common import Configuration
from devo.sender.scripts.sender_cli import data
from devo.sender import DevoSenderException
try:
from .load_certs import *
except ImportError:
from load_certs import *
class TestSender(unittest.TestCase):
def setUp(self):
self.address = os.getenv('DEVO_SENDER_SERVER', "127.0.0.1")
self.port = int(os.getenv('DEVO_SENDER_PORT', 4488))
self.tcp_address = os.getenv('DEVO_SENDER_TCP_SERVER', "127.0.0.1")
self.tcp_port = int(os.getenv('DEVO_SENDER_TCP_PORT', 4489))
self.key = os.getenv('DEVO_SENDER_KEY', CLIENT_KEY)
self.cert = os.getenv('DEVO_SENDER_CERT', CLIENT_CERT)
self.chain = os.getenv('DEVO_SENDER_CHAIN', CLIENT_CHAIN)
self.local_key = os.getenv(CLIENT_KEY)
self.test_tcp = os.getenv('DEVO_TEST_TCP', "True")
self.my_app = 'test.drop.free'
self.my_bapp = b'test.drop.free'
self.my_date = 'my.date.test.sender'
self.test_file = "".join((os.path.dirname(os.path.abspath(__file__)),
os.sep, "testfile_multiline.txt"))
self.test_msg = 'Test send msg\n'
self.localhost = socket.gethostname()
self.default_numbers_sendings = 10
configuration = Configuration()
configuration.set("sender", {
"key": self.key, "cert": self.cert, "chain": self.chain,
"address": self.address, "port": self.port,
"verify_mode": 0, "check_hostname": False
})
self.config_path = "/tmp/devo_sender_tests_config.json"
configuration.save(path=self.config_path)
def test_sender_args(self):
runner = CliRunner()
result = runner.invoke(data, [])
self.assertIn('No address', result.stdout)
def test_bad_address(self):
runner = CliRunner()
result = runner.invoke(data, ["--debug",
"--address", self.address + "asd"])
self.assertIsInstance(result.exception, DevoSenderException)
self.assertIn("Name or service not known",
result.exception.args[0])
def test_bad_certs(self):
runner = CliRunner()
result = runner.invoke(data, ["--debug",
"--address",
"collector-us.devo.io",
"--port", "443",
"--key", self.local_key,
"--cert", self.cert,
"--chain", self.chain,
"--verify_mode", 0,
'--check_hostname', False])
self.assertIsInstance(result.exception, DevoSenderException)
self.assertIn("SSL conn establishment socket error",
result.exception.args[0])
def test_normal_send(self):
runner = CliRunner()
result = runner.invoke(data, ["--debug",
"--address", self.address,
"--port", self.port,
"--key", self.key,
"--cert", self.cert,
"--chain", self.chain,
"--tag", self.my_app,
"--verify_mode", 0,
'--check_hostname', False,
"--line", "Test line"])
self.assertIsNone(result.exception)
self.assertGreater(int(result.output.split("Sended: ")[-1]), 0)
def test_with_config_file(self):
if self.config_path:
runner = CliRunner()
result = runner.invoke(data, ["--debug",
"--config", self.config_path])
self.assertIsNone(result.exception)
self.assertGreater(int(result.output.split("Sended: ")[-1]), 0)
if __name__ == '__main__':
unittest.main()
| true | true |
f720f373767dfe318e91d21f618da8dedddfa285 | 3,700 | py | Python | examples/poisson_test.py | intact-solutions/pysparse | f3dca3ae9d02ab3f49486fbae5d9d68059a318ab | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | examples/poisson_test.py | intact-solutions/pysparse | f3dca3ae9d02ab3f49486fbae5d9d68059a318ab | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | examples/poisson_test.py | intact-solutions/pysparse | f3dca3ae9d02ab3f49486fbae5d9d68059a318ab | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | import numpy as np
import math
from pysparse.sparse import spmatrix
from pysparse.itsolvers.krylov import pcg, qmrs
from pysparse.precon import precon
import time
def poisson2d(n):
    """Assemble the (n*n) x (n*n) 5-point finite-difference Laplacian on an
    n-by-n grid as a general ll_mat (both triangles stored explicitly)."""
    A = spmatrix.ll_mat(n*n, n*n)
    for i in range(n):
        for j in range(n):
            k = i + n*j
            A[k, k] = 4
            # West / east / south / north couplings, guarded at grid edges.
            for inside, neighbour in ((i > 0, k - 1),
                                      (i < n - 1, k + 1),
                                      (j > 0, k - n),
                                      (j < n - 1, k + n)):
                if inside:
                    A[k, neighbour] = -1
    return A
def poisson2d_sym(n):
    """Assemble the same 5-point Laplacian as a symmetric ll_mat_sym,
    storing only the diagonal and lower-triangle couplings."""
    A = spmatrix.ll_mat_sym(n*n)
    for i in range(n):
        for j in range(n):
            k = i + n*j
            A[k, k] = 4
            # Only the lower-triangle neighbours are stored for a symmetric matrix.
            for inside, neighbour in ((i > 0, k - 1), (j > 0, k - n)):
                if inside:
                    A[k, neighbour] = -1
    return A
def poisson2d_sym_blk(n):
    """Block-wise assembly of the symmetric 5-point Laplacian.

    Builds one n x n tridiagonal diagonal block and one -I coupling block,
    then tiles them down the block diagonal of the (n*n) x (n*n) matrix."""
    A = spmatrix.ll_mat_sym(n*n)
    coupling_blk = spmatrix.ll_mat_sym(n)
    diag_blk = spmatrix.ll_mat_sym(n)
    for d in range(n):
        coupling_blk[d, d] = -1
    for d in range(n):
        diag_blk[d, d] = 4
        if d > 0:
            diag_blk[d, d - 1] = -1
    # Tile: diagonal block on every block row, coupling block just below it.
    for start in range(0, n*n, n):
        A[start:start+n, start:start+n] = diag_blk
        if start > 0:
            A[start:start+n, start-n:start] = coupling_blk
    return A
# Benchmark driver: assemble the Poisson matrix three ways, then solve with
# PCG using different sparse storage formats and an SSOR preconditioner.
# Fix: time.clock() was deprecated in Python 3.3 and removed in 3.8; use the
# monotonic high-resolution timer time.perf_counter() instead.
tol = 1e-8
n = 100
t1 = time.perf_counter()
L = poisson2d_sym_blk(n)
print('Time for constructing the matrix using poisson2d_sym_blk: %8.2f sec' % (time.perf_counter() - t1, ))
t1 = time.perf_counter()
L = poisson2d_sym(n)
print('Time for constructing the matrix using poisson2d_sym : %8.2f sec' % (time.perf_counter() - t1, ))
t1 = time.perf_counter()
L = poisson2d(n)
print('Time for constructing the matrix using poisson2d : %8.2f sec' % (time.perf_counter() - t1, ))
A = L.to_csr()
S = L.to_sss()
print(L.nnz)
print(S.nnz)
print(A.nnz)
b = np.ones(n*n, 'd')
# ---- solve with the SSS (sparse symmetric skyline) storage -----------------
t1 = time.perf_counter()
x = np.empty(n*n, 'd')
# `num_iter` instead of `iter` so the builtin is not shadowed.
info, num_iter, relres = pcg(S, b, x, tol, 2000)
print('info=%d, iter=%d, relres=%e' % (info, num_iter, relres))
print('Solve time using SSS matrix: %8.2f s' % (time.perf_counter() - t1))
print('norm(x) = %g' % np.linalg.norm(x))
r = np.empty(n*n, 'd')
S.matvec(x, r)
r = b - r
print('norm(b - A*x) = %g' % np.linalg.norm(r))
print(x[0:10])
# ---- solve with CSR storage ------------------------------------------------
t1 = time.perf_counter()
x = np.empty(n*n, 'd')
info, num_iter, relres = pcg(A, b, x, tol, 2000)
print('info=%d, iter=%d, relres=%e' % (info, num_iter, relres))
print('Solve time using CSR matrix: %8.2f sec' % (time.perf_counter() - t1))
print('norm(x) = %g' % np.linalg.norm(x))
r = np.empty(n*n, 'd')
A.matvec(x, r)
r = b - r
print('norm(b - A*x) = %g' % np.linalg.norm(r))
# ---- solve with the linked-list (LL) matrix directly -----------------------
t1 = time.perf_counter()
x = np.empty(n*n, 'd')
info, num_iter, relres = pcg(L, b, x, tol, 2000)
print('info=%d, iter=%d, relres=%e' % (info, num_iter, relres))
print('Solve time using LL matrix: %8.2f sec' % (time.perf_counter() - t1))
print('norm(x) = %g' % np.linalg.norm(x))
r = np.empty(n*n, 'd')
A.matvec(x, r)
r = b - r
print('norm(b - A*x) = %g' % np.linalg.norm(r))
# ---- SSS storage with an SSOR preconditioner -------------------------------
K_ssor = precon.ssor(S, 1.9)
t1 = time.perf_counter()
x = np.empty(n*n, 'd')
info, num_iter, relres = pcg(S, b, x, tol, 2000, K_ssor)
print('info=%d, iter=%d, relres=%e' % (info, num_iter, relres))
print('Solve time using SSS matrix and SSOR preconditioner: %8.2f sec' % (time.perf_counter() - t1))
print('norm(x) = %g' % np.linalg.norm(x))
r = np.empty(n*n, 'd')
S.matvec(x, r)
r = b - r
print('norm(b - A*x) = %g' % np.linalg.norm(r))
# ---- small eigenvalue computation with JDSYM -------------------------------
from pysparse.eigen import jdsym
jdsym.jdsym(S, None, None, 5, 0.0, 1e-8, 100, qmrs, clvl=1)
| 25 | 100 | 0.481081 | import numpy as np
import math
from pysparse.sparse import spmatrix
from pysparse.itsolvers.krylov import pcg, qmrs
from pysparse.precon import precon
import time
def poisson2d(n):
L = spmatrix.ll_mat(n*n, n*n)
for i in range(n):
for j in range(n):
k = i + n*j
L[k,k] = 4
if i > 0:
L[k,k-1] = -1
if i < n-1:
L[k,k+1] = -1
if j > 0:
L[k,k-n] = -1
if j < n-1:
L[k,k+n] = -1
return L
def poisson2d_sym(n):
L = spmatrix.ll_mat_sym(n*n)
for i in range(n):
for j in range(n):
k = i + n*j
L[k,k] = 4
if i > 0:
L[k,k-1] = -1
if j > 0:
L[k,k-n] = -1
return L
def poisson2d_sym_blk(n):
L = spmatrix.ll_mat_sym(n*n)
I = spmatrix.ll_mat_sym(n)
P = spmatrix.ll_mat_sym(n)
for i in range(n):
I[i,i] = -1
for i in range(n):
P[i,i] = 4
if i > 0: P[i,i-1] = -1
for i in range(0, n*n, n):
L[i:i+n,i:i+n] = P
if i > 0: L[i:i+n,i-n:i] = I
return L
tol = 1e-8
n = 100
t1 = time.clock()
L = poisson2d_sym_blk(n)
print('Time for constructing the matrix using poisson2d_sym_blk: %8.2f sec' % (time.clock() - t1, ))
t1 = time.clock()
L = poisson2d_sym(n)
print('Time for constructing the matrix using poisson2d_sym : %8.2f sec' % (time.clock() - t1, ))
t1 = time.clock()
L = poisson2d(n)
print('Time for constructing the matrix using poisson2d : %8.2f sec' % (time.clock() - t1, ))
A = L.to_csr()
S = L.to_sss()
print(L.nnz)
print(S.nnz)
print(A.nnz)
b = np.ones(n*n, 'd')
t1 = time.clock()
x = np.empty(n*n, 'd')
info, iter, relres = pcg(S, b, x, tol, 2000)
print('info=%d, iter=%d, relres=%e' % (info, iter, relres))
print('Solve time using SSS matrix: %8.2f s' % (time.clock() - t1))
print('norm(x) = %g' % np.linalg.norm(x))
r = np.empty(n*n, 'd')
S.matvec(x, r)
r = b - r
print('norm(b - A*x) = %g' % np.linalg.norm(r))
print(x[0:10])
t1 = time.clock()
x = np.empty(n*n, 'd')
info, iter, relres = pcg(A, b, x, tol, 2000)
print('info=%d, iter=%d, relres=%e' % (info, iter, relres))
print('Solve time using CSR matrix: %8.2f sec' % (time.clock() - t1))
print('norm(x) = %g' % np.linalg.norm(x))
r = np.empty(n*n, 'd')
A.matvec(x, r)
r = b - r
print('norm(b - A*x) = %g' % np.linalg.norm(r))
t1 = time.clock()
x = np.empty(n*n, 'd')
info, iter, relres = pcg(L, b, x, tol, 2000)
print('info=%d, iter=%d, relres=%e' % (info, iter, relres))
print('Solve time using LL matrix: %8.2f sec' % (time.clock() - t1))
print('norm(x) = %g' % np.linalg.norm(x))
r = np.empty(n*n, 'd')
A.matvec(x, r)
r = b - r
print('norm(b - A*x) = %g' % np.linalg.norm(r))
K_ssor = precon.ssor(S, 1.9)
t1 = time.clock()
x = np.empty(n*n, 'd')
info, iter, relres = pcg(S, b, x, tol, 2000, K_ssor)
print('info=%d, iter=%d, relres=%e' % (info, iter, relres))
print('Solve time using SSS matrix and SSOR preconditioner: %8.2f sec' % (time.clock() - t1))
print('norm(x) = %g' % np.linalg.norm(x))
r = np.empty(n*n, 'd')
S.matvec(x, r)
r = b - r
print('norm(b - A*x) = %g' % np.linalg.norm(r))
from pysparse.eigen import jdsym
jdsym.jdsym(S, None, None, 5, 0.0, 1e-8, 100, qmrs, clvl=1)
| true | true |
f720f3ad35136c86211956b945ba2de3bd65784c | 170 | py | Python | scripts/item/consume_2432355.py | Snewmy/swordie | ae01ed4ec0eb20a18730e8cd209eea0b84a8dd17 | [
"MIT"
] | null | null | null | scripts/item/consume_2432355.py | Snewmy/swordie | ae01ed4ec0eb20a18730e8cd209eea0b84a8dd17 | [
"MIT"
] | null | null | null | scripts/item/consume_2432355.py | Snewmy/swordie | ae01ed4ec0eb20a18730e8cd209eea0b84a8dd17 | [
"MIT"
] | null | null | null | # Snowflake Damage Skin
# Grant the Snowflake damage skin; confirm in chat only when the grant succeeds.
if sm.addDamageSkin(2432355):
    sm.chat("The Snowflake Damage Skin has been added to your account's damage skin collection.")
| 34 | 97 | 0.770588 |
success = sm.addDamageSkin(2432355)
if success:
sm.chat("The Snowflake Damage Skin has been added to your account's damage skin collection.")
| true | true |
f720f5d9454e5ea4b2e9262d909e29b9ee507501 | 1,314 | py | Python | app/core/tests/test_admin.py | royandri/recipe-app-api | 5eb7fd433946f6c25fb84d063a46173ee595adf5 | [
"MIT"
] | null | null | null | app/core/tests/test_admin.py | royandri/recipe-app-api | 5eb7fd433946f6c25fb84d063a46173ee595adf5 | [
"MIT"
] | null | null | null | app/core/tests/test_admin.py | royandri/recipe-app-api | 5eb7fd433946f6c25fb84d063a46173ee595adf5 | [
"MIT"
] | null | null | null | from django.test import TestCase, Client
from django.contrib.auth import get_user_model
from django.urls import reverse
class AdminSiteTests(TestCase):
    """Smoke tests for the custom user model's Django admin integration."""

    def setUp(self):
        """Log an admin client in and create a plain user fixture to inspect."""
        self.client = Client()
        self.admin_user = get_user_model().objects.create_superuser(
            email='royandri.dev@gmail.com',
            password='admin'
        )
        self.client.force_login(self.admin_user)
        # Fix: the original fixture email carried a stray leading space
        # (' test@mail.com'), which is not a valid address.
        self.user = get_user_model().objects.create_user(
            email='test@mail.com',
            password='admin',
            name='Test User'
        )

    def test_users_listed(self):
        """The user changelist page must display each user's name and email."""
        url = reverse('admin:core_user_changelist')
        res = self.client.get(url)
        self.assertContains(res, self.user.name)
        self.assertContains(res, self.user.email)

    def test_user_change_page(self):
        """The per-user edit page (/admin/core/user/<id>) must render."""
        url = reverse('admin:core_user_change', args=[self.user.id])
        res = self.client.get(url)
        self.assertEqual(res.status_code, 200)

    def test_create_user_page(self):
        """The add-user page must render."""
        url = reverse('admin:core_user_add')
        res = self.client.get(url)
        self.assertEqual(res.status_code, 200)
| 31.285714 | 68 | 0.637747 | from django.test import TestCase, Client
from django.contrib.auth import get_user_model
from django.urls import reverse
class AdminSiteTests(TestCase):
def setUp(self):
self.client = Client()
self.admin_user = get_user_model().objects.create_superuser(
email='royandri.dev@gmail.com',
password='admin'
)
self.client.force_login(self.admin_user)
self.user = get_user_model().objects.create_user(
email=' test@mail.com',
password='admin',
name='Test User'
)
def test_users_listed(self):
url = reverse('admin:core_user_changelist')
res = self.client.get(url)
self.assertContains(res, self.user.name)
self.assertContains(res, self.user.email)
def test_user_change_page(self):
url = reverse('admin:core_user_change', args=[self.user.id])
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
def test_create_user_page(self):
url = reverse('admin:core_user_add')
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
| true | true |
f720f7d7aa6b5c6b8450862f0abd2256a26a8136 | 58 | py | Python | www/speed/benchmarks/function_call.py | olemis/brython | 3ef4a602eed5a75130e507707579ad9aa2dc3e5c | [
"BSD-3-Clause"
] | 2 | 2018-06-09T15:29:48.000Z | 2019-11-13T09:15:08.000Z | www/speed/benchmarks/function_call.py | olemis/brython | 3ef4a602eed5a75130e507707579ad9aa2dc3e5c | [
"BSD-3-Clause"
] | 2 | 2017-04-14T03:52:41.000Z | 2017-04-14T04:02:06.000Z | client/components/ide/brython/www/speed/benchmarks/function_call.py | pascualy/coding_blind | 420947c61ec3cd0169d5a25f7b01ae6df9541607 | [
"MIT"
] | 2 | 2018-02-22T09:48:18.000Z | 2020-06-04T17:00:09.000Z | def f(x):
return x
for i in range(1000000):
f(i)
| 9.666667 | 24 | 0.551724 | def f(x):
return x
for i in range(1000000):
f(i)
| true | true |
f720f8eccc250efd8c3d430ddb9ee9afde19d1ec | 4,224 | py | Python | lmctl/cli/commands/targets/behaviour_projects.py | manojn97/lmctl | 844925cb414722351efac90cb97f10c1185eef7a | [
"Apache-2.0"
] | 3 | 2021-07-19T09:46:01.000Z | 2022-03-07T13:51:25.000Z | lmctl/cli/commands/targets/behaviour_projects.py | manojn97/lmctl | 844925cb414722351efac90cb97f10c1185eef7a | [
"Apache-2.0"
] | 43 | 2019-08-27T12:36:29.000Z | 2020-08-27T14:50:40.000Z | lmctl/cli/commands/targets/behaviour_projects.py | manojn97/lmctl | 844925cb414722351efac90cb97f10c1185eef7a | [
"Apache-2.0"
] | 7 | 2020-09-22T20:32:17.000Z | 2022-03-29T12:25:51.000Z | import click
from typing import Dict
from lmctl.client import TNCOClient, TNCOClientHttpError
from lmctl.cli.arguments import common_output_format_handler
from lmctl.cli.format import Table, Column
from .tnco_target import TNCOTarget, LmGet, LmCreate, LmUpdate, LmDelete, LmGen
# Column layout used when rendering Behaviour Projects as tabular CLI output.
class ProjectTable(Table):
    columns = [
        Column('name', header='Name'),
        Column('description', header='Description')
    ]
# Shared output-format handler (table plus the common serialized formats)
# reused by the command decorators on the Projects target below.
output_formats = common_output_format_handler(table=ProjectTable())
class Projects(TNCOTarget):
    # CLI target exposing get/create/update/delete commands for TNCO
    # Behaviour Projects.  (Comments only here — docstrings are avoided on
    # purpose because click can surface a command function's __doc__ as help
    # text; TODO confirm whether the Lm* decorators do the same.)
    name = 'behaviourproject'
    plural = 'behaviourprojects'
    display_name = 'Behaviour Project'
    @LmGen()
    def genfile(self, ctx: click.Context, name: str):
        # Skeleton object for file generation; a project shares the name of
        # its Assembly descriptor (assembly::<name>::1.0).
        return {
            'name': f'assembly::{name}::1.0',
        }
    @LmGet(output_formats=output_formats, help=f'''\
Get all {display_name}s or get one by name\
\n\nUse NAME argument to get by one by name\
\n\nOmit NAME argument get all projects\
\n\nNote: all Assembly descriptors have a Behaviour Project associated with them so can be found using their name e.g. assembly::example::1.0''')
    @click.argument('name', required=False)
    def get(self, tnco_client: TNCOClient, ctx: click.Context, name: str = None):
        # With NAME: fetch one project; without: list them all.
        api = tnco_client.behaviour_projects
        if name is not None:
            return api.get(name)
        else:
            return api.all()
    @LmCreate()
    def create(self, tnco_client: TNCOClient, ctx: click.Context, file_content: Dict = None, set_values: Dict = None):
        # The project body comes either from a file (-f) or from --set
        # key=value pairs; mixing the two is rejected.
        api = tnco_client.behaviour_projects
        if file_content is not None:
            if set_values is not None and len(set_values) > 0:
                raise click.BadArgumentUsage(message='Do not use "--set" option when using "-f, --file" option', ctx=ctx)
            project = file_content
        else:
            project = set_values
        result = api.create(project)
        # Report the created project's name back to the CLI wrapper.
        return result.get('name')
    @LmUpdate()
    @click.argument('name', required=False)
    def update(self, tnco_client: TNCOClient, ctx: click.Context, file_content: Dict = None, name: str = None, set_values: Dict = None):
        # Either a full replacement document via -f, or fetch-by-NAME and
        # patch the fetched project with the --set values before updating.
        api = tnco_client.behaviour_projects
        if file_content is not None:
            if name is not None:
                raise click.BadArgumentUsage(message='Do not use "NAME" argument when using "-f, --file" option', ctx=ctx)
            project = file_content
        else:
            if name is None:
                raise click.BadArgumentUsage(message='Must set "NAME" argument when no "-f, --file" option specified', ctx=ctx)
            project = api.get(name)
            project.update(set_values)
        result = api.update(project)
        return project.get('name')
    @LmDelete()
    @click.argument('name', required=False)
    def delete(self, tnco_client: TNCOClient, ctx: click.Context, file_content: Dict = None, name: str = None, ignore_missing: bool = None):
        # The delete target may be named directly or taken from a file's
        # "id"/"name" attribute.
        api = tnco_client.behaviour_projects
        if file_content is not None:
            if name is not None:
                raise click.BadArgumentUsage(message='Do not use "NAME" argument when using "-f, --file" option', ctx=ctx)
            project = file_content
            project_id = project.get('id', project.get('name', None))
            if project_id is None:
                raise click.BadArgumentUsage(message='Object from file does not contain an "name" (or "id") attribute', ctx=ctx)
        else:
            if name is None:
                raise click.BadArgumentUsage(message='Must set "NAME" argument when no "-f, --file" option specified', ctx=ctx)
            project_id = name
        try:
            result = api.delete(project_id)
        except TNCOClientHttpError as e:
            if e.status_code == 404:
                # Not found
                # A missing project is tolerated only under --ignore-missing;
                # any other HTTP failure is re-raised unchanged.
                if ignore_missing:
                    ctl = self._get_controller()
                    ctl.io.print(f'No {self.display_name} found with name (ID) {project_id} (ignoring)')
                    return
            raise
return project_id | 44.93617 | 189 | 0.60535 | import click
from typing import Dict
from lmctl.client import TNCOClient, TNCOClientHttpError
from lmctl.cli.arguments import common_output_format_handler
from lmctl.cli.format import Table, Column
from .tnco_target import TNCOTarget, LmGet, LmCreate, LmUpdate, LmDelete, LmGen
class ProjectTable(Table):
columns = [
Column('name', header='Name'),
Column('description', header='Description')
]
output_formats = common_output_format_handler(table=ProjectTable())
class Projects(TNCOTarget):
name = 'behaviourproject'
plural = 'behaviourprojects'
display_name = 'Behaviour Project'
@LmGen()
def genfile(self, ctx: click.Context, name: str):
return {
'name': f'assembly::{name}::1.0',
}
@LmGet(output_formats=output_formats, help=f'''\
Get all {display_name}s or get one by name\
\n\nUse NAME argument to get by one by name\
\n\nOmit NAME argument get all projects\
\n\nNote: all Assembly descriptors have a Behaviour Project associated with them so can be found using their name e.g. assembly::example::1.0''')
@click.argument('name', required=False)
def get(self, tnco_client: TNCOClient, ctx: click.Context, name: str = None):
api = tnco_client.behaviour_projects
if name is not None:
return api.get(name)
else:
return api.all()
@LmCreate()
def create(self, tnco_client: TNCOClient, ctx: click.Context, file_content: Dict = None, set_values: Dict = None):
api = tnco_client.behaviour_projects
if file_content is not None:
if set_values is not None and len(set_values) > 0:
raise click.BadArgumentUsage(message='Do not use "--set" option when using "-f, --file" option', ctx=ctx)
project = file_content
else:
project = set_values
result = api.create(project)
return result.get('name')
@LmUpdate()
@click.argument('name', required=False)
def update(self, tnco_client: TNCOClient, ctx: click.Context, file_content: Dict = None, name: str = None, set_values: Dict = None):
api = tnco_client.behaviour_projects
if file_content is not None:
if name is not None:
raise click.BadArgumentUsage(message='Do not use "NAME" argument when using "-f, --file" option', ctx=ctx)
project = file_content
else:
if name is None:
raise click.BadArgumentUsage(message='Must set "NAME" argument when no "-f, --file" option specified', ctx=ctx)
project = api.get(name)
project.update(set_values)
result = api.update(project)
return project.get('name')
@LmDelete()
@click.argument('name', required=False)
def delete(self, tnco_client: TNCOClient, ctx: click.Context, file_content: Dict = None, name: str = None, ignore_missing: bool = None):
api = tnco_client.behaviour_projects
if file_content is not None:
if name is not None:
raise click.BadArgumentUsage(message='Do not use "NAME" argument when using "-f, --file" option', ctx=ctx)
project = file_content
project_id = project.get('id', project.get('name', None))
if project_id is None:
raise click.BadArgumentUsage(message='Object from file does not contain an "name" (or "id") attribute', ctx=ctx)
else:
if name is None:
raise click.BadArgumentUsage(message='Must set "NAME" argument when no "-f, --file" option specified', ctx=ctx)
project_id = name
try:
result = api.delete(project_id)
except TNCOClientHttpError as e:
if e.status_code == 404:
if ignore_missing:
ctl = self._get_controller()
ctl.io.print(f'No {self.display_name} found with name (ID) {project_id} (ignoring)')
return
raise
return project_id | true | true |
f720f90efb06d99eed40521f7e2ae957d0796d80 | 6,907 | py | Python | toontown/coghq/DistributedMintRoom.py | CrankySupertoon01/Toontown-2 | 60893d104528a8e7eb4aced5d0015f22e203466d | [
"MIT"
] | 1 | 2021-02-13T22:40:50.000Z | 2021-02-13T22:40:50.000Z | toontown/coghq/DistributedMintRoom.py | CrankySupertoonArchive/Toontown-2 | 60893d104528a8e7eb4aced5d0015f22e203466d | [
"MIT"
] | 1 | 2018-07-28T20:07:04.000Z | 2018-07-30T18:28:34.000Z | toontown/coghq/DistributedMintRoom.py | CrankySupertoonArchive/Toontown-2 | 60893d104528a8e7eb4aced5d0015f22e203466d | [
"MIT"
] | 2 | 2019-12-02T01:39:10.000Z | 2021-02-13T22:41:00.000Z | from direct.directnotify import DirectNotifyGlobal
from direct.distributed.ClockDelta import *
from direct.interval.IntervalGlobal import *
from panda3d.core import *
import random
import FactoryEntityCreator
import MintRoomBase, MintRoom
import MintRoomSpecs
from otp.level import DistributedLevel
from otp.level import LevelSpec, LevelConstants
from otp.nametag.NametagConstants import *
from toontown.toonbase import TTLocalizer
from toontown.toonbase.ToontownGlobals import *
def getMintRoomReadyPostName(doId):
    """Bulletin-board key posted when the room with distributed id `doId` is ready."""
    return 'mintRoomReady-{0}'.format(doId)
class DistributedMintRoom(DistributedLevel.DistributedLevel, MintRoomBase.MintRoomBase, MintRoom.MintRoom):
    """Client-side representation of one room inside a Cashbot Mint.

    Combines the distributed-level machinery (DistributedLevel) with the
    mint-specific mixins (MintRoomBase, MintRoom).  All of the visibility
    hooks are deliberately no-ops: the whole room is always shown.
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('DistributedMintRoom')
    EmulateEntrancePoint = False
    def __init__(self, cr):
        DistributedLevel.DistributedLevel.__init__(self, cr)
        MintRoomBase.MintRoomBase.__init__(self)
        MintRoom.MintRoom.__init__(self)
        # Bookkeeping for the cog suits populating this room.
        self.suitIds = []
        self.suits = []
        self.reserveSuits = []
        self.joiningReserves = []
        self.suitsInitialized = 0
        self.goonClipPlanes = {}
        self.mint = None
        return
    def createEntityCreator(self):
        # Factory-style entity creator is reused for mint level entities.
        return FactoryEntityCreator.FactoryEntityCreator(level=self)
    def generate(self):
        self.notify.debug('generate')
        DistributedLevel.DistributedLevel.generate(self)
    def delete(self):
        del self.mint
        DistributedLevel.DistributedLevel.delete(self)
        MintRoom.MintRoom.delete(self)
        self.ignoreAll()
    def setMintId(self, mintId):
        self.notify.debug('mintId: %s' % mintId)
        MintRoomBase.MintRoomBase.setMintId(self, mintId)
    def setRoomId(self, roomId):
        self.notify.debug('roomId: %s' % roomId)
        MintRoomBase.MintRoomBase.setRoomId(self, roomId)
    def setRoomNum(self, num):
        self.notify.debug('roomNum: %s' % num)
        MintRoom.MintRoom.setRoomNum(self, num)
    def levelAnnounceGenerate(self):
        # Load this room's level spec and initialize the level from it.
        self.notify.debug('levelAnnounceGenerate')
        DistributedLevel.DistributedLevel.levelAnnounceGenerate(self)
        specModule = MintRoomSpecs.getMintRoomSpecModule(self.roomId)
        roomSpec = LevelSpec.LevelSpec(specModule)
        DistributedLevel.DistributedLevel.initializeLevel(self, roomSpec)
    def getReadyPostName(self):
        return getMintRoomReadyPostName(self.doId)
    def privGotSpec(self, levelSpec):
        DistributedLevel.DistributedLevel.privGotSpec(self, levelSpec)
        MintRoom.MintRoom.enter(self)
        self.acceptOnce('leavingMint', self.announceLeaving)
        # Signal to any waiters that this room has finished loading.
        bboard.post(self.getReadyPostName())
    def fixupLevelModel(self):
        MintRoom.MintRoom.setGeom(self, self.geom)
        MintRoom.MintRoom.initFloorCollisions(self)
    def setMint(self, mint):
        self.mint = mint
    def setBossConfronted(self, avId):
        self.mint.setBossConfronted(avId)
    def setDefeated(self):
        # Import locally to avoid a module-level circular import with
        # DistributedMint.
        self.notify.info('setDefeated')
        from toontown.coghq import DistributedMint
        messenger.send(DistributedMint.DistributedMint.WinEvent)
    # The zone-visibility machinery from DistributedLevel is disabled for
    # mint rooms: every hook below is intentionally a no-op.
    def initVisibility(self, *args, **kw):
        pass
    def shutdownVisibility(self, *args, **kw):
        pass
    def lockVisibility(self, *args, **kw):
        pass
    def unlockVisibility(self, *args, **kw):
        pass
    def enterZone(self, *args, **kw):
        pass
    def updateVisibility(self, *args, **kw):
        pass
    def setVisibility(self, *args, **kw):
        pass
    def resetVisibility(self, *args, **kw):
        pass
    def handleVisChange(self, *args, **kw):
        pass
    def forceSetZoneThisFrame(self, *args, **kw):
        pass
    def getParentTokenForEntity(self, entId):
        # Entity ids are only unique per room; offset by room number to get
        # a mint-wide unique token.
        return 1000000 * self.roomNum + entId
    def enterLtNotPresent(self):
        MintRoom.MintRoom.enterLtNotPresent(self)
        self.ignore('f2')
    def enterLtPresent(self):
        MintRoom.MintRoom.enterLtPresent(self)
        if self.mint is not None:
            self.mint.currentRoomName = MintRoomSpecs.CashbotMintRoomId2RoomName[self.roomId]
        # Debug helper bound to the F2 key: prints the local avatar's
        # position/heading in this room and shows it as a thought bubble.
        # (`self = self` binds the instance as a default arg — Python 2 idiom
        # for capturing it in the nested function.)
        def printPos(self = self):
            thisZone = self.getZoneNode(LevelConstants.UberZoneEntId)
            pos = base.localAvatar.getPos(thisZone)
            h = base.localAvatar.getH(thisZone)
            roomName = MintRoomSpecs.CashbotMintRoomId2RoomName[self.roomId]
            print 'mint pos: %s, h: %s, room: %s' % (repr(pos), h, roomName)
            if self.mint is not None:
                floorNum = self.mint.floorNum
            else:
                floorNum = '???'
            posStr = 'X: %.3f' % pos[0] + '\nY: %.3f' % pos[1] + '\nZ: %.3f' % pos[2] + '\nH: %.3f' % h + '\nmintId: %s' % self.mintId + '\nfloor: %s' % floorNum + '\nroomId: %s' % self.roomId + '\nroomName: %s' % roomName
            base.localAvatar.setChatAbsolute(posStr, CFThought | CFTimeout)
            return
        self.accept('f2', printPos)
        return
    def handleSOSPanel(self, panel):
        # Only avatars that still exist client-side are offered on the panel.
        avIds = []
        for avId in self.avIdList:
            if base.cr.doId2do.get(avId):
                avIds.append(avId)
        panel.setFactoryToonIdList(avIds)
    def disable(self):
        self.notify.debug('disable')
        MintRoom.MintRoom.exit(self)
        if hasattr(self, 'suits'):
            del self.suits
        if hasattr(self, 'relatedObjectMgrRequest') and self.relatedObjectMgrRequest:
            self.cr.relatedObjectMgr.abortRequest(self.relatedObjectMgrRequest)
            del self.relatedObjectMgrRequest
        bboard.remove(self.getReadyPostName())
        DistributedLevel.DistributedLevel.disable(self)
    def setSuits(self, suitIds, reserveSuitIds):
        # NOTE(review): oldSuitIds is computed but never used here —
        # presumably a leftover from diff-based suit handling.
        oldSuitIds = list(self.suitIds)
        self.suitIds = suitIds
        self.reserveSuitIds = reserveSuitIds
    def reservesJoining(self):
        pass
    def getCogSpec(self, cogId):
        cogSpecModule = MintRoomSpecs.getCogSpecModule(self.roomId)
        return cogSpecModule.CogData[cogId]
    def getReserveCogSpec(self, cogId):
        cogSpecModule = MintRoomSpecs.getCogSpecModule(self.roomId)
        return cogSpecModule.ReserveCogData[cogId]
    def getBattleCellSpec(self, battleCellId):
        cogSpecModule = MintRoomSpecs.getCogSpecModule(self.roomId)
        return cogSpecModule.BattleCells[battleCellId]
    def getFloorOuchLevel(self):
        # Damage applied by hazardous floors in mint rooms.
        return 8
    def getTaskZoneId(self):
        return self.mintId
    def getBossTaunt(self):
        return TTLocalizer.MintBossTaunt
    def getBossBattleTaunt(self):
        return TTLocalizer.MintBossBattleTaunt
    def __str__(self):
        # roomId may not be set yet if the room has not fully generated.
        if hasattr(self, 'roomId'):
            return '%s %s: %s' % (self.__class__.__name__, self.roomId, MintRoomSpecs.CashbotMintRoomId2RoomName[self.roomId])
        else:
            return 'DistributedMintRoom'
    def __repr__(self):
        return str(self)
| 32.734597 | 222 | 0.668018 | from direct.directnotify import DirectNotifyGlobal
from direct.distributed.ClockDelta import *
from direct.interval.IntervalGlobal import *
from panda3d.core import *
import random
import FactoryEntityCreator
import MintRoomBase, MintRoom
import MintRoomSpecs
from otp.level import DistributedLevel
from otp.level import LevelSpec, LevelConstants
from otp.nametag.NametagConstants import *
from toontown.toonbase import TTLocalizer
from toontown.toonbase.ToontownGlobals import *
def getMintRoomReadyPostName(doId):
return 'mintRoomReady-%s' % doId
class DistributedMintRoom(DistributedLevel.DistributedLevel, MintRoomBase.MintRoomBase, MintRoom.MintRoom):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedMintRoom')
EmulateEntrancePoint = False
def __init__(self, cr):
DistributedLevel.DistributedLevel.__init__(self, cr)
MintRoomBase.MintRoomBase.__init__(self)
MintRoom.MintRoom.__init__(self)
self.suitIds = []
self.suits = []
self.reserveSuits = []
self.joiningReserves = []
self.suitsInitialized = 0
self.goonClipPlanes = {}
self.mint = None
return
def createEntityCreator(self):
return FactoryEntityCreator.FactoryEntityCreator(level=self)
def generate(self):
self.notify.debug('generate')
DistributedLevel.DistributedLevel.generate(self)
def delete(self):
del self.mint
DistributedLevel.DistributedLevel.delete(self)
MintRoom.MintRoom.delete(self)
self.ignoreAll()
def setMintId(self, mintId):
self.notify.debug('mintId: %s' % mintId)
MintRoomBase.MintRoomBase.setMintId(self, mintId)
def setRoomId(self, roomId):
self.notify.debug('roomId: %s' % roomId)
MintRoomBase.MintRoomBase.setRoomId(self, roomId)
def setRoomNum(self, num):
self.notify.debug('roomNum: %s' % num)
MintRoom.MintRoom.setRoomNum(self, num)
def levelAnnounceGenerate(self):
self.notify.debug('levelAnnounceGenerate')
DistributedLevel.DistributedLevel.levelAnnounceGenerate(self)
specModule = MintRoomSpecs.getMintRoomSpecModule(self.roomId)
roomSpec = LevelSpec.LevelSpec(specModule)
DistributedLevel.DistributedLevel.initializeLevel(self, roomSpec)
def getReadyPostName(self):
return getMintRoomReadyPostName(self.doId)
def privGotSpec(self, levelSpec):
DistributedLevel.DistributedLevel.privGotSpec(self, levelSpec)
MintRoom.MintRoom.enter(self)
self.acceptOnce('leavingMint', self.announceLeaving)
bboard.post(self.getReadyPostName())
def fixupLevelModel(self):
MintRoom.MintRoom.setGeom(self, self.geom)
MintRoom.MintRoom.initFloorCollisions(self)
def setMint(self, mint):
self.mint = mint
def setBossConfronted(self, avId):
self.mint.setBossConfronted(avId)
def setDefeated(self):
self.notify.info('setDefeated')
from toontown.coghq import DistributedMint
messenger.send(DistributedMint.DistributedMint.WinEvent)
def initVisibility(self, *args, **kw):
pass
def shutdownVisibility(self, *args, **kw):
pass
def lockVisibility(self, *args, **kw):
pass
def unlockVisibility(self, *args, **kw):
pass
def enterZone(self, *args, **kw):
pass
def updateVisibility(self, *args, **kw):
pass
def setVisibility(self, *args, **kw):
pass
def resetVisibility(self, *args, **kw):
pass
def handleVisChange(self, *args, **kw):
pass
def forceSetZoneThisFrame(self, *args, **kw):
pass
def getParentTokenForEntity(self, entId):
return 1000000 * self.roomNum + entId
def enterLtNotPresent(self):
MintRoom.MintRoom.enterLtNotPresent(self)
self.ignore('f2')
def enterLtPresent(self):
MintRoom.MintRoom.enterLtPresent(self)
if self.mint is not None:
self.mint.currentRoomName = MintRoomSpecs.CashbotMintRoomId2RoomName[self.roomId]
def printPos(self = self):
thisZone = self.getZoneNode(LevelConstants.UberZoneEntId)
pos = base.localAvatar.getPos(thisZone)
h = base.localAvatar.getH(thisZone)
roomName = MintRoomSpecs.CashbotMintRoomId2RoomName[self.roomId]
print 'mint pos: %s, h: %s, room: %s' % (repr(pos), h, roomName)
if self.mint is not None:
floorNum = self.mint.floorNum
else:
floorNum = '???'
posStr = 'X: %.3f' % pos[0] + '\nY: %.3f' % pos[1] + '\nZ: %.3f' % pos[2] + '\nH: %.3f' % h + '\nmintId: %s' % self.mintId + '\nfloor: %s' % floorNum + '\nroomId: %s' % self.roomId + '\nroomName: %s' % roomName
base.localAvatar.setChatAbsolute(posStr, CFThought | CFTimeout)
return
self.accept('f2', printPos)
return
def handleSOSPanel(self, panel):
avIds = []
for avId in self.avIdList:
if base.cr.doId2do.get(avId):
avIds.append(avId)
panel.setFactoryToonIdList(avIds)
def disable(self):
self.notify.debug('disable')
MintRoom.MintRoom.exit(self)
if hasattr(self, 'suits'):
del self.suits
if hasattr(self, 'relatedObjectMgrRequest') and self.relatedObjectMgrRequest:
self.cr.relatedObjectMgr.abortRequest(self.relatedObjectMgrRequest)
del self.relatedObjectMgrRequest
bboard.remove(self.getReadyPostName())
DistributedLevel.DistributedLevel.disable(self)
def setSuits(self, suitIds, reserveSuitIds):
oldSuitIds = list(self.suitIds)
self.suitIds = suitIds
self.reserveSuitIds = reserveSuitIds
def reservesJoining(self):
pass
def getCogSpec(self, cogId):
cogSpecModule = MintRoomSpecs.getCogSpecModule(self.roomId)
return cogSpecModule.CogData[cogId]
def getReserveCogSpec(self, cogId):
cogSpecModule = MintRoomSpecs.getCogSpecModule(self.roomId)
return cogSpecModule.ReserveCogData[cogId]
def getBattleCellSpec(self, battleCellId):
cogSpecModule = MintRoomSpecs.getCogSpecModule(self.roomId)
return cogSpecModule.BattleCells[battleCellId]
def getFloorOuchLevel(self):
return 8
def getTaskZoneId(self):
return self.mintId
def getBossTaunt(self):
return TTLocalizer.MintBossTaunt
def getBossBattleTaunt(self):
return TTLocalizer.MintBossBattleTaunt
def __str__(self):
if hasattr(self, 'roomId'):
return '%s %s: %s' % (self.__class__.__name__, self.roomId, MintRoomSpecs.CashbotMintRoomId2RoomName[self.roomId])
else:
return 'DistributedMintRoom'
def __repr__(self):
return str(self)
| false | true |
f720f9e7fd9b231b60cfa0de9c50219e99364bef | 2,516 | py | Python | api/serializers.py | NiklasMerz/shoppinglist | 38c494b2a2f80a0c543beaf0d9d9a75870bdbb22 | [
"MIT"
] | null | null | null | api/serializers.py | NiklasMerz/shoppinglist | 38c494b2a2f80a0c543beaf0d9d9a75870bdbb22 | [
"MIT"
] | 45 | 2021-11-03T20:48:50.000Z | 2021-12-14T21:22:12.000Z | api/serializers.py | NiklasMerz/shoppinglist | 38c494b2a2f80a0c543beaf0d9d9a75870bdbb22 | [
"MIT"
] | null | null | null | from list.models import *
from rest_framework import serializers
class CatalogItemSerializer(serializers.ModelSerializer):
    """Serializer exposing a catalog item's id and description."""
    class Meta:
        model = CatalogItem
        fields = ['id', 'description']
class ItemSerializer(serializers.ModelSerializer):
    """Serializer for shopping-list items, enriched with purchase history.

    The ``last_*`` fields report the most recent checkout and the most
    recent receipt line item matching any of the item's SKUs; each one
    degrades to ``None`` when there is no matching history.
    """
    last_checkout = serializers.SerializerMethodField()
    last_line_item_date = serializers.SerializerMethodField()
    last_line_item_total = serializers.SerializerMethodField()
    last_line_item_store = serializers.SerializerMethodField()

    def _latest_line_item(self, obj):
        """Most recent LineItem for any of the item's SKUs (may raise)."""
        return LineItem.objects.filter(sku__in=obj.skus.all()).latest()

    def get_last_checkout(self, obj):
        """Timestamp of the item's most recent checkout, or None."""
        # `except Exception` instead of the original bare `except:` so that
        # KeyboardInterrupt/SystemExit are no longer swallowed; the
        # best-effort None fallback is preserved.
        try:
            return obj.checkouts.latest().time
        except Exception:
            return None

    def get_last_line_item_date(self, obj):
        """Receipt timestamp of the most recent matching line item, or None."""
        try:
            return self._latest_line_item(obj).receipt.time
        except Exception:
            return None

    def get_last_line_item_total(self, obj):
        """Monetary amount of the most recent matching line item, or None."""
        try:
            return self._latest_line_item(obj).total.amount
        except Exception:
            return None

    def get_last_line_item_store(self, obj):
        """Store name on the most recent matching line item's receipt, or None."""
        try:
            return self._latest_line_item(obj).receipt.store.name
        except Exception:
            return None

    class Meta:
        model = Item
        fields = ['id', 'description', 'note', 'buy', 'list', 'last_checkout', 'last_line_item_date', 'last_line_item_total', 'last_line_item_store', 'catalog_item']
class ListSerializer(serializers.ModelSerializer):
    """Serializer exposing a shopping list's id and name."""
    class Meta:
        model = List
        fields = ['id', 'name']
class StoreSerializer(serializers.ModelSerializer):
    """Serializer exposing a store's basic attributes."""
    class Meta:
        model = Store
        fields = ['id', 'name', 'note', 'location']
class TripSerializer(serializers.ModelSerializer):
    """Serializer for a shopping trip, linking a store and a list."""
    class Meta:
        model = Trip
        fields = ['id', 'time', 'store', 'list', 'finish_time', 'label', 'notes']
class CheckoutSerializer(serializers.ModelSerializer):
    """Serializer for a checkout event of an item during a trip."""
    class Meta:
        model = Checkout
        fields = ['id', 'time', 'trip', 'item', 'count']
class LineItemSerializer(serializers.ModelSerializer):
    """Serializer for a receipt line item.

    ``item`` is exposed read-only by primary key; ``date`` is pulled from
    the parent receipt's timestamp (``source='receipt.time'``).
    """
    item = serializers.PrimaryKeyRelatedField(read_only=True)
    date = serializers.CharField(source='receipt.time')
    class Meta:
        model = LineItem
        fields = ('id', 'description', 'total', 'quantity', 'item', 'date')
class ReceiptSerializer(serializers.ModelSerializer):
line_items = LineItemSerializer(many=True, read_only=True)
class Meta:
model = Receipt
fields = ['id', 'time', 'trip', 'total', 'line_items'] | 33.546667 | 165 | 0.661367 | from list.models import *
from rest_framework import serializers
class CatalogItemSerializer(serializers.ModelSerializer):
class Meta:
model = CatalogItem
fields = ['id', 'description']
class ItemSerializer(serializers.ModelSerializer):
last_checkout = serializers.SerializerMethodField()
last_line_item_date = serializers.SerializerMethodField()
last_line_item_total = serializers.SerializerMethodField()
last_line_item_store = serializers.SerializerMethodField()
def get_last_checkout(self, obj):
try:
return obj.checkouts.latest().time
except:
return None
def get_last_line_item_date(self, obj):
try:
return LineItem.objects.filter(sku__in=obj.skus.all()).latest().receipt.time
except:
return None
def get_last_line_item_total(self, obj):
try:
return LineItem.objects.filter(sku__in=obj.skus.all()).latest().total.amount
except:
return None
def get_last_line_item_store(self, obj):
try:
return LineItem.objects.filter(sku__in=obj.skus.all()).latest().receipt.store.name
except:
return None
class Meta:
model = Item
fields = ['id', 'description', 'note', 'buy', 'list', 'last_checkout', 'last_line_item_date', 'last_line_item_total', 'last_line_item_store', 'catalog_item']
class ListSerializer(serializers.ModelSerializer):
class Meta:
model = List
fields = ['id', 'name']
class StoreSerializer(serializers.ModelSerializer):
class Meta:
model = Store
fields = ['id', 'name', 'note', 'location']
class TripSerializer(serializers.ModelSerializer):
class Meta:
model = Trip
fields = ['id', 'time', 'store', 'list', 'finish_time', 'label', 'notes']
class CheckoutSerializer(serializers.ModelSerializer):
class Meta:
model = Checkout
fields = ['id', 'time', 'trip', 'item', 'count']
class LineItemSerializer(serializers.ModelSerializer):
item = serializers.PrimaryKeyRelatedField(read_only=True)
date = serializers.CharField(source='receipt.time')
class Meta:
model = LineItem
fields = ('id', 'description', 'total', 'quantity', 'item', 'date')
class ReceiptSerializer(serializers.ModelSerializer):
line_items = LineItemSerializer(many=True, read_only=True)
class Meta:
model = Receipt
fields = ['id', 'time', 'trip', 'total', 'line_items'] | true | true |
f720fb43dcf64ffc735cf5c4010db34b4ad229a7 | 8,091 | py | Python | tests/test_cli_exiftool.py | oPromessa/osxphotos | 0d7e324f0262093727147b9f22ed275e962e8725 | [
"MIT"
] | null | null | null | tests/test_cli_exiftool.py | oPromessa/osxphotos | 0d7e324f0262093727147b9f22ed275e962e8725 | [
"MIT"
] | null | null | null | tests/test_cli_exiftool.py | oPromessa/osxphotos | 0d7e324f0262093727147b9f22ed275e962e8725 | [
"MIT"
] | null | null | null | """Tests for `osxphotos exiftool` command."""
import glob
import json
import os
import pytest
from click.testing import CliRunner
from osxphotos.cli.exiftool_cli import exiftool
from osxphotos.cli.export import export
from osxphotos.exiftool import ExifTool, get_exiftool_path
from .test_cli import CLI_EXIFTOOL, PHOTOS_DB_15_7
# Determine at import time whether the external `exiftool` binary is on
# PATH; every test below is skipped via pytest.mark.skipif when it is not.
try:
    exiftool_path = get_exiftool_path()
except FileNotFoundError:
    exiftool_path = None
@pytest.mark.skipif(exiftool_path is None, reason="exiftool not installed")
def test_export_exiftool():
    """Export photos, run `osxphotos exiftool`, and verify written metadata.

    Checks EXIF values for *every* exported uuid (the original version only
    checked whichever uuid happened to leak out of the option-building loop),
    then confirms that `export --exiftool --update` skips all files because
    nothing has changed.
    """
    runner = CliRunner()
    cwd = os.getcwd()
    with runner.isolated_filesystem() as temp_dir:
        uuid_option = []
        for uuid in CLI_EXIFTOOL:
            uuid_option.extend(("--uuid", uuid))
        # first, export without --exiftool
        result = runner.invoke(
            export,
            [
                "--db",
                os.path.join(cwd, PHOTOS_DB_15_7),
                temp_dir,
                "-V",
                *uuid_option,
            ],
        )
        assert result.exit_code == 0
        files = glob.glob("*")
        assert sorted(files) == sorted(
            [CLI_EXIFTOOL[uuid]["File:FileName"] for uuid in CLI_EXIFTOOL]
        )
        # now, run exiftool command to update exiftool metadata
        result = runner.invoke(
            exiftool,
            ["--db", os.path.join(cwd, PHOTOS_DB_15_7), "-V", "--db-config", temp_dir],
        )
        assert result.exit_code == 0
        # verify metadata was written for every exported photo, not just the
        # last uuid from the loop above
        for uuid in CLI_EXIFTOOL:
            exif = ExifTool(CLI_EXIFTOOL[uuid]["File:FileName"]).asdict()
            for key in CLI_EXIFTOOL[uuid]:
                if isinstance(exif[key], list):
                    # keyword-style tags have no guaranteed order
                    assert sorted(exif[key]) == sorted(CLI_EXIFTOOL[uuid][key])
                else:
                    assert exif[key] == CLI_EXIFTOOL[uuid][key]
        # now, export with --exiftool --update, no files should be updated
        result = runner.invoke(
            export,
            [
                "--db",
                os.path.join(cwd, PHOTOS_DB_15_7),
                temp_dir,
                "-V",
                "--exiftool",
                "--update",
                *uuid_option,
            ],
        )
        assert result.exit_code == 0
        assert f"exported: 0, updated: 0, skipped: {len(CLI_EXIFTOOL)}" in result.output
@pytest.mark.skipif(exiftool_path is None, reason="exiftool not installed")
def test_export_exiftool_album_keyword():
    """Test `osxphotos exiftool` with --album-keyword.

    Exports an album, runs the exiftool command with --album-keyword and a
    JSON report, verifies the album name landed in each file's IPTC keywords,
    and confirms a follow-up `export --exiftool --update` skips everything.
    """
    runner = CliRunner()
    cwd = os.getcwd()
    with runner.isolated_filesystem() as temp_dir:
        # first, export without --exiftool
        result = runner.invoke(
            export,
            [
                "--db",
                os.path.join(cwd, PHOTOS_DB_15_7),
                temp_dir,
                "-V",
                "--album",
                "Pumpkin Farm",
            ],
        )
        assert result.exit_code == 0
        files = glob.glob("*")
        assert len(files) == 3
        # now, run exiftool command to update exiftool metadata
        result = runner.invoke(
            exiftool,
            [
                "--db",
                os.path.join(cwd, PHOTOS_DB_15_7),
                "-V",
                "--db-config",
                "--report",
                "exiftool.json",
                "--album-keyword",
                temp_dir,
            ],
        )
        assert result.exit_code == 0
        # context manager closes the report file (the original leaked the
        # handle from json.load(open(...)))
        with open("exiftool.json", "r") as report_file:
            report = json.load(report_file)
        assert len(report) == 3
        # verify exiftool metadata was updated
        for file in report:
            exif = ExifTool(file["filename"]).asdict()
            assert "Pumpkin Farm" in exif["IPTC:Keywords"]
        # now, export with --exiftool --update, no files should be updated
        result = runner.invoke(
            export,
            [
                "--db",
                os.path.join(cwd, PHOTOS_DB_15_7),
                temp_dir,
                "-V",
                "--exiftool",
                "--update",
                "--album",
                "Pumpkin Farm",
                "--album-keyword",
            ],
        )
        assert result.exit_code == 0
        assert "exported: 0, updated: 0, skipped: 3" in result.output
@pytest.mark.skipif(exiftool_path is None, reason="exiftool not installed")
def test_export_exiftool_keyword_template():
    """Test `osxphotos exiftool` with --keyword-template.

    Runs the exiftool command with a literal keyword template ("FOO"),
    verifies via the JSON report that the keyword was written to every file,
    then confirms `export --exiftool --update` with the same template skips
    all files.
    """
    runner = CliRunner()
    cwd = os.getcwd()
    with runner.isolated_filesystem() as temp_dir:
        uuid_option = []
        for uuid in CLI_EXIFTOOL:
            uuid_option.extend(("--uuid", uuid))
        # first, export without --exiftool
        result = runner.invoke(
            export,
            [
                "--db",
                os.path.join(cwd, PHOTOS_DB_15_7),
                temp_dir,
                "-V",
                *uuid_option,
            ],
        )
        assert result.exit_code == 0
        # now, run exiftool command to update exiftool metadata
        result = runner.invoke(
            exiftool,
            [
                "--db",
                os.path.join(cwd, PHOTOS_DB_15_7),
                "-V",
                "--db-config",
                "--keyword-template",
                "FOO",
                temp_dir,
                "--report",
                "exiftool.json",
            ],
        )
        assert result.exit_code == 0
        # context manager closes the report file (the original leaked the
        # handle from json.load(open(...)))
        with open("exiftool.json", "r") as report_file:
            report = json.load(report_file)
        for file in report:
            exif = ExifTool(file["filename"]).asdict()
            assert "FOO" in exif["IPTC:Keywords"]
        # now, export with --exiftool --update, no files should be updated
        result = runner.invoke(
            export,
            [
                "--db",
                os.path.join(cwd, PHOTOS_DB_15_7),
                temp_dir,
                "-V",
                "--exiftool",
                "--keyword-template",
                "FOO",
                "--update",
                *uuid_option,
            ],
        )
        assert result.exit_code == 0
        assert f"exported: 0, updated: 0, skipped: {len(CLI_EXIFTOOL)}" in result.output
@pytest.mark.skipif(exiftool_path is None, reason="exiftool not installed")
def test_export_exiftool_load_config():
    """Test `osxphotos exiftool` with --load-config.

    Saves a config during export, re-runs the exiftool command driven purely
    by --load-config, and verifies EXIF values for *every* exported uuid (the
    original version only checked the uuid left over from the option-building
    loop). Finally confirms `export --exiftool --update` skips all files.
    """
    runner = CliRunner()
    cwd = os.getcwd()
    with runner.isolated_filesystem() as temp_dir:
        uuid_option = []
        for uuid in CLI_EXIFTOOL:
            uuid_option.extend(("--uuid", uuid))
        # first, export without --exiftool
        result = runner.invoke(
            export,
            [
                "--db",
                os.path.join(cwd, PHOTOS_DB_15_7),
                temp_dir,
                "-V",
                "--save-config",
                "config.toml",
                *uuid_option,
            ],
        )
        assert result.exit_code == 0
        # now, run exiftool command to update exiftool metadata
        result = runner.invoke(
            exiftool,
            ["-V", "--load-config", "config.toml", temp_dir],
        )
        assert result.exit_code == 0
        # verify metadata for every exported photo, not just the last uuid
        for uuid in CLI_EXIFTOOL:
            exif = ExifTool(CLI_EXIFTOOL[uuid]["File:FileName"]).asdict()
            for key in CLI_EXIFTOOL[uuid]:
                if isinstance(exif[key], list):
                    # keyword-style tags have no guaranteed order
                    assert sorted(exif[key]) == sorted(CLI_EXIFTOOL[uuid][key])
                else:
                    assert exif[key] == CLI_EXIFTOOL[uuid][key]
        # now, export with --exiftool --update, no files should be updated
        result = runner.invoke(
            export,
            [
                "--db",
                os.path.join(cwd, PHOTOS_DB_15_7),
                temp_dir,
                "-V",
                "--exiftool",
                "--update",
                *uuid_option,
            ],
        )
        assert result.exit_code == 0
        assert f"exported: 0, updated: 0, skipped: {len(CLI_EXIFTOOL)}" in result.output
| 30.303371 | 88 | 0.502163 |
import glob
import json
import os
import pytest
from click.testing import CliRunner
from osxphotos.cli.exiftool_cli import exiftool
from osxphotos.cli.export import export
from osxphotos.exiftool import ExifTool, get_exiftool_path
from .test_cli import CLI_EXIFTOOL, PHOTOS_DB_15_7
try:
exiftool_path = get_exiftool_path()
except FileNotFoundError:
exiftool_path = None
@pytest.mark.skipif(exiftool_path is None, reason="exiftool not installed")
def test_export_exiftool():
runner = CliRunner()
cwd = os.getcwd()
with runner.isolated_filesystem() as temp_dir:
uuid_option = []
for uuid in CLI_EXIFTOOL:
uuid_option.extend(("--uuid", uuid))
result = runner.invoke(
export,
[
"--db",
os.path.join(cwd, PHOTOS_DB_15_7),
temp_dir,
"-V",
*uuid_option,
],
)
assert result.exit_code == 0
files = glob.glob("*")
assert sorted(files) == sorted(
[CLI_EXIFTOOL[uuid]["File:FileName"] for uuid in CLI_EXIFTOOL]
)
result = runner.invoke(
exiftool,
["--db", os.path.join(cwd, PHOTOS_DB_15_7), "-V", "--db-config", temp_dir],
)
assert result.exit_code == 0
exif = ExifTool(CLI_EXIFTOOL[uuid]["File:FileName"]).asdict()
for key in CLI_EXIFTOOL[uuid]:
if type(exif[key]) == list:
assert sorted(exif[key]) == sorted(CLI_EXIFTOOL[uuid][key])
else:
assert exif[key] == CLI_EXIFTOOL[uuid][key]
result = runner.invoke(
export,
[
"--db",
os.path.join(cwd, PHOTOS_DB_15_7),
temp_dir,
"-V",
"--exiftool",
"--update",
*uuid_option,
],
)
assert result.exit_code == 0
assert f"exported: 0, updated: 0, skipped: {len(CLI_EXIFTOOL)}" in result.output
@pytest.mark.skipif(exiftool_path is None, reason="exiftool not installed")
def test_export_exiftool_album_keyword():
runner = CliRunner()
cwd = os.getcwd()
with runner.isolated_filesystem() as temp_dir:
result = runner.invoke(
export,
[
"--db",
os.path.join(cwd, PHOTOS_DB_15_7),
temp_dir,
"-V",
"--album",
"Pumpkin Farm",
],
)
assert result.exit_code == 0
files = glob.glob("*")
assert len(files) == 3
result = runner.invoke(
exiftool,
[
"--db",
os.path.join(cwd, PHOTOS_DB_15_7),
"-V",
"--db-config",
"--report",
"exiftool.json",
"--album-keyword",
temp_dir,
],
)
assert result.exit_code == 0
report = json.load(open("exiftool.json", "r"))
assert len(report) == 3
for file in report:
exif = ExifTool(file["filename"]).asdict()
assert "Pumpkin Farm" in exif["IPTC:Keywords"]
result = runner.invoke(
export,
[
"--db",
os.path.join(cwd, PHOTOS_DB_15_7),
temp_dir,
"-V",
"--exiftool",
"--update",
"--album",
"Pumpkin Farm",
"--album-keyword",
],
)
assert result.exit_code == 0
assert f"exported: 0, updated: 0, skipped: 3" in result.output
@pytest.mark.skipif(exiftool_path is None, reason="exiftool not installed")
def test_export_exiftool_keyword_template():
runner = CliRunner()
cwd = os.getcwd()
with runner.isolated_filesystem() as temp_dir:
uuid_option = []
for uuid in CLI_EXIFTOOL:
uuid_option.extend(("--uuid", uuid))
result = runner.invoke(
export,
[
"--db",
os.path.join(cwd, PHOTOS_DB_15_7),
temp_dir,
"-V",
*uuid_option,
],
)
assert result.exit_code == 0
result = runner.invoke(
exiftool,
[
"--db",
os.path.join(cwd, PHOTOS_DB_15_7),
"-V",
"--db-config",
"--keyword-template",
"FOO",
temp_dir,
"--report",
"exiftool.json",
],
)
assert result.exit_code == 0
report = json.load(open("exiftool.json", "r"))
for file in report:
exif = ExifTool(file["filename"]).asdict()
assert "FOO" in exif["IPTC:Keywords"]
result = runner.invoke(
export,
[
"--db",
os.path.join(cwd, PHOTOS_DB_15_7),
temp_dir,
"-V",
"--exiftool",
"--keyword-template",
"FOO",
"--update",
*uuid_option,
],
)
assert result.exit_code == 0
assert f"exported: 0, updated: 0, skipped: {len(CLI_EXIFTOOL)}" in result.output
@pytest.mark.skipif(exiftool_path is None, reason="exiftool not installed")
def test_export_exiftool_load_config():
runner = CliRunner()
cwd = os.getcwd()
with runner.isolated_filesystem() as temp_dir:
uuid_option = []
for uuid in CLI_EXIFTOOL:
uuid_option.extend(("--uuid", uuid))
result = runner.invoke(
export,
[
"--db",
os.path.join(cwd, PHOTOS_DB_15_7),
temp_dir,
"-V",
"--save-config",
"config.toml",
*uuid_option,
],
)
assert result.exit_code == 0
result = runner.invoke(
exiftool,
["-V", "--load-config", "config.toml", temp_dir],
)
assert result.exit_code == 0
exif = ExifTool(CLI_EXIFTOOL[uuid]["File:FileName"]).asdict()
for key in CLI_EXIFTOOL[uuid]:
if type(exif[key]) == list:
assert sorted(exif[key]) == sorted(CLI_EXIFTOOL[uuid][key])
else:
assert exif[key] == CLI_EXIFTOOL[uuid][key]
result = runner.invoke(
export,
[
"--db",
os.path.join(cwd, PHOTOS_DB_15_7),
temp_dir,
"-V",
"--exiftool",
"--update",
*uuid_option,
],
)
assert result.exit_code == 0
assert f"exported: 0, updated: 0, skipped: {len(CLI_EXIFTOOL)}" in result.output
| true | true |
f720fb57cc3918cd168d86f2c7f319f139afdefb | 1,488 | py | Python | datasets/raman_tablets/__init__.py | ryuzakyl/data-bloodhound | ae0413e748e55a0d2dbae35bbe96a672f313a64b | [
"Apache-2.0"
] | 3 | 2019-03-18T03:22:06.000Z | 2021-04-06T07:53:51.000Z | datasets/raman_tablets/__init__.py | ryuzakyl/data-bloodhound | ae0413e748e55a0d2dbae35bbe96a672f313a64b | [
"Apache-2.0"
] | null | null | null | datasets/raman_tablets/__init__.py | ryuzakyl/data-bloodhound | ae0413e748e55a0d2dbae35bbe96a672f313a64b | [
"Apache-2.0"
] | 2 | 2020-10-05T08:22:25.000Z | 2020-10-05T08:24:02.000Z | #!/usr/bin/env
# -*- coding: utf-8 -*-
# Copyright (C) Victor M. Mendiola Lau - All Rights Reserved
# Unauthorized copying of this file, via any medium is strictly prohibited
# Proprietary and confidential
# Written by Victor M. Mendiola Lau <ryuzakyl@gmail.com>, February 2017
import os
import scipy.io as sio
import utils.datasets as utils
# ---------------------------------------------------------------
# data set paths
__data_set_path = "{}/data/Ramandata_tablets.mat".format(os.path.split(__file__)[0])
__pickle_path = "{}/cache/raman_tablets.pickle".format(os.path.split(__file__)[0])
# ---------------------------------------------------------------
# TODO: Add docstring with usage examples (see 'uv_fuel' data set)
@utils.load_data_from_pickle(__pickle_path)
def load_raman_tablets():
    """Load the Raman tablets data set from the bundled MATLAB file.

    The decorator caches the result as a pickle on first load. Feature
    labels are taken from ``VarLabels`` (columns 3+, parsed as floats);
    the first two matrix columns are exposed as the extra columns
    'active (% w/w)' and 'Type'. Returns whatever ``utils.build_data_set``
    produces (presumably a labeled data frame — confirm in utils.datasets).
    """
    # loading matlab data set
    raw_data = sio.loadmat(__data_set_path)
    # getting samples labels
    samples_labels = raw_data['ObjLabels'].tolist()
    # getting features labels
    raw_features = raw_data['VarLabels'].tolist()
    features_labels = list(map(float, raw_features[2:]))
    # getting data
    raw_data = raw_data['Matrix']
    data = raw_data[:, 2:]
    # creating the extra columns
    other_cols = {
        'active (% w/w)': raw_data[:, 0].tolist(),
        'Type': raw_data[:, 1].astype(int).tolist(),
    }
    # returning the built data set
    return utils.build_data_set(data, samples_labels, features_labels, extra_cols=other_cols)
| 29.76 | 93 | 0.635753 |
import os
import scipy.io as sio
import utils.datasets as utils
__data_set_path = "{}/data/Ramandata_tablets.mat".format(os.path.split(__file__)[0])
__pickle_path = "{}/cache/raman_tablets.pickle".format(os.path.split(__file__)[0])
@utils.load_data_from_pickle(__pickle_path)
def load_raman_tablets():
raw_data = sio.loadmat(__data_set_path)
samples_labels = raw_data['ObjLabels'].tolist()
raw_features = raw_data['VarLabels'].tolist()
features_labels = list(map(float, raw_features[2:]))
raw_data = raw_data['Matrix']
data = raw_data[:, 2:]
other_cols = {
'active (% w/w)': raw_data[:, 0].tolist(),
'Type': raw_data[:, 1].astype(int).tolist(),
}
return utils.build_data_set(data, samples_labels, features_labels, extra_cols=other_cols)
| true | true |
f720fb60277344026d5780ac04e0013b225304fb | 4,616 | py | Python | homeassistant/components/climate/homekit_controller.py | dauden1184/home-assistant | f4c6d389b77d0efa86644e76604eaea5d21abdb5 | [
"Apache-2.0"
] | 4 | 2019-01-10T14:47:54.000Z | 2021-04-22T02:06:27.000Z | homeassistant/components/climate/homekit_controller.py | dauden1184/home-assistant | f4c6d389b77d0efa86644e76604eaea5d21abdb5 | [
"Apache-2.0"
] | 6 | 2021-02-08T20:25:50.000Z | 2022-03-11T23:27:53.000Z | homeassistant/components/climate/homekit_controller.py | dauden1184/home-assistant | f4c6d389b77d0efa86644e76604eaea5d21abdb5 | [
"Apache-2.0"
] | 3 | 2018-09-14T07:34:09.000Z | 2018-09-29T12:57:10.000Z | """
Support for Homekit climate devices.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/climate.homekit_controller/
"""
import logging
from homeassistant.components.homekit_controller import (
HomeKitEntity, KNOWN_ACCESSORIES)
from homeassistant.components.climate import (
ClimateDevice, STATE_HEAT, STATE_COOL, STATE_IDLE,
SUPPORT_TARGET_TEMPERATURE, SUPPORT_OPERATION_MODE)
from homeassistant.const import TEMP_CELSIUS, STATE_OFF, ATTR_TEMPERATURE
DEPENDENCIES = ['homekit_controller']
_LOGGER = logging.getLogger(__name__)
# Map of Homekit operation modes to hass modes
MODE_HOMEKIT_TO_HASS = {
0: STATE_OFF,
1: STATE_HEAT,
2: STATE_COOL,
}
# Map of hass operation modes to homekit modes
MODE_HASS_TO_HOMEKIT = {v: k for k, v in MODE_HOMEKIT_TO_HASS.items()}
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up a discovered HomeKit climate accessory."""
    if discovery_info is None:
        return
    serial = discovery_info['serial']
    accessory = hass.data[KNOWN_ACCESSORIES][serial]
    device = HomeKitClimateDevice(accessory, discovery_info)
    add_entities([device], True)
class HomeKitClimateDevice(HomeKitEntity, ClimateDevice):
    """Representation of a Homekit climate device.

    State is pushed in via ``update_characteristics``; the supported-feature
    flags and characteristic iids are discovered lazily from the accessory's
    advertised characteristics.
    """
    def __init__(self, *args):
        """Initialise the device with empty/unknown state."""
        super().__init__(*args)
        self._state = None
        self._current_mode = None
        self._valid_modes = []
        self._current_temp = None
        self._target_temp = None
    def update_characteristics(self, characteristics):
        """Synchronise device state with Home Assistant.

        Records characteristic iids in self._chars (needed later for writes)
        and ORs feature flags into self._features as capabilities appear.
        """
        # pylint: disable=import-error
        from homekit import CharacteristicsTypes as ctypes
        for characteristic in characteristics:
            ctype = characteristic['type']
            if ctype == ctypes.HEATING_COOLING_CURRENT:
                self._state = MODE_HOMEKIT_TO_HASS.get(
                    characteristic['value'])
            # second plain `if` (not elif): a characteristic has one type,
            # so only one branch can match anyway
            if ctype == ctypes.HEATING_COOLING_TARGET:
                self._chars['target_mode'] = characteristic['iid']
                self._features |= SUPPORT_OPERATION_MODE
                self._current_mode = MODE_HOMEKIT_TO_HASS.get(
                    characteristic['value'])
                self._valid_modes = [MODE_HOMEKIT_TO_HASS.get(
                    mode) for mode in characteristic['valid-values']]
            elif ctype == ctypes.TEMPERATURE_CURRENT:
                self._current_temp = characteristic['value']
            elif ctype == ctypes.TEMPERATURE_TARGET:
                self._chars['target_temp'] = characteristic['iid']
                self._features |= SUPPORT_TARGET_TEMPERATURE
                self._target_temp = characteristic['value']
    def set_temperature(self, **kwargs):
        """Set new target temperature by writing the stored iid."""
        temp = kwargs.get(ATTR_TEMPERATURE)
        characteristics = [{'aid': self._aid,
                            'iid': self._chars['target_temp'],
                            'value': temp}]
        self.put_characteristics(characteristics)
    def set_operation_mode(self, operation_mode):
        """Set new target operation mode (hass mode mapped to HomeKit int)."""
        characteristics = [{'aid': self._aid,
                            'iid': self._chars['target_mode'],
                            'value': MODE_HASS_TO_HOMEKIT[operation_mode]}]
        self.put_characteristics(characteristics)
    @property
    def state(self):
        """Return the current state (off/heat/cool/idle)."""
        # If the device reports its operating mode as off, it sometimes doesn't
        # report a new state.
        if self._current_mode == STATE_OFF:
            return STATE_OFF
        # mode is heat/cool but the device is not actively running -> idle
        if self._state == STATE_OFF and self._current_mode != STATE_OFF:
            return STATE_IDLE
        return self._state
    @property
    def current_temperature(self):
        """Return the current temperature."""
        return self._current_temp
    @property
    def target_temperature(self):
        """Return the temperature we try to reach."""
        return self._target_temp
    @property
    def current_operation(self):
        """Return current operation ie. heat, cool, idle."""
        return self._current_mode
    @property
    def operation_list(self):
        """Return the list of available operation modes."""
        return self._valid_modes
    @property
    def supported_features(self):
        """Return the bitmask of supported features discovered so far."""
        return self._features
    @property
    def temperature_unit(self):
        """Return the unit of measurement."""
        return TEMP_CELSIUS
| 35.236641 | 79 | 0.649697 | import logging
from homeassistant.components.homekit_controller import (
HomeKitEntity, KNOWN_ACCESSORIES)
from homeassistant.components.climate import (
ClimateDevice, STATE_HEAT, STATE_COOL, STATE_IDLE,
SUPPORT_TARGET_TEMPERATURE, SUPPORT_OPERATION_MODE)
from homeassistant.const import TEMP_CELSIUS, STATE_OFF, ATTR_TEMPERATURE
DEPENDENCIES = ['homekit_controller']
_LOGGER = logging.getLogger(__name__)
MODE_HOMEKIT_TO_HASS = {
0: STATE_OFF,
1: STATE_HEAT,
2: STATE_COOL,
}
MODE_HASS_TO_HOMEKIT = {v: k for k, v in MODE_HOMEKIT_TO_HASS.items()}
def setup_platform(hass, config, add_entities, discovery_info=None):
if discovery_info is not None:
accessory = hass.data[KNOWN_ACCESSORIES][discovery_info['serial']]
add_entities([HomeKitClimateDevice(accessory, discovery_info)], True)
class HomeKitClimateDevice(HomeKitEntity, ClimateDevice):
def __init__(self, *args):
super().__init__(*args)
self._state = None
self._current_mode = None
self._valid_modes = []
self._current_temp = None
self._target_temp = None
def update_characteristics(self, characteristics):
from homekit import CharacteristicsTypes as ctypes
for characteristic in characteristics:
ctype = characteristic['type']
if ctype == ctypes.HEATING_COOLING_CURRENT:
self._state = MODE_HOMEKIT_TO_HASS.get(
characteristic['value'])
if ctype == ctypes.HEATING_COOLING_TARGET:
self._chars['target_mode'] = characteristic['iid']
self._features |= SUPPORT_OPERATION_MODE
self._current_mode = MODE_HOMEKIT_TO_HASS.get(
characteristic['value'])
self._valid_modes = [MODE_HOMEKIT_TO_HASS.get(
mode) for mode in characteristic['valid-values']]
elif ctype == ctypes.TEMPERATURE_CURRENT:
self._current_temp = characteristic['value']
elif ctype == ctypes.TEMPERATURE_TARGET:
self._chars['target_temp'] = characteristic['iid']
self._features |= SUPPORT_TARGET_TEMPERATURE
self._target_temp = characteristic['value']
def set_temperature(self, **kwargs):
temp = kwargs.get(ATTR_TEMPERATURE)
characteristics = [{'aid': self._aid,
'iid': self._chars['target_temp'],
'value': temp}]
self.put_characteristics(characteristics)
def set_operation_mode(self, operation_mode):
characteristics = [{'aid': self._aid,
'iid': self._chars['target_mode'],
'value': MODE_HASS_TO_HOMEKIT[operation_mode]}]
self.put_characteristics(characteristics)
@property
def state(self):
# report a new state.
if self._current_mode == STATE_OFF:
return STATE_OFF
if self._state == STATE_OFF and self._current_mode != STATE_OFF:
return STATE_IDLE
return self._state
@property
def current_temperature(self):
return self._current_temp
@property
def target_temperature(self):
return self._target_temp
@property
def current_operation(self):
return self._current_mode
@property
def operation_list(self):
return self._valid_modes
@property
def supported_features(self):
return self._features
@property
def temperature_unit(self):
return TEMP_CELSIUS
| true | true |
f720fb753855fb74cefd74341a9ca1be69022a34 | 247 | py | Python | frappe/patches/v5_3/rename_chinese_languages.py | Nxweb-in/frappe | 56b3eb52bf56dd71bee29fde3ed28ed9c6d15947 | [
"MIT"
] | 1 | 2021-06-03T07:04:48.000Z | 2021-06-03T07:04:48.000Z | frappe/patches/v5_3/rename_chinese_languages.py | Nxweb-in/frappe | 56b3eb52bf56dd71bee29fde3ed28ed9c6d15947 | [
"MIT"
] | null | null | null | frappe/patches/v5_3/rename_chinese_languages.py | Nxweb-in/frappe | 56b3eb52bf56dd71bee29fde3ed28ed9c6d15947 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import frappe
from frappe.translate import rename_language
def execute():
    """Patch: rename the Chinese language records to their native labels."""
    renames = (
        ("中国(简体)", "簡體中文"),
        ("中國(繁體)", "正體中文"),
    )
    for old_name, new_name in renames:
        rename_language(old_name, new_name)
| 19 | 48 | 0.684211 |
import frappe
from frappe.translate import rename_language
def execute():
language_map = {
"中国(简体)": "簡體中文",
"中國(繁體)": "正體中文"
}
for old_name, new_name in language_map.items():
rename_language(old_name, new_name)
| true | true |
f720fbff40e522e9a078688ae64f8333f985dc4f | 110 | py | Python | video.py | KazukiChiyo/lane-keeping | 46ac1ce2cb96eb32a0da4946433c8d0ecbf4dc53 | [
"MIT"
] | 1 | 2018-10-09T12:59:30.000Z | 2018-10-09T12:59:30.000Z | video.py | KazukiChiyo/lane-keeping | 46ac1ce2cb96eb32a0da4946433c8d0ecbf4dc53 | [
"MIT"
] | null | null | null | video.py | KazukiChiyo/lane-keeping | 46ac1ce2cb96eb32a0da4946433c8d0ecbf4dc53 | [
"MIT"
] | 1 | 2020-05-22T05:57:29.000Z | 2020-05-22T05:57:29.000Z | from moviepy.editor import VideoFileClip
clip = VideoFileClip("output_images/out_video.mp4")
print(clip.fps)
| 22 | 51 | 0.818182 | from moviepy.editor import VideoFileClip
clip = VideoFileClip("output_images/out_video.mp4")
print(clip.fps)
| true | true |
f720fc48a7b225366d7031ba6afe3845468b78f8 | 5,354 | py | Python | tests/test_node_licenses.py | gaybro8777/osf.io | 30408511510a40bc393565817b343ef5fd76ab14 | [
"Apache-2.0"
] | 628 | 2015-01-15T04:33:22.000Z | 2022-03-30T06:40:10.000Z | tests/test_node_licenses.py | gaybro8777/osf.io | 30408511510a40bc393565817b343ef5fd76ab14 | [
"Apache-2.0"
] | 4,712 | 2015-01-02T01:41:53.000Z | 2022-03-30T14:18:40.000Z | tests/test_node_licenses.py | Johnetordoff/osf.io | de10bf249c46cede04c78f7e6f7e352c69e6e6b5 | [
"Apache-2.0"
] | 371 | 2015-01-12T16:14:08.000Z | 2022-03-31T18:58:29.000Z | # -*- coding: utf-8 -*-
import builtins
import json
import unittest
import mock
import pytest
from django.core.exceptions import ValidationError
from nose.tools import * # noqa: F403 (PEP8 asserts)
from framework.auth import Auth
from osf_tests.factories import (AuthUserFactory, NodeLicenseRecordFactory,
ProjectFactory)
from tests.base import OsfTestCase
from osf.utils.migrations import ensure_licenses
from tests.utils import assert_logs, assert_not_logs
from website import settings
from osf.models.licenses import NodeLicense, serialize_node_license_record, serialize_node_license
from osf.models import NodeLog
from osf.exceptions import NodeStateError
CHANGED_NAME = 'FOO BAR'
CHANGED_TEXT = 'Some good new text'
CHANGED_PROPERTIES = ['foo', 'bar']
LICENSE_TEXT = json.dumps({
'MIT': {
'name': CHANGED_NAME,
'text': CHANGED_TEXT,
'properties': CHANGED_PROPERTIES
}
})
class TestNodeLicenses(OsfTestCase):
    """Tests for node license serialization and the ensure_licenses helper."""
    def setUp(self):
        super(TestNodeLicenses, self).setUp()
        # a project whose license record is an MIT license with known
        # year/holders, used by the serialization tests below
        self.user = AuthUserFactory()
        self.node = ProjectFactory(creator=self.user)
        self.LICENSE_NAME = 'MIT License'
        self.node_license = NodeLicense.objects.get(name=self.LICENSE_NAME)
        self.YEAR = '2105'
        self.COPYRIGHT_HOLDERS = ['Foo', 'Bar']
        self.node.node_license = NodeLicenseRecordFactory(
            node_license=self.node_license,
            year=self.YEAR,
            copyright_holders=self.COPYRIGHT_HOLDERS
        )
        self.node.save()
    def test_serialize_node_license(self):
        serialized = serialize_node_license(self.node_license)
        assert_equal(serialized['name'], self.LICENSE_NAME)
        assert_equal(serialized['id'], self.node_license.license_id)
        assert_equal(serialized['text'], self.node_license.text)
    def test_serialize_node_license_record(self):
        # record serialization includes the license fields plus year/holders
        serialized = serialize_node_license_record(self.node.node_license)
        assert_equal(serialized['name'], self.LICENSE_NAME)
        assert_equal(serialized['id'], self.node_license.license_id)
        assert_equal(serialized['text'], self.node_license.text)
        assert_equal(serialized['year'], self.YEAR)
        assert_equal(serialized['copyright_holders'], self.COPYRIGHT_HOLDERS)
    def test_serialize_node_license_record_None(self):
        # a node without a license record serializes to an empty dict
        self.node.node_license = None
        serialized = serialize_node_license_record(self.node.node_license)
        assert_equal(serialized, {})
    def test_copy_node_license_record(self):
        # copy() yields a new record (new _id) with identical license fields
        record = self.node.node_license
        copied = record.copy()
        assert_is_not_none(copied._id)
        assert_not_equal(record._id, copied._id)
        for prop in ('license_id', 'name', 'node_license'):
            assert_equal(getattr(record, prop), getattr(copied, prop))
    @pytest.mark.enable_implicit_clean
    def test_license_uniqueness_on_id_is_enforced_in_the_database(self):
        NodeLicense(license_id='foo', name='bar', text='baz').save()
        assert_raises(ValidationError, NodeLicense(license_id='foo', name='buz', text='boo').save)
    def test_ensure_licenses_updates_existing_licenses(self):
        # (0, 18): presumably (created, updated) — all 18 bundled licenses
        # already exist, so each is updated; confirm against ensure_licenses
        assert_equal(ensure_licenses(), (0, 18))
    def test_ensure_licenses_no_licenses(self):
        # ensure_licenses recreates the full set after a wipe
        before_count = NodeLicense.objects.all().count()
        NodeLicense.objects.all().delete()
        assert_false(NodeLicense.objects.all().count())
        ensure_licenses()
        assert_equal(before_count, NodeLicense.objects.all().count())
    def test_ensure_licenses_some_missing(self):
        # a single deleted license is restored
        NodeLicense.objects.get(license_id='LGPL3').delete()
        with assert_raises(NodeLicense.DoesNotExist):
            NodeLicense.objects.get(license_id='LGPL3')
        ensure_licenses()
        found = NodeLicense.objects.get(license_id='LGPL3')
        assert_is_not_none(found)
    def test_ensure_licenses_updates_existing(self):
        # patch open() so ensure_licenses reads LICENSE_TEXT instead of the
        # real licenses file, then check the MIT row picked up the changes
        with mock.patch.object(builtins, 'open', mock.mock_open(read_data=LICENSE_TEXT)):
            ensure_licenses()
        MIT = NodeLicense.objects.get(license_id='MIT')
        assert_equal(MIT.name, CHANGED_NAME)
        assert_equal(MIT.text, CHANGED_TEXT)
        assert_equal(MIT.properties, CHANGED_PROPERTIES)
    @assert_logs(NodeLog.CHANGED_LICENSE, 'node')
    def test_Node_set_node_license(self):
        GPL3 = NodeLicense.objects.get(license_id='GPL3')
        NEW_YEAR = '2014'
        COPYLEFT_HOLDERS = ['Richard Stallman']
        self.node.set_node_license(
            {
                'id': GPL3.license_id,
                'year': NEW_YEAR,
                'copyrightHolders': COPYLEFT_HOLDERS
            },
            auth=Auth(self.user),
            save=True
        )
        assert_equal(self.node.node_license.license_id, GPL3.license_id)
        assert_equal(self.node.node_license.name, GPL3.name)
        assert_equal(self.node.node_license.copyright_holders, COPYLEFT_HOLDERS)
    @assert_not_logs(NodeLog.CHANGED_LICENSE, 'node')
    def test_Node_set_node_license_invalid(self):
        # an unknown license id must raise and must not write a log entry
        with assert_raises(NodeStateError):
            self.node.set_node_license(
                {
                    'id': 'SOME ID',
                    'year': 'foo',
                    'copyrightHolders': []
                },
                auth=Auth(self.user)
            )
| 37.704225 | 98 | 0.678371 |
import builtins
import json
import unittest
import mock
import pytest
from django.core.exceptions import ValidationError
from nose.tools import *
from framework.auth import Auth
from osf_tests.factories import (AuthUserFactory, NodeLicenseRecordFactory,
ProjectFactory)
from tests.base import OsfTestCase
from osf.utils.migrations import ensure_licenses
from tests.utils import assert_logs, assert_not_logs
from website import settings
from osf.models.licenses import NodeLicense, serialize_node_license_record, serialize_node_license
from osf.models import NodeLog
from osf.exceptions import NodeStateError
CHANGED_NAME = 'FOO BAR'
CHANGED_TEXT = 'Some good new text'
CHANGED_PROPERTIES = ['foo', 'bar']
LICENSE_TEXT = json.dumps({
'MIT': {
'name': CHANGED_NAME,
'text': CHANGED_TEXT,
'properties': CHANGED_PROPERTIES
}
})
class TestNodeLicenses(OsfTestCase):
def setUp(self):
super(TestNodeLicenses, self).setUp()
self.user = AuthUserFactory()
self.node = ProjectFactory(creator=self.user)
self.LICENSE_NAME = 'MIT License'
self.node_license = NodeLicense.objects.get(name=self.LICENSE_NAME)
self.YEAR = '2105'
self.COPYRIGHT_HOLDERS = ['Foo', 'Bar']
self.node.node_license = NodeLicenseRecordFactory(
node_license=self.node_license,
year=self.YEAR,
copyright_holders=self.COPYRIGHT_HOLDERS
)
self.node.save()
def test_serialize_node_license(self):
serialized = serialize_node_license(self.node_license)
assert_equal(serialized['name'], self.LICENSE_NAME)
assert_equal(serialized['id'], self.node_license.license_id)
assert_equal(serialized['text'], self.node_license.text)
def test_serialize_node_license_record(self):
serialized = serialize_node_license_record(self.node.node_license)
assert_equal(serialized['name'], self.LICENSE_NAME)
assert_equal(serialized['id'], self.node_license.license_id)
assert_equal(serialized['text'], self.node_license.text)
assert_equal(serialized['year'], self.YEAR)
assert_equal(serialized['copyright_holders'], self.COPYRIGHT_HOLDERS)
def test_serialize_node_license_record_None(self):
self.node.node_license = None
serialized = serialize_node_license_record(self.node.node_license)
assert_equal(serialized, {})
def test_copy_node_license_record(self):
record = self.node.node_license
copied = record.copy()
assert_is_not_none(copied._id)
assert_not_equal(record._id, copied._id)
for prop in ('license_id', 'name', 'node_license'):
assert_equal(getattr(record, prop), getattr(copied, prop))
@pytest.mark.enable_implicit_clean
def test_license_uniqueness_on_id_is_enforced_in_the_database(self):
NodeLicense(license_id='foo', name='bar', text='baz').save()
assert_raises(ValidationError, NodeLicense(license_id='foo', name='buz', text='boo').save)
def test_ensure_licenses_updates_existing_licenses(self):
assert_equal(ensure_licenses(), (0, 18))
def test_ensure_licenses_no_licenses(self):
before_count = NodeLicense.objects.all().count()
NodeLicense.objects.all().delete()
assert_false(NodeLicense.objects.all().count())
ensure_licenses()
assert_equal(before_count, NodeLicense.objects.all().count())
def test_ensure_licenses_some_missing(self):
NodeLicense.objects.get(license_id='LGPL3').delete()
with assert_raises(NodeLicense.DoesNotExist):
NodeLicense.objects.get(license_id='LGPL3')
ensure_licenses()
found = NodeLicense.objects.get(license_id='LGPL3')
assert_is_not_none(found)
def test_ensure_licenses_updates_existing(self):
with mock.patch.object(builtins, 'open', mock.mock_open(read_data=LICENSE_TEXT)):
ensure_licenses()
MIT = NodeLicense.objects.get(license_id='MIT')
assert_equal(MIT.name, CHANGED_NAME)
assert_equal(MIT.text, CHANGED_TEXT)
assert_equal(MIT.properties, CHANGED_PROPERTIES)
@assert_logs(NodeLog.CHANGED_LICENSE, 'node')
def test_Node_set_node_license(self):
GPL3 = NodeLicense.objects.get(license_id='GPL3')
NEW_YEAR = '2014'
COPYLEFT_HOLDERS = ['Richard Stallman']
self.node.set_node_license(
{
'id': GPL3.license_id,
'year': NEW_YEAR,
'copyrightHolders': COPYLEFT_HOLDERS
},
auth=Auth(self.user),
save=True
)
assert_equal(self.node.node_license.license_id, GPL3.license_id)
assert_equal(self.node.node_license.name, GPL3.name)
assert_equal(self.node.node_license.copyright_holders, COPYLEFT_HOLDERS)
@assert_not_logs(NodeLog.CHANGED_LICENSE, 'node')
def test_Node_set_node_license_invalid(self):
with assert_raises(NodeStateError):
self.node.set_node_license(
{
'id': 'SOME ID',
'year': 'foo',
'copyrightHolders': []
},
auth=Auth(self.user)
)
| true | true |
f720fc870a26f0386b206c00d49fa2c271f5ac7a | 6,675 | py | Python | cavalgada_do_mar/src/webapps/website.py | ProfessionalIT/customers | 3dbc1989bb3494fb6de7edad67dc59b7b0385ac3 | [
"MIT"
] | null | null | null | cavalgada_do_mar/src/webapps/website.py | ProfessionalIT/customers | 3dbc1989bb3494fb6de7edad67dc59b7b0385ac3 | [
"MIT"
] | 1 | 2015-11-08T11:49:35.000Z | 2015-11-08T11:49:43.000Z | cavalgada_do_mar/src/webapps/website.py | ProfessionalIT/customers | 3dbc1989bb3494fb6de7edad67dc59b7b0385ac3 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import web
from web.contrib import PyRSS2Gen
import render_website as render
import model
import forms
import logging
from paginator import Paginator, PaginatorSearch, PaginatorPublicacao
from datetime import datetime
from configuration import WEBSITE_URL
from utils import break_string
urls = (
'', 'Index',
'/', 'Index',
'/index', 'Index',
'/quem-somos', 'QuemSomos',
'/historico', 'Historico',
'/projetos-sociais', 'ProjetosSociais',
'/percurso', 'Percurso',
'/atividades', 'Atividades',
'/comenda', 'Comenda',
'/premiacoes', 'Premiacoes',
'/dicas', 'Dicas',
'/albuns', 'Albuns',
'/fotos', 'Fotos',
'/videos', 'Videos',
'/depoimentos', 'Depoimentos',
'/patrocinadores', 'Patrocionadores',
'/inscricao', 'Inscricao',
'/noticias', 'Noticias',
'/noticia/(.+)', 'Noticia',
'/boletins', 'Boletins',
'/boletim/(.+)', 'Boletim',
'/fale-conosco', 'Contato',
'/agradece-contato', 'Agradecimento',
'/rss', 'RSS'
)
class Index:
def GET(self):
return render.layout('menu_home', 'Página Inicial do Site', render.index())
class QuemSomos:
def GET(self):
return render.layout('menu_quem_somos', 'Fundação Cavalgada do Mar', render.pagina('quem-somos'))
class Historico:
def GET(self):
return render.layout('menu_historico', 'Nosso Histórico', render.pagina('historico'))
class ProjetosSociais:
def GET(self):
return render.layout('menu_projetos_sociais', 'Nossos Projetos Sociais', render.pagina('projetos-sociais'))
class Percurso:
def GET(self):
return render.layout('menu_percurso', 'O Percurso da Cavalgada', render.pagina('percurso'))
class Atividades:
def GET(self):
return render.layout('menu_atividades', 'As Atividades', render.pagina('atividades'))
class Comenda:
def GET(self):
return render.layout('menu_comenda', 'A Comenda e os Comendadores', render.pagina('comenda'))
class Premiacoes:
def GET(self):
return render.layout('menu_premiacoes', 'As Premiações', render.pagina('premiacoes'))
class Dicas:
def GET(self):
return render.layout('menu_dicas', 'Dicas da Cavalgada do Mar', render.pagina('dicas'))
class Albuns:
def GET(self):
return render.layout('menu_albuns', 'Os Albúns', render.pagina('albuns'))
class Fotos:
def GET(self):
return render.layout('menu_albuns', 'As Fotos', render.pagina('fotos'))
class Videos:
def GET(self):
return render.layout('menu_albuns', 'Os Videos', render.pagina('videos'))
class Depoimentos:
def GET(self):
return render.layout('menu_depoimentos', 'Os Depoímentos', render.pagina('depoimentos'))
class Patrocionadores:
def GET(self):
return render.layout('menu_patrocinadores', 'Os Patrocinadores', render.pagina('patrocinadores'))
class Inscricao:
def GET(self):
return render.layout('menu_inscricao', 'Faça sua Inscrição', render.pagina('inscricao'))
class Noticias:
def GET(self):
pagination = PaginatorPublicacao(web.input(), 'noticias', order='data_hora desc')
return render.layout('menu_noticias', 'Notícias', render.noticias(pagination))
def POST(self):
pagination = PaginatorPublicacao(web.input(), 'noticias', order='data_hora desc')
return render.layout('menu_noticias', 'Notícias', render.noticias(pagination))
class Noticia:
def GET(self, slug_noticia):
return render.layout('menu_noticias', 'Notícias', render.noticia(slug_noticia))
class Boletins:
def GET(self):
pagination = PaginatorPublicacao(web.input(), 'boletins', order='data_hora desc')
return render.layout('menu_home', 'Boletins', render.boletins(pagination))
def POST(self):
pagination = PaginatorPublicacao(web.input(), 'boletins', order='data_hora desc')
return render.layout('menu_home', 'Boletins', render.boletins(pagination))
class Boletim:
def GET(self, slug_boletim):
return render.layout('menu_home', 'Boletins', render.boletim(slug_boletim))
class Contato:
def GET(self):
return render.layout('menu_fale_conosco', 'Contatos', render.contato())
def POST(self):
try:
i = web.input()
assunto='Assunto: ' + break_string(i.assunto)
nome='O visitante ' + break_string(i.nome)
telefone=' com o telefone: ' + break_string(i.telefone)
email=' com o E-mail: ' + break_string(i.email)
mensagem='Deixou a seguinte mensagem: ' + '\n\t' + break_string(i.texto)
mensagem_completa = nome + telefone + email + mensagem
to_email = 'henrique@equineclinic.com.br'
web.sendmail(email, to_email, '%s' % assunto, '%s' % mensagem_completa)
raise web.seeother('/agradece-contato')
except Exception:
raise
class Agradecimento:
def GET(self):
return render.layout('menu_fale_conosco', 'Contatos', render.pagina('agradece-contato'))
class RSS:
def GET(self):
items=[]
noticias = model.get_publicacoes_rss('Notícia')
boletins = model.get_publicacoes_rss('Boletim')
if noticias:
for entry in noticias:
link= WEBSITE_URL + '/noticia/%s' % entry.slug
items.append(PyRSS2Gen.RSSItem(title=entry.titulo,
link=link,
description=entry.intro,
author='Fundação Cultural Cavalgada do Mar em Viamão - RS',
guid=PyRSS2Gen.Guid(link),
pubDate=entry.data_hora))
if boletins:
for entry in boletins:
link= WEBSITE_URL + '/boletim/%s' % entry.slug
items.append(PyRSS2Gen.RSSItem(title=entry.titulo,
link=link,
description=entry.intro,
author='Fundação Cultural Cavalgada do Mar em Viamão - RS',
guid=PyRSS2Gen.Guid(link),
pubDate=entry.data_hora))
titulo = 'RSS da Cavalgada do Mar'
descricao = 'Últimas publicações da Fundação Cultural Cavalgada do Mar em Porto Alegre - RS.'
rss=PyRSS2Gen.RSS2(title=titulo,
link= WEBSITE_URL + '/rss',
description=descricao,
lastBuildDate=datetime.now(),
items=items)
web.header('Content-Type', 'application/rss+xml; charset=utf-8')
return rss.to_xml()
app = web.application(urls, globals())
def main():
pass
| 34.585492 | 115 | 0.625019 |
import web
from web.contrib import PyRSS2Gen
import render_website as render
import model
import forms
import logging
from paginator import Paginator, PaginatorSearch, PaginatorPublicacao
from datetime import datetime
from configuration import WEBSITE_URL
from utils import break_string
urls = (
'', 'Index',
'/', 'Index',
'/index', 'Index',
'/quem-somos', 'QuemSomos',
'/historico', 'Historico',
'/projetos-sociais', 'ProjetosSociais',
'/percurso', 'Percurso',
'/atividades', 'Atividades',
'/comenda', 'Comenda',
'/premiacoes', 'Premiacoes',
'/dicas', 'Dicas',
'/albuns', 'Albuns',
'/fotos', 'Fotos',
'/videos', 'Videos',
'/depoimentos', 'Depoimentos',
'/patrocinadores', 'Patrocionadores',
'/inscricao', 'Inscricao',
'/noticias', 'Noticias',
'/noticia/(.+)', 'Noticia',
'/boletins', 'Boletins',
'/boletim/(.+)', 'Boletim',
'/fale-conosco', 'Contato',
'/agradece-contato', 'Agradecimento',
'/rss', 'RSS'
)
class Index:
def GET(self):
return render.layout('menu_home', 'Página Inicial do Site', render.index())
class QuemSomos:
def GET(self):
return render.layout('menu_quem_somos', 'Fundação Cavalgada do Mar', render.pagina('quem-somos'))
class Historico:
def GET(self):
return render.layout('menu_historico', 'Nosso Histórico', render.pagina('historico'))
class ProjetosSociais:
def GET(self):
return render.layout('menu_projetos_sociais', 'Nossos Projetos Sociais', render.pagina('projetos-sociais'))
class Percurso:
def GET(self):
return render.layout('menu_percurso', 'O Percurso da Cavalgada', render.pagina('percurso'))
class Atividades:
def GET(self):
return render.layout('menu_atividades', 'As Atividades', render.pagina('atividades'))
class Comenda:
def GET(self):
return render.layout('menu_comenda', 'A Comenda e os Comendadores', render.pagina('comenda'))
class Premiacoes:
def GET(self):
return render.layout('menu_premiacoes', 'As Premiações', render.pagina('premiacoes'))
class Dicas:
def GET(self):
return render.layout('menu_dicas', 'Dicas da Cavalgada do Mar', render.pagina('dicas'))
class Albuns:
def GET(self):
return render.layout('menu_albuns', 'Os Albúns', render.pagina('albuns'))
class Fotos:
def GET(self):
return render.layout('menu_albuns', 'As Fotos', render.pagina('fotos'))
class Videos:
def GET(self):
return render.layout('menu_albuns', 'Os Videos', render.pagina('videos'))
class Depoimentos:
def GET(self):
return render.layout('menu_depoimentos', 'Os Depoímentos', render.pagina('depoimentos'))
class Patrocionadores:
def GET(self):
return render.layout('menu_patrocinadores', 'Os Patrocinadores', render.pagina('patrocinadores'))
class Inscricao:
def GET(self):
return render.layout('menu_inscricao', 'Faça sua Inscrição', render.pagina('inscricao'))
class Noticias:
def GET(self):
pagination = PaginatorPublicacao(web.input(), 'noticias', order='data_hora desc')
return render.layout('menu_noticias', 'Notícias', render.noticias(pagination))
def POST(self):
pagination = PaginatorPublicacao(web.input(), 'noticias', order='data_hora desc')
return render.layout('menu_noticias', 'Notícias', render.noticias(pagination))
class Noticia:
def GET(self, slug_noticia):
return render.layout('menu_noticias', 'Notícias', render.noticia(slug_noticia))
class Boletins:
def GET(self):
pagination = PaginatorPublicacao(web.input(), 'boletins', order='data_hora desc')
return render.layout('menu_home', 'Boletins', render.boletins(pagination))
def POST(self):
pagination = PaginatorPublicacao(web.input(), 'boletins', order='data_hora desc')
return render.layout('menu_home', 'Boletins', render.boletins(pagination))
class Boletim:
def GET(self, slug_boletim):
return render.layout('menu_home', 'Boletins', render.boletim(slug_boletim))
class Contato:
def GET(self):
return render.layout('menu_fale_conosco', 'Contatos', render.contato())
def POST(self):
try:
i = web.input()
assunto='Assunto: ' + break_string(i.assunto)
nome='O visitante ' + break_string(i.nome)
telefone=' com o telefone: ' + break_string(i.telefone)
email=' com o E-mail: ' + break_string(i.email)
mensagem='Deixou a seguinte mensagem: ' + '\n\t' + break_string(i.texto)
mensagem_completa = nome + telefone + email + mensagem
to_email = 'henrique@equineclinic.com.br'
web.sendmail(email, to_email, '%s' % assunto, '%s' % mensagem_completa)
raise web.seeother('/agradece-contato')
except Exception:
raise
class Agradecimento:
def GET(self):
return render.layout('menu_fale_conosco', 'Contatos', render.pagina('agradece-contato'))
class RSS:
def GET(self):
items=[]
noticias = model.get_publicacoes_rss('Notícia')
boletins = model.get_publicacoes_rss('Boletim')
if noticias:
for entry in noticias:
link= WEBSITE_URL + '/noticia/%s' % entry.slug
items.append(PyRSS2Gen.RSSItem(title=entry.titulo,
link=link,
description=entry.intro,
author='Fundação Cultural Cavalgada do Mar em Viamão - RS',
guid=PyRSS2Gen.Guid(link),
pubDate=entry.data_hora))
if boletins:
for entry in boletins:
link= WEBSITE_URL + '/boletim/%s' % entry.slug
items.append(PyRSS2Gen.RSSItem(title=entry.titulo,
link=link,
description=entry.intro,
author='Fundação Cultural Cavalgada do Mar em Viamão - RS',
guid=PyRSS2Gen.Guid(link),
pubDate=entry.data_hora))
titulo = 'RSS da Cavalgada do Mar'
descricao = 'Últimas publicações da Fundação Cultural Cavalgada do Mar em Porto Alegre - RS.'
rss=PyRSS2Gen.RSS2(title=titulo,
link= WEBSITE_URL + '/rss',
description=descricao,
lastBuildDate=datetime.now(),
items=items)
web.header('Content-Type', 'application/rss+xml; charset=utf-8')
return rss.to_xml()
app = web.application(urls, globals())
def main():
pass
| true | true |
f720fd62a5d1381a1365405380ceac93188e3ca0 | 11,640 | py | Python | clients/client/python/ory_client/model/project_revisions.py | ALTELMA/sdk | a04d56edd0431382dda8a9d10229b8479174aa8e | [
"Apache-2.0"
] | null | null | null | clients/client/python/ory_client/model/project_revisions.py | ALTELMA/sdk | a04d56edd0431382dda8a9d10229b8479174aa8e | [
"Apache-2.0"
] | null | null | null | clients/client/python/ory_client/model/project_revisions.py | ALTELMA/sdk | a04d56edd0431382dda8a9d10229b8479174aa8e | [
"Apache-2.0"
] | null | null | null | """
Ory APIs
Documentation for all public and administrative Ory APIs. Administrative APIs can only be accessed with a valid Personal Access Token. Public APIs are mostly used in browsers. # noqa: E501
The version of the OpenAPI document: v0.0.1-alpha.93
Contact: support@ory.sh
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from ory_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from ory_client.exceptions import ApiAttributeError
def lazy_import():
from ory_client.model.project_revision import ProjectRevision
globals()['ProjectRevision'] = ProjectRevision
class ProjectRevisions(ModelSimple):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'value': ([ProjectRevision],),
}
@cached_property
def discriminator():
return None
attribute_map = {}
read_only_vars = set()
_composed_schemas = None
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs):
"""ProjectRevisions - a model defined in OpenAPI
Note that value can be passed either in args or in kwargs, but not in both.
Args:
args[0] ([ProjectRevision]): # noqa: E501
Keyword Args:
value ([ProjectRevision]): # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
# required up here when default value is not given
_path_to_item = kwargs.pop('_path_to_item', ())
if 'value' in kwargs:
value = kwargs.pop('value')
elif args:
args = list(args)
value = args.pop(0)
else:
raise ApiTypeError(
"value is required, but not passed in args or kwargs and doesn't have default",
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.value = value
if kwargs:
raise ApiTypeError(
"Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % (
kwargs,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs):
"""ProjectRevisions - a model defined in OpenAPI
Note that value can be passed either in args or in kwargs, but not in both.
Args:
args[0] ([ProjectRevision]): # noqa: E501
Keyword Args:
value ([ProjectRevision]): # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
# required up here when default value is not given
_path_to_item = kwargs.pop('_path_to_item', ())
self = super(OpenApiModel, cls).__new__(cls)
if 'value' in kwargs:
value = kwargs.pop('value')
elif args:
args = list(args)
value = args.pop(0)
else:
raise ApiTypeError(
"value is required, but not passed in args or kwargs and doesn't have default",
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.value = value
if kwargs:
raise ApiTypeError(
"Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % (
kwargs,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
return self
| 40.842105 | 194 | 0.563574 |
import re
import sys
from ory_client.model_utils import (
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from ory_client.exceptions import ApiAttributeError
def lazy_import():
from ory_client.model.project_revision import ProjectRevision
globals()['ProjectRevision'] = ProjectRevision
class ProjectRevisions(ModelSimple):
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
lazy_import()
return {
'value': ([ProjectRevision],),
}
@cached_property
def discriminator():
return None
attribute_map = {}
read_only_vars = set()
_composed_schemas = None
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs):
_path_to_item = kwargs.pop('_path_to_item', ())
if 'value' in kwargs:
value = kwargs.pop('value')
elif args:
args = list(args)
value = args.pop(0)
else:
raise ApiTypeError(
"value is required, but not passed in args or kwargs and doesn't have default",
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.value = value
if kwargs:
raise ApiTypeError(
"Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % (
kwargs,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs):
# required up here when default value is not given
_path_to_item = kwargs.pop('_path_to_item', ())
self = super(OpenApiModel, cls).__new__(cls)
if 'value' in kwargs:
value = kwargs.pop('value')
elif args:
args = list(args)
value = args.pop(0)
else:
raise ApiTypeError(
"value is required, but not passed in args or kwargs and doesn't have default",
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.value = value
if kwargs:
raise ApiTypeError(
"Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % (
kwargs,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
return self
| true | true |
f720fe1037c1d4bf5fae4c4643726fa3e26e29a5 | 2,400 | py | Python | rlkit/core/eval_util.py | ethanabrooks/oyster | 08b758b15ca19c50c43a137cba733b79be55654a | [
"MIT"
] | null | null | null | rlkit/core/eval_util.py | ethanabrooks/oyster | 08b758b15ca19c50c43a137cba733b79be55654a | [
"MIT"
] | null | null | null | rlkit/core/eval_util.py | ethanabrooks/oyster | 08b758b15ca19c50c43a137cba733b79be55654a | [
"MIT"
] | null | null | null | """
Common evaluation utilities.
"""
from collections import OrderedDict
from numbers import Number
import os
import numpy as np
def dprint(*args):
# hacky, but will do for now
if int(os.environ["DEBUG"]) == 1:
print(args)
def get_generic_path_information(paths, stat_prefix=""):
"""
Get an OrderedDict with a bunch of statistic names and values.
"""
statistics = OrderedDict()
returns = [sum(path["rewards"]) for path in paths]
rewards = np.vstack([path["rewards"] for path in paths])
statistics.update(
create_stats_ordered_dict("Rewards", rewards, stat_prefix=stat_prefix)
)
statistics.update(
create_stats_ordered_dict("Returns", returns, stat_prefix=stat_prefix)
)
actions = [path["actions"] for path in paths]
if len(actions[0].shape) == 1:
actions = np.hstack([path["actions"] for path in paths])
else:
actions = np.vstack([path["actions"] for path in paths])
statistics.update(
create_stats_ordered_dict("Actions", actions, stat_prefix=stat_prefix)
)
statistics["Num Paths"] = len(paths)
return statistics
def get_average_returns(paths):
returns = [sum(path["rewards"]) for path in paths]
return np.mean(returns)
def create_stats_ordered_dict(
name, data, stat_prefix=None, always_show_all_stats=True, exclude_max_min=False,
):
if stat_prefix is not None:
name = "{} {}".format(stat_prefix, name)
if isinstance(data, Number):
return OrderedDict({name: data})
if len(data) == 0:
return OrderedDict()
if isinstance(data, tuple):
ordered_dict = OrderedDict()
for number, d in enumerate(data):
sub_dict = create_stats_ordered_dict("{0}_{1}".format(name, number), d,)
ordered_dict.update(sub_dict)
return ordered_dict
if isinstance(data, list):
try:
iter(data[0])
except TypeError:
pass
else:
data = np.concatenate(data)
if isinstance(data, np.ndarray) and data.size == 1 and not always_show_all_stats:
return OrderedDict({name: float(data)})
stats = OrderedDict(
[(name + " Mean", np.mean(data)), (name + " Std", np.std(data)),]
)
if not exclude_max_min:
stats[name + " Max"] = np.max(data)
stats[name + " Min"] = np.min(data)
return stats
| 28.235294 | 85 | 0.635 |
from collections import OrderedDict
from numbers import Number
import os
import numpy as np
def dprint(*args):
if int(os.environ["DEBUG"]) == 1:
print(args)
def get_generic_path_information(paths, stat_prefix=""):
statistics = OrderedDict()
returns = [sum(path["rewards"]) for path in paths]
rewards = np.vstack([path["rewards"] for path in paths])
statistics.update(
create_stats_ordered_dict("Rewards", rewards, stat_prefix=stat_prefix)
)
statistics.update(
create_stats_ordered_dict("Returns", returns, stat_prefix=stat_prefix)
)
actions = [path["actions"] for path in paths]
if len(actions[0].shape) == 1:
actions = np.hstack([path["actions"] for path in paths])
else:
actions = np.vstack([path["actions"] for path in paths])
statistics.update(
create_stats_ordered_dict("Actions", actions, stat_prefix=stat_prefix)
)
statistics["Num Paths"] = len(paths)
return statistics
def get_average_returns(paths):
returns = [sum(path["rewards"]) for path in paths]
return np.mean(returns)
def create_stats_ordered_dict(
name, data, stat_prefix=None, always_show_all_stats=True, exclude_max_min=False,
):
if stat_prefix is not None:
name = "{} {}".format(stat_prefix, name)
if isinstance(data, Number):
return OrderedDict({name: data})
if len(data) == 0:
return OrderedDict()
if isinstance(data, tuple):
ordered_dict = OrderedDict()
for number, d in enumerate(data):
sub_dict = create_stats_ordered_dict("{0}_{1}".format(name, number), d,)
ordered_dict.update(sub_dict)
return ordered_dict
if isinstance(data, list):
try:
iter(data[0])
except TypeError:
pass
else:
data = np.concatenate(data)
if isinstance(data, np.ndarray) and data.size == 1 and not always_show_all_stats:
return OrderedDict({name: float(data)})
stats = OrderedDict(
[(name + " Mean", np.mean(data)), (name + " Std", np.std(data)),]
)
if not exclude_max_min:
stats[name + " Max"] = np.max(data)
stats[name + " Min"] = np.min(data)
return stats
| true | true |
f720ff6a241c7d87d8b54a04ab91ce4d35a8ee45 | 55,439 | py | Python | dlpy/timeseries.py | qzlvyh/sassoftware-python-dlpy | 9bf8cc4ffd5ae235e377004644ef70398431e09c | [
"Apache-2.0"
] | 1 | 2019-04-02T14:36:55.000Z | 2019-04-02T14:36:55.000Z | dlpy/timeseries.py | qzlvyh/sassoftware-python-dlpy | 9bf8cc4ffd5ae235e377004644ef70398431e09c | [
"Apache-2.0"
] | null | null | null | dlpy/timeseries.py | qzlvyh/sassoftware-python-dlpy | 9bf8cc4ffd5ae235e377004644ef70398431e09c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
#
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
''' Timeseries related classes and functions '''
from __future__ import (print_function, division, absolute_import, unicode_literals)
from swat.cas.table import CASTable
from .utils import random_name, get_cas_host_type, char_to_double, int_to_double
from dlpy.utils import DLPyError
from swat.cas import datamsghandlers
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import warnings
import datetime
import numbers
import re
import swat
def plot_timeseries(tbl, timeid, timeseries, figure=None,
                    groupid=None, start_time=None, end_time=None, xlim=None,
                    ylim=None, xlabel=None, ylabel=None, xdate_format=None,
                    title=None, figsize=None,
                    fontsize_spec=None, **kwargs):
    '''
    Create an timeseries line plot from a CASTable or pandas DataFrame

    Parameters
    ----------
    tbl : :class:`CASTable` or :class:`pandas.DataFrame` or :class:`pandas.Series`
        The input table for the plot. If it is CASTable, it will be fetched to
        the client. If it is pandas.Series, the index name will become timeid,
        the series name will become timeseries.
    timeid : str
        The name of the timeid variable. It will be the value to be used in the
        x-axis.
    timeseries : str
        The name of the column contains the timeseries value. It will be the
        value to be used in the y-axis.
    figure : two-element-tuple, optional
        The tuple must be in the form (:class:`matplotlib.figure.Figure`,
        :class:`matplotlib.axes.Axes`). These are the figure and axes that the
        user wants to plot on. It can be used to plot new timeseries plot on
        pre-existing figures.
        Default: None
    groupid : dict, optional
        It is in the format {column1 : value1, column2 : value2, ...}.
        It is used to plot subset of the data where column1 = value1 and
        column2 = value2, etc.
        Default: None, which means do not subset the data.
    start_time : :class:`datetime.datetime` or :class:`datetime.date`, optional
        The start time of the plotted timeseries.
        Default: None, which means the plot starts at the beginning of the
        timeseries.
    end_time : :class:`datetime.datetime` or :class:`datetime.date`, optional
        The end time of the plotted timeseries.
        Default: None, which means the plot ends at the end of the timeseries.
    xlim : tuple, optional
        Set the data limits for the x-axis.
        Default: None
    ylim : tuple, optional
        Set the data limits for the y-axis.
        Default: None
    xlabel : string, optional
        Set the label for the x-axis.
    ylabel : string, optional
        Set the label for the y-axis.
    xdate_format : string, optional
        If the x-axis represents date or datetime, this is the date or datetime
        format string. (e.g. '%Y-%m-%d' is the format of 2000-03-10,
        refer to documentation for :meth:`datetime.datetime.strftime`)
        Default: None
    title : string, optional
        Set the title of the figure.
        Default: None
    figsize : tuple, optional
        The size of the figure.
        Default: None
    fontsize_spec : dict, optional
        It specifies the fontsize for 'xlabel', 'ylabel', 'xtick', 'ytick',
        'legend' and 'title'. (e.g. {'xlabel':14, 'ylabel':14}).
        If None, and figure is specified, then it will take from provided
        figure object. Otherwise, it will take the default fontsize, which are
        {'xlabel':16, 'ylabel':16, 'xtick':14, 'ytick':14, 'legend':14, 'title':20}
        Default: None
    `**kwargs` : keyword arguments, optional
        Options to pass to matplotlib plotting method.

    Returns
    -------
    (:class:`matplotlib.figure.Figure`, :class:`matplotlib.axes.Axes`)
    '''
    default_fontsize_spec = {'xlabel':16, 'ylabel':16, 'xtick':14,
                             'ytick':14, 'legend':14, 'title':20}

    if figure is None:
        fig, ax = plt.subplots(1, 1, figsize=figsize)
        # A fresh figure: merge user overrides on top of the defaults.
        if fontsize_spec is not None:
            default_fontsize_spec.update(fontsize_spec)
        fontsize_spec = default_fontsize_spec
    else:
        # Plot on an existing figure; only the legend size needs a default,
        # the other fontsizes are inherited from the provided axes.
        fig, ax = figure
        if fontsize_spec is None:
            fontsize_spec = {}
        if 'legend' not in fontsize_spec.keys():
            fontsize_spec['legend'] = default_fontsize_spec['legend']

    if isinstance(tbl, CASTable):
        if groupid is None:
            tbl = tbl.to_frame()
        else:
            # Subset server-side before fetching to the client.
            where_clause_list = []
            for gid in groupid.keys():
                where_clause_list.append(gid + '=' + str(groupid[gid]))

            where_clause = ' and '.join(where_clause_list)
            tbl = tbl.query(where_clause)
            tbl = tbl.to_frame()
    else:
        if isinstance(tbl, pd.Series):
            # Promote a Series to a two-column frame: index -> timeid,
            # values -> timeseries.
            timeseries = tbl.name
            tbl = tbl.reset_index()
            timeid = [colname for colname in tbl.columns if colname != timeseries][0]

        if groupid is not None:
            for gid in groupid.keys():
                tbl = tbl.loc[tbl[gid]==groupid[gid]]

    # Non-numeric timeid is treated as date/datetime on the x-axis.
    if not (np.issubdtype(tbl[timeid].dtype, np.integer) or
            np.issubdtype(tbl[timeid].dtype, np.floating)):
        tbl[timeid] = pd.to_datetime(tbl[timeid])
        fig.autofmt_xdate()
        if xdate_format is not None:
            import matplotlib.dates as mdates
            xfmt = mdates.DateFormatter(xdate_format)
            ax.xaxis.set_major_formatter(xfmt)

    if start_time is not None:
        if isinstance(start_time, datetime.date):
            start_time = pd.Timestamp(start_time)
        tbl = tbl.loc[tbl[timeid]>=start_time]

    if end_time is not None:
        # BUGFIX: previously this branch tested isinstance(start_time, ...),
        # so a datetime.date end_time was never converted to pd.Timestamp.
        if isinstance(end_time, datetime.date):
            end_time = pd.Timestamp(end_time)
        tbl = tbl.loc[tbl[timeid]<=end_time]

    tbl = tbl.sort_values(timeid)

    ax.plot(tbl[timeid], tbl[timeseries], **kwargs)

    if xlabel is not None:
        if 'xlabel' in fontsize_spec.keys():
            ax.set_xlabel(xlabel, fontsize=fontsize_spec['xlabel'])
        else:
            ax.set_xlabel(xlabel)
    elif figure is not None:
        # Keep the existing label but allow resizing its font.
        if 'xlabel' in fontsize_spec.keys():
            ax.set_xlabel(ax.get_xlabel(), fontsize=fontsize_spec['xlabel'])
    else:
        ax.set_xlabel(timeid, fontsize=fontsize_spec['xlabel'])

    if ylabel is not None:
        if 'ylabel' in fontsize_spec.keys():
            ax.set_ylabel(ylabel, fontsize=fontsize_spec['ylabel'])
        else:
            ax.set_ylabel(ylabel)
    elif figure is not None:
        if 'ylabel' in fontsize_spec.keys():
            ax.set_ylabel(ax.get_ylabel(), fontsize=fontsize_spec['ylabel'])
    else:
        ax.set_ylabel(timeseries, fontsize=fontsize_spec['ylabel'])

    if xlim is not None:
        ax.set_xlim(xlim)

    if ylim is not None:
        ax.set_ylim(ylim)

    if title is not None:
        if 'title' in fontsize_spec.keys():
            ax.set_title(title, fontsize=fontsize_spec['title'])
        else:
            ax.set_title(title)
    elif figure is not None:
        if 'title' in fontsize_spec.keys():
            ax.set_title(ax.get_title(), fontsize=fontsize_spec['title'])

    ax.legend(loc='best', bbox_to_anchor=(1, 1), prop={'size': fontsize_spec['legend']})
    if 'xtick' in fontsize_spec.keys():
        ax.get_xaxis().set_tick_params(direction='out', labelsize=fontsize_spec['xtick'])
    else:
        ax.get_xaxis().set_tick_params(direction='out')

    if 'ytick' in fontsize_spec.keys():
        ax.get_yaxis().set_tick_params(direction='out', labelsize=fontsize_spec['ytick'])
    else:
        ax.get_yaxis().set_tick_params(direction='out')

    return (fig, ax)
class TimeseriesTable(CASTable):
'''
Table for preprocessing timeseries
It creates an instance of :class:`TimeseriesTable` by loading from
files on the server side, or files on the client side, or in
memory :class:`CASTable`, :class:`pandas.DataFrame` or
    :class:`pandas.Series`. It then performs in-place timeseries formatting,
    timeseries accumulation, timeseries subsequence generation, and
timeseries partitioning to prepare the timeseries into a format that
can be followed by subsequent deep learning models.
Parameters
----------
name : string, optional
Name of the CAS table
timeid : string, optional
Specifies the column name for the timeid.
Default: None
groupby_var : string or list-of-strings, optional
The groupby variables.
Default: None.
sequence_opt : dict, optional
Dictionary with keys: 'input_length', 'target_length' and 'token_size'.
It will be created by the prepare_subsequences method.
Default: None
inputs_target : dict, optional
Dictionary with keys: 'inputs', 'target'.
It will be created by the prepare_subsequences method.
Default: None
Returns
-------
:class:`TimeseriesTable`
'''
running_caslib = None
def __init__(self, name, timeid=None, groupby_var=None,
sequence_opt=None, inputs_target=None, **table_params):
CASTable.__init__(self, name, **table_params)
self.timeid = timeid
self.groupby_var = groupby_var
self.sequence_opt = sequence_opt
self.inputs_target = inputs_target
@classmethod
def from_table(cls, tbl, columns=None, casout=None):
'''
Create an TimeseriesTable from a CASTable
Parameters
----------
tbl : :class:`CASTable`
The CASTable object to use as the source.
columns : list-of-strings, optional
Columns to keep when loading the data.
None means it will include all the columns from the source.
Empty list means include no column, which will generate empty data.
Default: None
casout : dict or :class:`CASTable`, optional
if it is dict, it specifies the output CASTable parameters.
if it is CASTable, it is the CASTable that will be overwritten.
None means a new CASTable with random name will be generated.
Default: None
Returns
-------
:class:`TimeseriesTable`
'''
input_tbl_params = tbl.to_outtable_params()
input_tbl_name = input_tbl_params['name']
conn = tbl.get_connection()
if casout is None:
casout_params = {}
elif isinstance(casout, CASTable):
casout_params = casout.to_outtable_params()
elif isinstance(casout, dict):
casout_params = casout
if 'name' not in casout_params:
casout_params['name'] = random_name('Timeseries', 6)
output_tbl_name = casout_params['name']
if columns is None:
keep_col_sascode = '''
data {0};
set {1};
run;
'''.format(output_tbl_name, input_tbl_name)
conn.retrieve('dataStep.runCode', _messagelevel='error',
code=keep_col_sascode)
else:
if not isinstance(columns, list):
columns = [columns]
keepcol = ' '.join(columns)
keep_col_sascode = '''
data {0};
set {1};
keep {2};
run;
'''.format(output_tbl_name, input_tbl_name, keepcol)
conn.retrieve('dataStep.runCode', _messagelevel='error',
code=keep_col_sascode)
out = cls(**casout_params)
out.set_connection(conn)
return out
@classmethod
def from_pandas(cls, conn, pandas_df, casout=None):
'''
Create an TimeseriesTable from a pandas DataFrame or Series
Parameters
----------
conn : CAS
The CAS connection object
pandas_df : :class:`pandas.DataFrame` or :class:`pandas.Series`
The pandas dataframe or series to use as the source.
casout : dict or :class:`CASTable`, optional
if it is dict, it specifies the output CASTable parameters.
if it is CASTable, it is the CASTable that will be overwritten.
None means a new CASTable with random name will be generated.
Default: None
Returns
-------
:class:`TimeseriesTable`
'''
if isinstance(pandas_df, pd.Series):
pandas_df = pandas_df.reset_index()
if casout is None:
casout_params = {}
elif isinstance(casout, CASTable):
casout_params = casout.to_outtable_params()
elif isinstance(casout, dict):
casout_params = casout
if 'name' not in casout_params:
casout_params['name'] = random_name('Timeseries', 6)
output_tbl_name = casout_params['name']
handler = datamsghandlers.PandasDataFrame(pandas_df)
conn.addtable(table=output_tbl_name, replace=True, **handler.args.addtable)
tbl = conn.CASTable(name=output_tbl_name)
return cls.from_table(tbl, columns=None, casout=casout_params)
@classmethod
def from_localfile(cls, conn, path, columns=None, importoptions=None,
casout=None):
'''
Create an TimeseriesTable from a file on the client side.
Parameters
----------
conn : CAS
The CAS connection object
path : string
The full path to the local file that will be uploaded to the server.
columns : list-of-strings, optional
Columns to keep when loading the data.
None means it will include all the columns from the source.
Empty list means to include no column, which will generate empty data.
Default: None
importoptions : dict, optional
Options to import data and upload to the server, such as filetype,
delimiter, etc. None means use the default 'auto' method in the
importoptions from CAS.upload.
Default: None
casout : dict or :class:`CASTable`, optional
If it is dict, it specifies the output CASTable parameters.
If it is CASTable, it is the CASTable that will be overwritten.
None means a new CASTable with random name will be generated.
Default: None
Returns
-------
:class:`TimeseriesTable`
'''
if casout is None:
casout_params = {}
elif isinstance(casout, CASTable):
casout_params = casout.to_outtable_params()
elif isinstance(casout, dict):
casout_params = casout
if 'name' not in casout_params:
casout_params['name'] = random_name('Timeseries', 6)
if importoptions is None:
importoptions = {}
upload_result = conn.upload(path,
importoptions=importoptions,
casout=casout_params)
tbl = conn.CASTable(**casout_params)
return cls.from_table(tbl, columns=columns, casout=casout_params)
@classmethod
def from_serverfile(cls, conn, path, columns=None, caslib=None,
importoptions=None, casout=None):
'''
Create an TimeseriesTable from a file on the server side
Parameters
----------
conn : CAS
The CAS connection object
path : string
The path that the server can access. If the caslib is specified,
it is relative path to the file with respect to the caslib.
otherwise, it is the full path to the file.
columns : list-of-strings, optional
columns to keep when loading the data.
None means it will include all the columns from the source.
Empty list means include no column, which will generate empty data.
Default: None
caslib : string, optional
The name of the caslib which contains the file to be uploaded.
Default: None
importoptions : dict, optional
Options to import data and upload to the server, such as filetype,
delimiter, etc. None means use the default 'auto' method in the
importoptions from CAS.upload.
Default: None
casout : dict or :class:`CASTable`, optional
If it is dict, it specifies the output CASTable parameters.
If it is CASTable, it is the CASTable that will be overwritten.
None means a new CASTable with random name will be generated.
Default: None
Returns
-------
:class:`TimeseriesTable`
'''
if casout is None:
casout_params = {}
elif isinstance(casout, CASTable):
casout_params = casout.to_outtable_params()
elif isinstance(casout, dict):
casout_params = casout
if 'name' not in casout_params:
casout_params['name'] = random_name('Timeseries', 6)
if importoptions is None:
importoptions = {}
if caslib is None:
caslib, rest_path = cls.find_file_caslib(conn, path)
if caslib is None:
server_type = get_cas_host_type(conn).lower()
if server_type.startswith("lin") or server_type.startswith("osx"):
path_split = path.rsplit("/", 1)
else:
path_split = path.rsplit("\\", 1)
caslib = random_name('Caslib', 6)
rt1 = conn.retrieve('addcaslib', _messagelevel='error',
name=caslib, path=path_split[0],
activeonadd=False, subdirectories=False,
datasource={'srctype':'path'})
if rt1.severity < 2:
rt2 = conn.retrieve('table.loadTable',
_messagelevel='error',
casout=casout_params,
caslib=caslib,
importoptions=importoptions,
path=path_split[1])
if rt2.severity > 1:
for msg in rt2.messages:
print(msg)
raise DLPyError('cannot load files, something is wrong!')
else:
for msg in rt1.messages:
print(msg)
raise DLPyError('''cannot create caslib with path:{},
something is wrong!'''.format(path_split[0]))
else:
rt3 = conn.retrieve('table.loadTable',
_messagelevel='error',
casout=casout_params,
caslib=caslib,
importoptions=importoptions,
path=rest_path)
if rt3.severity > 1:
for msg in rt3.messages:
print(msg)
raise DLPyError('cannot load files, something is wrong!')
else:
rt4 = conn.retrieve('table.loadTable',
_messagelevel='error',
casout=casout_params,
caslib=caslib,
importoptions=importoptions,
path=path)
if rt4.severity > 1:
for msg in rt4.messages:
print(msg)
raise DLPyError('cannot load files, something is wrong!')
tbl = conn.CASTable(**casout_params)
return cls.from_table(tbl, columns=columns, casout=casout_params)
    def timeseries_formatting(self, timeid, timeseries,
                              timeid_informat=None, timeid_format=None,
                              extra_columns=None):
        '''
        Format the TimeseriesTable

        Format timeid into appropriate format and check and format
        timeseries columns into numeric columns. The table is rewritten
        in place on the server via SAS data steps.

        Parameters
        ----------
        timeid : string
            Specifies the column name for the timeid.
        timeseries : string or list-of-strings
            Specifies the column name for the timeseries, that will be part of
            the input or output of the RNN. If str, then it is univariate
            time series. If list of strings, then it is multivariate timeseries.
        timeid_informat : string, optional
            if timeid is in the string format, this is required to parse the
            timeid column.
            Default: None
        timeid_format : string, optional
            Specifies the SAS format that the timeid column will be stored in
            after parsing.
            None means it will be stored in numeric form, not a specific date or datetime format.
            Default: None
        extra_columns : string or list-of-strings, optional
            Specifies the additional columns to be included.
            Empty list means to include no extra columns other than timeid and timeseries.
            if None, all columns are included.
            Default: None

        Raises
        ------
        ValueError
            If timeid is non-numeric and timeid_informat is not given, or
            if any column in timeseries is missing from the table.
        '''
        self.timeid = timeid
        self.timeseries = timeseries
        self.timeid_format = timeid_format
        self.timeid_informat = timeid_informat
        self.extra_columns = extra_columns

        input_tbl_params = self.to_outtable_params()
        input_tbl_name = input_tbl_params['name']

        conn = self.get_connection()

        tbl_colinfo = self.columninfo().ColumnInfo

        # Derive a storage format from the informat when none was given.
        # If both are None, the format stays None (raw numeric storage).
        if self.timeid_format is None:
            if self.timeid_informat is None:
                self.timeid_format = self.timeid_informat
            elif self.timeid_informat.lower().startswith('anydtdtm'):
                # ANYDTDTM* informats parse to datetime values.
                self.timeid_format = 'DATETIME19.'
            else:
                self.timeid_format = self.timeid_informat

        # Case 1: timeid is a character column and an informat is available —
        # parse it into a numeric column via rename + input().
        if (((self.timeid_type not in ['double', 'date', 'datetime'])
             and (not self.timeid_type.startswith('int')))
                and (self.timeid_informat is not None)):
            fmt_code = '''
            data {0};
            set {0}(rename=({1}=c_{1}));
            {1} = input(c_{1},{2});
            drop c_{1};
            format {1} {3};
            run;
            '''.format(input_tbl_name, self.timeid,
                       self.timeid_informat, self.timeid_format)

            conn.retrieve('dataStep.runCode', _messagelevel='error', code=fmt_code)

        # Case 2: character timeid but no informat — cannot parse.
        elif (((self.timeid_type not in ['double', 'date', 'datetime'])
               and (not self.timeid_type.startswith('int')))
                  and (self.timeid_informat is None)):
            raise ValueError('''timeid variable is not in the numeric format,
            so timeid_informat is required for parsing the timeid variable.
            ''')

        # Case 3: already numeric — only (re)apply the display format.
        elif (self.timeid_format is not None):
            fmt_code = '''
            data {0};
            set {0};
            format {1} {2};
            run;
            '''.format(input_tbl_name, self.timeid, self.timeid_format)

            conn.retrieve('dataStep.runCode', _messagelevel='error', code=fmt_code)

        # Case 4: numeric with no format — rewrite the table unchanged.
        else:
            fmt_code = '''
            data {0};
            set {0};
            run;
            '''.format(input_tbl_name)

            conn.retrieve('dataStep.runCode', _messagelevel='error', code=fmt_code)

        # Re-read metadata: the data step above may have changed column types.
        tbl_colinfo = self.columninfo().ColumnInfo

        if not isinstance(self.timeseries, list):
            self.timeseries = [self.timeseries]

        # Convert any character timeseries columns to double.
        if set(self.timeseries).issubset(tbl_colinfo.Column):
            char_to_double(conn, tbl_colinfo, input_tbl_name,
                           input_tbl_name, self.timeseries)
        else:
            raise ValueError('''One or more variables specified in 'timeseries'
            do not exist in the input table.
            ''')

        # Drop everything except timeid, timeseries and extra_columns.
        # extra_columns is None means keep all columns (no data step here).
        if self.extra_columns is not None:
            if not isinstance(self.extra_columns, list):
                self.extra_columns = [self.extra_columns]

            keepcol = [self.timeid]
            keepcol.extend(self.timeseries + self.extra_columns)
            keepcol = ' '.join(keepcol)

            keep_col_sascode = '''
            data {0};
            set {0};
            keep {1};
            run;
            '''.format(input_tbl_name, keepcol)

            conn.retrieve('dataStep.runCode', _messagelevel='error', code=keep_col_sascode)

        print('NOTE: Timeseries formatting is completed.')
    # NOTE: the name 'accumlation' (sic) is the established public API of this
    # class and is kept for backward compatibility.
    def timeseries_accumlation(self, acc_interval='day',timeid=None,
                               timeseries=None, groupby=None,
                               extra_num_columns=None, default_ts_acc='sum',
                               default_col_acc = 'avg',
                               acc_method_byvar=None):
        '''
        Accumulate the TimeseriesTable into regular consecutive intervals

        The table is rewritten in place by the CAS timedata.timeseries action;
        a summary table named '<table>_summary' is also produced.

        Parameters
        ----------
        acc_interval : string, optional
            The accumulation interval, such as 'year', 'qtr', 'month', 'week',
            'day', 'hour', 'minute', 'second'.
        timeid : string, optional
            Specifies the column name for the timeid.
            If None, it will take the timeid specified in timeseries_formatting.
            Default: None
        timeseries : string or list-of-strings, optional
            Specifies the column name for the timeseries, that will be part of
            the input or output of the RNN. If str, then it is univariate
            time series. If list of strings, then it is multivariate timeseries.
            If None, it will take the timeseries specified in timeseries_formatting.
            Default: None
        groupby : string or list-of-strings, optional
            The groupby variables.
            Default: None
        extra_num_columns : string or list-of-strings, optional
            Specifies the additional numeric columns to be included for
            accumulation. These columns can include static feature, and might
            be accumulated differently than the timeseries that will be used
            in RNN. if None, it means no additional numeric columns will be
            accumulated for later processing and modeling.
            Default: None
        default_ts_acc : string, optional
            Default accumulation method for timeseries.
            Default: sum
        default_col_acc : string, optional
            Default accumulation method for additional numeric columns
            Default: avg
        acc_method_byvar : dict, optional
            It specifies specific accumulation method for individual columns,
            if the method is different from the default.
            It has following structure: {'column1 name': 'accumulation method1',
            'column2 name': 'accumulation method2', ...}
            Default: None

        Raises
        ------
        DLPyError
            If neither argument nor stored state supplies timeid/timeseries.
        ValueError
            If referenced columns are missing, or the interval is finer than
            the timeid type supports.
        '''
        # Resolve timeid: prefer the explicit argument, warn when it differs
        # from (or was never set by) timeseries_formatting.
        if (timeid is None) and (self.timeid is None):
            raise DLPyError('''timeid is not specified, consider specifying
            and formatting it with timeseries_formatting''')
        elif (timeid is not None) and (timeid != self.timeid):
            warnings.warn('''timeid has not been formatted by timeseries_formatting,
            consider reload the data and use timeseries_formatting to format the data,
            unless the data has already been pre-formatted.''')
            self.timeid = timeid

        # Resolve timeseries the same way, warning on unformatted columns.
        if timeseries is None:
            if ((hasattr(self, 'timeseries') and self.timeseries is None) or
                (not hasattr(self, 'timeseries'))):
                raise DLPyError('''timeseries is not specified, consider specifying
                and formatting it with timeseries_formatting''')
        else:
            if not isinstance(timeseries, list):
                timeseries = [timeseries]

            if ((hasattr(self, 'timeseries') and (self.timeseries is None)) or
                (not hasattr(self, 'timeseries'))):
                warnings.warn('''timeseries has not been formatted by timeseries_formatting,
                consider reload the data and use timeseries_formatting to format the data,
                unless the data has already been pre-formatted.''')
            elif not set(timeseries).issubset(self.timeseries):
                warnings.warn('''timeseries contains variable(s) that has not been
                formatted by timeseries_formatting, consider reload the data and use
                timeseries_formatting to format the data,
                unless the data has already been pre-formatted.''')

            self.timeseries = timeseries

        self.groupby_var = groupby
        self.extra_num_columns = extra_num_columns

        input_tbl_params = self.to_outtable_params()
        input_tbl_name = input_tbl_params['name']

        conn = self.get_connection()
        conn.loadactionset('timeData')

        tbl_colinfo = self.columninfo().ColumnInfo

        # Normalize groupby to a list and coerce integer groupby columns
        # to double (required downstream).
        if self.groupby_var is None:
            self.groupby_var = []
        elif not isinstance(self.groupby_var, list):
            self.groupby_var = [self.groupby_var]

        if set(self.groupby_var).issubset(tbl_colinfo.Column):
            int_to_double(conn, tbl_colinfo, input_tbl_name,
                          input_tbl_name, self.groupby_var)
        else:
            raise ValueError('''One or more variables specified in 'groupby'
            do not exist in the input table.
            ''')

        # Re-read metadata after the potential int->double conversion.
        tbl_colinfo = self.columninfo().ColumnInfo

        #Check timeid is in the input columns
        if self.timeid not in tbl_colinfo.Column.values:
            raise ValueError('''variable 'timeid' does not exist in input table.
            ''')

        #Check timeseries is in the input columns
        if not isinstance(self.timeseries, list):
            self.timeseries = [self.timeseries]

        if not set(self.timeseries).issubset(tbl_colinfo.Column):
            raise ValueError('''One or more variables specified in 'timeseries'
            do not exist in the input table.
            ''')

        #Check extra_num_columns is in the input columns
        if self.extra_num_columns is None:
            self.extra_num_columns = []
        elif not isinstance(self.extra_num_columns, list):
            self.extra_num_columns = [self.extra_num_columns]

        if not set(self.extra_num_columns).issubset(tbl_colinfo.Column):
            raise ValueError('''One or more variables specified in 'extra_num_columns'
            do not exist in the input table.
            ''')

        # Datetime timeids need the 'dt'-prefixed interval names; date timeids
        # cannot be accumulated at sub-day frequency.
        if self.timeid_type == 'datetime':
            acc_interval = 'dt' + acc_interval
        elif ((self.timeid_type == 'date')
              and (acc_interval.lower() in ['hour', 'minute', 'second'])):
            raise ValueError('''the acc_interval has higher frequency than day,
            yet the timeid variable is in the date format.
            ''')

        if acc_method_byvar is None:
            acc_method_byvar = {}

        # Build the per-column accumulation spec: explicit per-column methods
        # override the defaults.
        serieslist = []
        for ts in self.timeseries:
            if ts in acc_method_byvar.keys():
                method_dict = {'acc':acc_method_byvar[ts],'name':ts}
                serieslist.append(method_dict)
            else:
                method_dict = {'acc':default_ts_acc,'name':ts}
                serieslist.append(method_dict)

        for extra_col in self.extra_num_columns:
            # Columns already covered by timeseries are skipped with a warning.
            if extra_col in self.timeseries:
                warnings.warn('''
                columns in extra_num_columns are also found in
                timeseries, and will be ignored.
                ''')
                continue
            elif extra_col in acc_method_byvar.keys():
                method_dict = {'acc':acc_method_byvar[extra_col],'name':extra_col}
                serieslist.append(method_dict)
            else:
                method_dict = {'acc':default_col_acc,'name':extra_col}
                serieslist.append(method_dict)

        # Accumulate in place; trimid='BOTH' trims leading/trailing intervals.
        acc_result = conn.retrieve('timedata.timeseries', _messagelevel='error',
                                   table={'groupby':self.groupby_var,'name': input_tbl_name},
                                   series=serieslist,
                                   timeid=self.timeid,
                                   interval=acc_interval,
                                   trimid='BOTH',
                                   sumout=dict(name=input_tbl_name + '_summary', replace=True),
                                   casout=dict(name=input_tbl_name, replace=True))

        # Strip the internal 'dt' prefix when reporting the frequency.
        if acc_interval.startswith('dt'):
            print('NOTE: Timeseries are accumulated to the frequency of {}'.format(acc_interval[2:]))
        else:
            print('NOTE: Timeseries are accumulated to the frequency of {}'.format(acc_interval))
    def prepare_subsequences(self, seq_len, target, predictor_timeseries=None,
                             timeid=None, groupby=None,
                             input_length_name='xlen', target_length_name='ylen',
                             missing_handling='drop'):
        '''
        Prepare the subsequences that will be pass into RNN

        Rewrites the table in place, adding lagged predictor columns plus the
        input/target sequence-length columns, and records the resulting
        input/target column layout on the instance (sequence_opt,
        inputs_target, input_vars).

        Parameters
        ----------
        seq_len : int
            subsequence length that will be passed onto RNN.
        target : string
            the target variable for RNN. Currently only support univariate target,
            so only string is accepted here, not list of strings.
        predictor_timeseries : string or list-of-strings, optional
            Timeseries that will be used to predict target. They will be preprocessed
            into subsequences as well. If None, it will take the target timeseries
            as the predictor, which corresponds to auto-regressive models.
            Default: None
        timeid : string, optional
            Specifies the column name for the timeid.
            If None, it will take the timeid specified in timeseries_accumlation.
            Default: None
        groupby : string or list-of-strings, optional
            The groupby variables. if None, it will take the groupby specified
            in timeseries_accumlation.
            Default: None
        input_length_name : string, optional
            The column name in the CASTable specifying input sequence length.
            Default: xlen
        target_length_name : string, optional
            The column name in the CASTable specifying target sequence length.
            currently target length only support length 1 for numeric sequence.
            Default: ylen
        missing_handling : string, optional
            How to handle missing value in the subsequences.
            default: drop
        '''
        tbl_colinfo = self.columninfo().ColumnInfo
        input_tbl_params = self.to_outtable_params()
        input_tbl_name = input_tbl_params['name']

        conn = self.get_connection()

        # Resolve and validate timeid.
        if timeid is not None:
            self.timeid = timeid
        elif self.timeid is None:
            raise ValueError('''timeid is not specified''')

        if self.timeid not in tbl_colinfo.Column.values:
            raise ValueError('''timeid does not exist in the input table''')

        # Resolve groupby, normalize to a list, and coerce integer groupby
        # columns to double.
        if groupby is not None:
            self.groupby_var = groupby

        if self.groupby_var is None:
            self.groupby_var = []
        elif not isinstance(self.groupby_var, list):
            self.groupby_var = [self.groupby_var]

        if set(self.groupby_var).issubset(tbl_colinfo.Column):
            int_to_double(conn, tbl_colinfo, input_tbl_name,
                          input_tbl_name, self.groupby_var)
        else:
            raise ValueError('''One or more variables specified in 'groupby'
            do not exist in the input table.
            ''')

        # Only a single (univariate) target is supported.
        if isinstance(target, list):
            if len(target) > 1:
                raise DLPyError('''currently only support univariate target''')
        else:
            target = [target]

        if predictor_timeseries is None:
            # Default: auto-regressive model — predict the target from its own lags.
            predictor_timeseries = target
        elif not isinstance(predictor_timeseries, list):
            predictor_timeseries = [predictor_timeseries]

        # Split predictors into the target itself (auto-regressive lags) and
        # independent predictor columns.
        if set(target).issubset(predictor_timeseries):
            independent_pred = [var for var in predictor_timeseries
                                if var not in target]
            self.auto_regressive = True
        else:
            independent_pred = predictor_timeseries
            self.auto_regressive = False

        if not set(target).issubset(tbl_colinfo.Column):
            raise ValueError('''invalid target variable''')

        if len(independent_pred) > 0:
            if not set(independent_pred).issubset(tbl_colinfo.Column):
                raise ValueError('''columns in predictor_timeseries are absent from
                the accumulated timeseriest table.''')

        # Warn (not fail) when columns were not prepared by the earlier steps.
        if self.timeseries is None:
            warnings.warn('''timeseries has not been formatted by timeseries_formatting,
            consider reload the data and use timeseries_formatting to format the data,
            unless the data has already been pre-formatted.''')
        else:
            if not set(target).issubset(self.timeseries):
                warnings.warn('''target is not in pre-formatted timeseries,
                consider reload the data and use timeseries_formatting to format the data,
                unless the data has already been pre-formatted.''')
            else:
                if len(independent_pred) > 0:
                    if not set(independent_pred).issubset(self.timeseries):
                        warnings.warn('''
                        some of predictor_timeseries are not in pre-accumulated timeseries,\n
                        consider reload the data and use timeseries_accumulation to accumulate the data,\n
                        unless the data has already been pre-formatted.
                        ''')

        self.target = target[0]
        self.independent_pred = independent_pred
        self.seq_len = seq_len

        if self.seq_len < 1:
            raise ValueError('''RNN sequence length at least need to be 1''')

        # Assemble one SAS data step that creates the lag columns and the
        # sequence-length columns; relies on by-group ordering.
        sasCode = 'data {0}; set {0}; by {1} {2};'.format(
            input_tbl_name, ' '.join(self.groupby_var), self.timeid)

        if self.seq_len > 1:
            for var in self.independent_pred:
                # Independent predictors: lags 1 .. seq_len-1 (current value
                # itself is also used).
                sasCode += self.create_lags(var, self.seq_len - 1, self.groupby_var)

        if self.auto_regressive:
            # Target lags 1 .. seq_len (current target is the label, not input).
            sasCode += self.create_lags(self.target, self.seq_len, self.groupby_var)

        sasCode += '{0} = {1};'.format(input_length_name, self.seq_len)
        sasCode += '{} = 1;'.format(target_length_name) # Currently only support one timestep numeric output.

        if missing_handling == 'drop':
            # Drop rows whose lag window is incomplete (any missing value).
            sasCode += 'if not cmiss(of _all_) then output {};'.format(input_tbl_name)

        sasCode += 'run;'

        # Without by-groups the data step must run on a single thread so the
        # lag() ordering is well-defined.
        if len(self.groupby_var) == 0:
            conn.retrieve('dataStep.runCode', _messagelevel='error', code=sasCode,
                          single='Yes')
        else:
            conn.retrieve('dataStep.runCode', _messagelevel='error', code=sasCode)

        # Build the model-input column list, oldest timestep first after the
        # final reverse().
        self.input_vars = []

        for i in range(self.seq_len):
            if self.auto_regressive:
                self.input_vars.append('{0}_lag{1}'.format(self.target, i+1))

            for var in self.independent_pred:
                if i == 0:
                    self.input_vars.append(var)
                else:
                    self.input_vars.append('{0}_lag{1}'.format(var, i))

        self.input_vars.reverse()

        # One token per predictor column at each timestep.
        self.tokensize = len(predictor_timeseries)

        self.sequence_opt = dict(input_length=input_length_name,
                                 target_length=target_length_name,
                                 token_size=self.tokensize)

        self.inputs_target = dict(inputs=self.input_vars,
                                  target=self.target)

        print('NOTE: timeseries subsequences are prepared with subsequence length = {}'.format(seq_len))
@property
def timeid_type(self):
tbl_colinfo = self.columninfo().ColumnInfo
timeid_type = self.identify_coltype(self.timeid, tbl_colinfo)
return timeid_type
    @staticmethod
    def identify_coltype(col, tbl_colinfo):
        '''
        Determine the effective type of a column from CAS column metadata.

        Parameters
        ----------
        col : string
            Name of the column to inspect.
        tbl_colinfo : :class:`pandas.DataFrame`
            The ColumnInfo table from ``columninfo()``; must have 'Column'
            and 'Type' columns, and optionally 'Format'.

        Returns
        -------
        string
            'date' or 'datetime' when the column's SAS format matches a
            registered date/datetime pattern, otherwise the raw CAS type.

        Raises
        ------
        ValueError
            If ``col`` is not present in ``tbl_colinfo``.
        DLPyError
            If the format matches both a date and a datetime pattern.
        '''
        if col not in tbl_colinfo.Column.values:
            raise ValueError('''variable {} does not exist in input table.
            '''.format(col))

        # 'Format' may be absent from the metadata; treat that as "no format".
        if 'Format' in tbl_colinfo.columns:
            cas_timeid_fmt = tbl_colinfo.Format[tbl_colinfo.Column == col].values[0]
        else:
            cas_timeid_fmt = None

        col_type = tbl_colinfo.Type[tbl_colinfo.Column == col].values[0]
        if cas_timeid_fmt:
            # Anchor each registered pattern at end-of-string (\Z) so a short
            # pattern cannot match a prefix of a longer format name.
            for pattern in swat.options.cas.dataset.date_formats:
                if re.match(r'{}\Z'.format(pattern), cas_timeid_fmt):
                    col_type = 'date'
                    break

            # The datetime loop always runs too: a format matching both
            # families is ambiguous and rejected.
            for pattern in swat.options.cas.dataset.datetime_formats:
                if re.match(r'{}\Z'.format(pattern), cas_timeid_fmt):
                    if col_type == 'date':
                        raise DLPyError('''{} format in CASTable is ambiguous,
                        and can match both sas date and sas datetime format'''.format(col))
                    else:
                        col_type = 'datetime'
                        break

        return col_type
    def timeseries_partition(self, training_start=None, validation_start=None,
                             testing_start=None, end_time=None,
                             partition_var_name='split_id',
                             traintbl_suffix='train',
                             validtbl_suffix='valid',
                             testtbl_suffix='test'):
        '''
        Split the dataset into training, validation and testing set
        Parameters
        ----------
        training_start : float or :class:`datetime.datetime` or :class:`datetime.date`, optional
            The training set starting time stamp. if None, the training set
            start at the earliest observation record in the table.
            Default: None
        validation_start : float or :class:`datetime.datetime` or :class:`datetime.date`, optional
            The validation set starting time stamp. The training set
            ends right before it. If None, there is no validation set,
            and the training set ends right before the start of
            testing set.
            Default: None
        testing_start : float or :class:`datetime.datetime` or :class:`datetime.date`, optional
            The testing set starting time stamp. The validation set
            (or training set if validation set is not specified) ends
            right before it. If None, there is no testing set, and
            the validation set (or training set if validation set is
            not set) ends at the end_time.
            Default: None
        end_time : float or :class:`datetime.datetime` or :class:`datetime.date`, optional
            The end time for the table.
        partition_var_name : string, optional
            The name of the indicator column that indicates training,
            testing and validation.
            Default: 'split_id'.
        traintbl_suffix : string, optional
            The suffix name of the CASTable for the training set.
            Default: 'train'
        validtbl_suffix : string, optional
            The suffix name of the CASTable for the validation set.
            Default: 'valid'
        testtbl_suffix : string, optional
            The suffix name of the CASTable for the testing set.
            Default: 'test'
        Returns
        -------
        ( training TimeseriesTable, validation TimeseriesTable, testing TimeseriesTable )
        '''
        self.partition_var_name = partition_var_name
        conn = self.get_connection()
        # Convert the user-supplied boundaries into SAS expressions
        # (mdy(...)/dhms(...)/number) matching the timeid column type.
        training_start = self.convert_to_sas_time_format(training_start, self.timeid_type)
        validation_start = self.convert_to_sas_time_format(validation_start, self.timeid_type)
        testing_start = self.convert_to_sas_time_format(testing_start, self.timeid_type)
        end_time = self.convert_to_sas_time_format(end_time, self.timeid_type)
        # Missing boundaries collapse onto the next one so that each split's
        # IF statement covers a contiguous, non-overlapping range; only the
        # last non-empty split is right-inclusive.
        if testing_start is None:
            testing_start = end_time
            test_statement = ';'
        else:
            test_statement = self.generate_splitting_code(
                self.timeid, testing_start, end_time,
                True, self.partition_var_name, 'test')
        if validation_start is None:
            validation_start = testing_start
            valid_statement = ';'
        else:
            if testing_start == end_time:
                valid_statement = self.generate_splitting_code(
                    self.timeid, validation_start, testing_start,
                    True, self.partition_var_name, 'valid')
            else:
                valid_statement = self.generate_splitting_code(
                    self.timeid, validation_start, testing_start,
                    False, self.partition_var_name, 'valid')
        if validation_start == end_time:
            train_statement = self.generate_splitting_code(
                self.timeid, training_start, validation_start,
                True, self.partition_var_name, 'train')
        else:
            train_statement = self.generate_splitting_code(
                self.timeid, training_start, validation_start,
                False, self.partition_var_name, 'train')
        input_tbl_params = self.to_outtable_params()
        input_tbl_name = input_tbl_params['name']
        traintbl_name = '_'.join([input_tbl_name, traintbl_suffix])
        validtbl_name = '_'.join([input_tbl_name, validtbl_suffix])
        testtbl_name = '_'.join([input_tbl_name, testtbl_suffix])
        # One data step routes every observation to at most one of the three
        # output tables based on the label assigned by the statements above.
        splitting_code = '''
        data {4} {5} {6};
        set {0};
        {1}
        {2}
        {3}
        if {7} = 'train' then output {4};
        if {7} = 'valid' then output {5};
        if {7} = 'test' then output {6};
        run;
        '''.format(input_tbl_name, train_statement, valid_statement, test_statement,
                   traintbl_name, validtbl_name, testtbl_name, self.partition_var_name)
        conn.retrieve('dataStep.runCode', _messagelevel='error', code=splitting_code)
        # Propagate the timeseries metadata to the three result tables.
        train_out = dict(name=traintbl_name, timeid=self.timeid, groupby_var=self.groupby_var,
                         sequence_opt=self.sequence_opt, inputs_target=self.inputs_target)
        valid_out = dict(name=validtbl_name, timeid=self.timeid, groupby_var=self.groupby_var,
                         sequence_opt=self.sequence_opt, inputs_target=self.inputs_target)
        test_out = dict(name=testtbl_name, timeid=self.timeid, groupby_var=self.groupby_var,
                        sequence_opt=self.sequence_opt, inputs_target=self.inputs_target)
        train_out_tbl = TimeseriesTable(**train_out)
        train_out_tbl.set_connection(conn)
        valid_out_tbl = TimeseriesTable(**valid_out)
        valid_out_tbl.set_connection(conn)
        test_out_tbl = TimeseriesTable(**test_out)
        test_out_tbl.set_connection(conn)
        print('NOTE: Training set has {} observations'.format(train_out_tbl.shape[0]))
        print('NOTE: Validation set has {} observations'.format(valid_out_tbl.shape[0]))
        print('NOTE: Testing set has {} observations'.format(test_out_tbl.shape[0]))
        return train_out_tbl, valid_out_tbl, test_out_tbl
@staticmethod
def generate_splitting_code(timeid, start, end, right_inclusive,
partition_var_name, partition_val):
if (start is None) and (end is not None):
if right_inclusive:
statement = '''if {0} <= {1} then {2} = '{3}';'''.format(
timeid, end, partition_var_name, partition_val)
else:
statement = '''if {0} < {1} then {2} = '{3}';'''.format(
timeid, end, partition_var_name, partition_val)
elif (start is not None) and (end is None):
statement = '''if {0} >= {1} then {2} = '{3}';'''.format(
timeid, start, partition_var_name, partition_val)
elif (start is not None) and (end is not None):
if right_inclusive:
statement = '''if {0} >= {1} and {0} <= {2} then {3} = '{4}';'''.format(
timeid, start, end, partition_var_name, partition_val)
else:
statement = '''if {0} >= {1} and {0} < {2} then {3} = '{4}';'''.format(
timeid, start, end, partition_var_name, partition_val)
else:
statement = '''{0} = '{1}';'''.format(partition_var_name, partition_val)
return statement
    @staticmethod
    def convert_to_sas_time_format(python_time, sas_format_type):
        '''
        Convert a python value into a SAS literal expression string matching
        the timeid type: mdy(...) for 'date', dhms(...) for 'datetime', the
        number itself for 'double'.  Returns None when python_time is None.
        Raises ValueError on a type mismatch and DLPyError on an unknown
        sas_format_type.
        '''
        if sas_format_type == 'date':
            # datetime.datetime is a subclass of datetime.date, so this branch
            # also accepts datetimes; only the date components are used.
            if isinstance(python_time, datetime.date):
                sas_time_str = 'mdy({0},{1},{2})'.format(python_time.month,
                                                         python_time.day, python_time.year)
                return sas_time_str
            elif python_time is None:
                return None
            else:
                raise ValueError('''The timeid type is date format, so the input
                python time variable should be date or datetime format''')
        elif sas_format_type == 'datetime':
            # Check datetime BEFORE date (subclass relationship) so the
            # time-of-day components are kept when available.
            if isinstance(python_time, datetime.datetime):
                sas_time_str = 'dhms(mdy({0},{1},{2}), {3}, {4}, {5})'.format(
                    python_time.month, python_time.day, python_time.year,
                    python_time.hour, python_time.minute, python_time.second)
                return sas_time_str
            elif isinstance(python_time, datetime.date):
                # Plain dates are promoted to midnight.
                sas_time_str = 'dhms(mdy({0},{1},{2}), 0, 0, 0)'.format(
                    python_time.month, python_time.day, python_time.year)
                return sas_time_str
            elif python_time is None:
                return None
            else:
                raise ValueError('''The timeid type is datetime format, so the input
                python time variable should be date or datetime format''')
        elif sas_format_type == 'double':
            if isinstance(python_time, numbers.Real):
                return python_time
            elif python_time is None:
                return None
            else:
                raise ValueError('''The timeid type is double, so the input
                python time variable should be int or float''')
        else:
            raise DLPyError('''timeid format in CASTable is wrong, consider reload
            the table and formatting it with timeseries_formatting''')
@staticmethod
def create_lags(varname, nlags, byvar):
if not isinstance(byvar, list):
byvar = [byvar]
byvar_strlist = ['first.{}'.format(var) for var in byvar]
sasCode = ''
for i in range(nlags):
if i == 0:
sasCode += '{0}_lag{1} = lag({0});'.format(varname, i+1)
else:
sasCode += '{0}_lag{1} = lag({0}_lag{2});'.format(varname, i+1, i)
if len(byvar) > 0:
sasCode += 'if ' + ' or '.join(byvar_strlist)
sasCode += ' then {0}_lag{1} = .;'.format(varname, i+1)
return sasCode
@staticmethod
def find_file_caslib(conn, path):
'''
Check whether the specified path is in the caslibs of the current session
Parameters
----------
conn : CAS
Specifies the CAS connection object
path : string
Specifies the name of the path.
Returns
-------
( flag, caslib_name )
flag specifies if path exist in session.
caslib_name specifies the name of the caslib that contains the path.
'''
paths = conn.caslibinfo().CASLibInfo.Path.tolist()
caslibs = conn.caslibinfo().CASLibInfo.Name.tolist()
subdirs = conn.caslibinfo().CASLibInfo.Subdirs.tolist()
server_type = get_cas_host_type(conn).lower()
if server_type.startswith("lin") or server_type.startswith("osx"):
sep = '/'
else:
sep = '\\'
for i, directory in enumerate(paths):
if path.startswith(directory) and (subdirs[i]==1):
rest_path = path[len(directory):]
caslibname = caslibs[i]
return (caslibname, rest_path)
elif path.startswith(directory) and (subdirs[i]==0):
rest_path = path[len(directory):]
if sep in rest_path:
continue
else:
caslibname = caslibs[i]
return (caslibname, rest_path)
return (None, None)
| 41.840755 | 116 | 0.570771 |
from __future__ import (print_function, division, absolute_import, unicode_literals)
from swat.cas.table import CASTable
from .utils import random_name, get_cas_host_type, char_to_double, int_to_double
from dlpy.utils import DLPyError
from swat.cas import datamsghandlers
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import warnings
import datetime
import numbers
import re
import swat
def plot_timeseries(tbl, timeid, timeseries, figure=None,
                    groupid=None, start_time=None, end_time=None, xlim=None,
                    ylim=None, xlabel=None, ylabel=None, xdate_format=None,
                    title=None, figsize=None,
                    fontsize_spec=None, **kwargs):
    '''
    Plot a timeseries column against its time id.

    Parameters
    ----------
    tbl : CASTable or pandas.DataFrame or pandas.Series
        Data holding the series. A CASTable is fetched to the client first.
    timeid : string
        Name of the time id column (derived automatically for a Series).
    timeseries : string
        Name of the series column to plot (the Series name for a Series).
    figure : (fig, ax), optional
        Existing matplotlib figure/axes pair to draw on; created when None.
    groupid : dict, optional
        Maps BY-variable names to values; only that group is plotted.
    start_time, end_time : float or datetime-like, optional
        Inclusive plotting window applied to the timeid column.
    xlim, ylim, xlabel, ylabel, xdate_format, title, figsize : optional
        Usual matplotlib axis configuration.
    fontsize_spec : dict, optional
        Font sizes keyed by 'xlabel', 'ylabel', 'xtick', 'ytick',
        'legend', 'title'.
    **kwargs
        Forwarded to ``ax.plot``.

    Returns
    -------
    ( fig, ax )
    '''
    default_fontsize_spec = {'xlabel':16, 'ylabel':16, 'xtick':14,
                             'ytick':14, 'legend':14, 'title':20}
    if figure is None:
        fig, ax = plt.subplots(1, 1, figsize=figsize)
        if fontsize_spec is not None:
            default_fontsize_spec.update(fontsize_spec)
        fontsize_spec = default_fontsize_spec
    else:
        # Reusing an existing axes: only the legend size gets a default so
        # the caller's earlier label/title sizes are not clobbered.
        fig, ax = figure
        if fontsize_spec is None:
            fontsize_spec = {}
        if 'legend' not in fontsize_spec.keys():
            fontsize_spec['legend'] = default_fontsize_spec['legend']
    if isinstance(tbl, CASTable):
        if groupid is None:
            tbl = tbl.to_frame()
        else:
            # Filter server-side before downloading the table.
            where_clause_list = []
            for gid in groupid.keys():
                where_clause_list.append(gid + '=' + str(groupid[gid]))
            where_clause = ' and '.join(where_clause_list)
            tbl = tbl.query(where_clause)
            tbl = tbl.to_frame()
    else:
        if isinstance(tbl, pd.Series):
            timeseries = tbl.name
            tbl = tbl.reset_index()
            # The remaining non-series column is the time id.
            timeid = [colname for colname in tbl.columns if colname != timeseries][0]
        if groupid is not None:
            for gid in groupid.keys():
                tbl = tbl.loc[tbl[gid]==groupid[gid]]
    if not (np.issubdtype(tbl[timeid].dtype, np.integer) or
            np.issubdtype(tbl[timeid].dtype, np.floating)):
        # Non-numeric time ids are parsed to datetimes for date axes.
        tbl[timeid] = pd.to_datetime(tbl[timeid])
        fig.autofmt_xdate()
        if xdate_format is not None:
            import matplotlib.dates as mdates
            xfmt = mdates.DateFormatter(xdate_format)
            ax.xaxis.set_major_formatter(xfmt)
    if start_time is not None:
        if isinstance(start_time, datetime.date):
            start_time = pd.Timestamp(start_time)
        tbl = tbl.loc[tbl[timeid]>=start_time]
    if end_time is not None:
        # BUG FIX: this previously tested isinstance(start_time, ...), so a
        # date-typed end_time was never converted to a Timestamp whenever
        # start_time was None or numeric.
        if isinstance(end_time, datetime.date):
            end_time = pd.Timestamp(end_time)
        tbl = tbl.loc[tbl[timeid]<=end_time]
    tbl = tbl.sort_values(timeid)
    ax.plot(tbl[timeid], tbl[timeseries], **kwargs)
    if xlabel is not None:
        if 'xlabel' in fontsize_spec.keys():
            ax.set_xlabel(xlabel, fontsize=fontsize_spec['xlabel'])
        else:
            ax.set_xlabel(xlabel)
    elif figure is not None:
        # Keep the axes' existing label, resizing it only if requested.
        if 'xlabel' in fontsize_spec.keys():
            ax.set_xlabel(ax.get_xlabel(), fontsize=fontsize_spec['xlabel'])
    else:
        ax.set_xlabel(timeid, fontsize=fontsize_spec['xlabel'])
    if ylabel is not None:
        if 'ylabel' in fontsize_spec.keys():
            ax.set_ylabel(ylabel, fontsize=fontsize_spec['ylabel'])
        else:
            ax.set_ylabel(ylabel)
    elif figure is not None:
        if 'ylabel' in fontsize_spec.keys():
            ax.set_ylabel(ax.get_ylabel(), fontsize=fontsize_spec['ylabel'])
    else:
        ax.set_ylabel(timeseries, fontsize=fontsize_spec['ylabel'])
    if xlim is not None:
        ax.set_xlim(xlim)
    if ylim is not None:
        ax.set_ylim(ylim)
    if title is not None:
        if 'title' in fontsize_spec.keys():
            ax.set_title(title, fontsize=fontsize_spec['title'])
        else:
            ax.set_title(title)
    elif figure is not None:
        if 'title' in fontsize_spec.keys():
            ax.set_title(ax.get_title(), fontsize=fontsize_spec['title'])
    ax.legend(loc='best', bbox_to_anchor=(1, 1), prop={'size': fontsize_spec['legend']})
    if 'xtick' in fontsize_spec.keys():
        ax.get_xaxis().set_tick_params(direction='out', labelsize=fontsize_spec['xtick'])
    else:
        ax.get_xaxis().set_tick_params(direction='out')
    if 'ytick' in fontsize_spec.keys():
        ax.get_yaxis().set_tick_params(direction='out', labelsize=fontsize_spec['ytick'])
    else:
        ax.get_yaxis().set_tick_params(direction='out')
    return (fig, ax)
class TimeseriesTable(CASTable):
running_caslib = None
    def __init__(self, name, timeid=None, groupby_var=None,
                 sequence_opt=None, inputs_target=None, **table_params):
        '''
        Wrap an existing CAS table as a TimeseriesTable.

        Parameters
        ----------
        name : string
            Name of the CAS table.
        timeid : string, optional
            Name of the time id column.
        groupby_var : string or list of strings, optional
            BY-group variable(s).
        sequence_opt : dict, optional
            Sequence options produced by prepare_subsequences.
        inputs_target : dict, optional
            Input/target column mapping produced by prepare_subsequences.
        **table_params
            Forwarded to CASTable.__init__.
        '''
        CASTable.__init__(self, name, **table_params)
        self.timeid = timeid
        self.groupby_var = groupby_var
        self.sequence_opt = sequence_opt
        self.inputs_target = inputs_target
    @classmethod
    def from_table(cls, tbl, columns=None, casout=None):
        '''
        Create a TimeseriesTable by copying an existing CASTable.

        Parameters
        ----------
        tbl : CASTable
            Source table (copied server-side with a data step).
        columns : string or list of strings, optional
            Columns to keep; all columns are kept when None.
        casout : CASTable or dict, optional
            Output table specification; a random name is generated when
            no name is given.

        Returns
        -------
        TimeseriesTable
        '''
        input_tbl_params = tbl.to_outtable_params()
        input_tbl_name = input_tbl_params['name']
        conn = tbl.get_connection()
        # NOTE(review): a casout of any other type leaves casout_params
        # unbound and raises NameError below — confirm intended input types.
        if casout is None:
            casout_params = {}
        elif isinstance(casout, CASTable):
            casout_params = casout.to_outtable_params()
        elif isinstance(casout, dict):
            casout_params = casout
        if 'name' not in casout_params:
            casout_params['name'] = random_name('Timeseries', 6)
        output_tbl_name = casout_params['name']
        if columns is None:
            keep_col_sascode = '''
            data {0};
            set {1};
            run;
            '''.format(output_tbl_name, input_tbl_name)
            conn.retrieve('dataStep.runCode', _messagelevel='error',
                          code=keep_col_sascode)
        else:
            if not isinstance(columns, list):
                columns = [columns]
            keepcol = ' '.join(columns)
            keep_col_sascode = '''
            data {0};
            set {1};
            keep {2};
            run;
            '''.format(output_tbl_name, input_tbl_name, keepcol)
            conn.retrieve('dataStep.runCode', _messagelevel='error',
                          code=keep_col_sascode)
        out = cls(**casout_params)
        out.set_connection(conn)
        return out
    @classmethod
    def from_pandas(cls, conn, pandas_df, casout=None):
        '''
        Create a TimeseriesTable by uploading a pandas DataFrame or Series.

        Parameters
        ----------
        conn : CAS
            CAS connection to upload to.
        pandas_df : pandas.DataFrame or pandas.Series
            Data to upload; a Series is reset to a DataFrame first so its
            index becomes a column.
        casout : CASTable or dict, optional
            Output table specification; a random name is generated when
            no name is given.

        Returns
        -------
        TimeseriesTable
        '''
        if isinstance(pandas_df, pd.Series):
            pandas_df = pandas_df.reset_index()
        if casout is None:
            casout_params = {}
        elif isinstance(casout, CASTable):
            casout_params = casout.to_outtable_params()
        elif isinstance(casout, dict):
            casout_params = casout
        if 'name' not in casout_params:
            casout_params['name'] = random_name('Timeseries', 6)
        output_tbl_name = casout_params['name']
        # Stream the frame to the server via swat's data message handler.
        handler = datamsghandlers.PandasDataFrame(pandas_df)
        conn.addtable(table=output_tbl_name, replace=True, **handler.args.addtable)
        tbl = conn.CASTable(name=output_tbl_name)
        return cls.from_table(tbl, columns=None, casout=casout_params)
@classmethod
def from_localfile(cls, conn, path, columns=None, importoptions=None,
casout=None):
if casout is None:
casout_params = {}
elif isinstance(casout, CASTable):
casout_params = casout.to_outtable_params()
elif isinstance(casout, dict):
casout_params = casout
if 'name' not in casout_params:
casout_params['name'] = random_name('Timeseries', 6)
if importoptions is None:
importoptions = {}
upload_result = conn.upload(path,
importoptions=importoptions,
casout=casout_params)
tbl = conn.CASTable(**casout_params)
return cls.from_table(tbl, columns=columns, casout=casout_params)
    @classmethod
    def from_serverfile(cls, conn, path, columns=None, caslib=None,
                        importoptions=None, casout=None):
        '''
        Create a TimeseriesTable from a file residing on the CAS server.

        When no caslib is given, an existing caslib covering `path` is
        searched for; if none covers it, a temporary caslib is created over
        the file's directory.

        Parameters
        ----------
        conn : CAS
            CAS connection.
        path : string
            Server-side path of the file.
        columns : string or list of strings, optional
            Columns to keep; all columns are kept when None.
        caslib : string, optional
            Caslib to load from; resolved automatically when None.
        importoptions : dict, optional
            Options forwarded to table.loadTable.
        casout : CASTable or dict, optional
            Output table specification; a random name is generated when
            no name is given.

        Returns
        -------
        TimeseriesTable
        '''
        if casout is None:
            casout_params = {}
        elif isinstance(casout, CASTable):
            casout_params = casout.to_outtable_params()
        elif isinstance(casout, dict):
            casout_params = casout
        if 'name' not in casout_params:
            casout_params['name'] = random_name('Timeseries', 6)
        if importoptions is None:
            importoptions = {}
        if caslib is None:
            caslib, rest_path = cls.find_file_caslib(conn, path)
            if caslib is None:
                # No caslib covers the path: split off the directory and
                # register a temporary caslib over it.  The separator depends
                # on the server host OS.
                server_type = get_cas_host_type(conn).lower()
                if server_type.startswith("lin") or server_type.startswith("osx"):
                    path_split = path.rsplit("/", 1)
                else:
                    path_split = path.rsplit("\\", 1)
                caslib = random_name('Caslib', 6)
                rt1 = conn.retrieve('addcaslib', _messagelevel='error',
                                    name=caslib, path=path_split[0],
                                    activeonadd=False, subdirectories=False,
                                    datasource={'srctype':'path'})
                # severity < 2 means no error from the CAS action.
                if rt1.severity < 2:
                    rt2 = conn.retrieve('table.loadTable',
                                        _messagelevel='error',
                                        casout=casout_params,
                                        caslib=caslib,
                                        importoptions=importoptions,
                                        path=path_split[1])
                    if rt2.severity > 1:
                        for msg in rt2.messages:
                            print(msg)
                        raise DLPyError('cannot load files, something is wrong!')
                else:
                    for msg in rt1.messages:
                        print(msg)
                    raise DLPyError('''cannot create caslib with path:{},
                    something is wrong!'''.format(path_split[0]))
            else:
                # An existing caslib covers the path: load relative to it.
                rt3 = conn.retrieve('table.loadTable',
                                    _messagelevel='error',
                                    casout=casout_params,
                                    caslib=caslib,
                                    importoptions=importoptions,
                                    path=rest_path)
                if rt3.severity > 1:
                    for msg in rt3.messages:
                        print(msg)
                    raise DLPyError('cannot load files, something is wrong!')
        else:
            # Caller-supplied caslib: path is interpreted relative to it.
            rt4 = conn.retrieve('table.loadTable',
                                _messagelevel='error',
                                casout=casout_params,
                                caslib=caslib,
                                importoptions=importoptions,
                                path=path)
            if rt4.severity > 1:
                for msg in rt4.messages:
                    print(msg)
                raise DLPyError('cannot load files, something is wrong!')
        tbl = conn.CASTable(**casout_params)
        return cls.from_table(tbl, columns=columns, casout=casout_params)
    def timeseries_formatting(self, timeid, timeseries,
                              timeid_informat=None, timeid_format=None,
                              extra_columns=None):
        '''
        Format the table for timeseries processing: parse and format the
        timeid column, convert the series column(s) to numeric, and
        optionally keep only timeid + timeseries + extra_columns.

        Parameters
        ----------
        timeid : string
            Name of the time id column.
        timeseries : string or list of strings
            Series column(s); character columns are converted to double.
        timeid_informat : string, optional
            SAS informat used to parse a character timeid column.
        timeid_format : string, optional
            SAS display format for timeid; derived from timeid_informat
            when omitted.
        extra_columns : string or list of strings, optional
            Additional columns to keep in the formatted table.
        '''
        self.timeid = timeid
        self.timeseries = timeseries
        self.timeid_format = timeid_format
        self.timeid_informat = timeid_informat
        self.extra_columns = extra_columns
        input_tbl_params = self.to_outtable_params()
        input_tbl_name = input_tbl_params['name']
        conn = self.get_connection()
        tbl_colinfo = self.columninfo().ColumnInfo
        if self.timeid_format is None:
            # Derive the display format from the informat; any ANYDTDTM
            # informat implies a datetime display format.
            if self.timeid_informat is None:
                # NOTE(review): both are None here, so this assignment is a
                # no-op that simply leaves timeid_format as None.
                self.timeid_format = self.timeid_informat
            elif self.timeid_informat.lower().startswith('anydtdtm'):
                self.timeid_format = 'DATETIME19.'
            else:
                self.timeid_format = self.timeid_informat
        # Character timeid with an informat: parse it into a numeric column.
        if (((self.timeid_type not in ['double', 'date', 'datetime'])
            and (not self.timeid_type.startswith('int')))
            and (self.timeid_informat is not None)):
            fmt_code = '''
            data {0};
            set {0}(rename=({1}=c_{1}));
            {1} = input(c_{1},{2});
            drop c_{1};
            format {1} {3};
            run;
            '''.format(input_tbl_name, self.timeid,
                       self.timeid_informat, self.timeid_format)
            conn.retrieve('dataStep.runCode', _messagelevel='error', code=fmt_code)
        elif (((self.timeid_type not in ['double', 'date', 'datetime'])
            and (not self.timeid_type.startswith('int')))
            and (self.timeid_informat is None)):
            raise ValueError('''timeid variable is not in the numeric format,
            so timeid_informat is required for parsing the timeid variable.
            ''')
        elif (self.timeid_format is not None):
            # Already numeric: just (re)apply the display format.
            fmt_code = '''
            data {0};
            set {0};
            format {1} {2};
            run;
            '''.format(input_tbl_name, self.timeid, self.timeid_format)
            conn.retrieve('dataStep.runCode', _messagelevel='error', code=fmt_code)
        else:
            fmt_code = '''
            data {0};
            set {0};
            run;
            '''.format(input_tbl_name)
            conn.retrieve('dataStep.runCode', _messagelevel='error', code=fmt_code)
        tbl_colinfo = self.columninfo().ColumnInfo
        if not isinstance(self.timeseries, list):
            self.timeseries = [self.timeseries]
        if set(self.timeseries).issubset(tbl_colinfo.Column):
            char_to_double(conn, tbl_colinfo, input_tbl_name,
                           input_tbl_name, self.timeseries)
        else:
            raise ValueError('''One or more variables specified in 'timeseries'
            do not exist in the input table.
            ''')
        if self.extra_columns is not None:
            if not isinstance(self.extra_columns, list):
                self.extra_columns = [self.extra_columns]
            keepcol = [self.timeid]
            keepcol.extend(self.timeseries + self.extra_columns)
            keepcol = ' '.join(keepcol)
            keep_col_sascode = '''
            data {0};
            set {0};
            keep {1};
            run;
            '''.format(input_tbl_name, keepcol)
            conn.retrieve('dataStep.runCode', _messagelevel='error', code=keep_col_sascode)
        print('NOTE: Timeseries formatting is completed.')
    def timeseries_accumlation(self, acc_interval='day',timeid=None,
                               timeseries=None, groupby=None,
                               extra_num_columns=None, default_ts_acc='sum',
                               default_col_acc = 'avg',
                               acc_method_byvar=None):
        '''
        Accumulate the timeseries to a regular time interval via the
        timedata.timeseries CAS action.

        (NOTE(review): the method name's spelling — "accumlation" — is part
        of the public interface and is therefore kept.)

        Parameters
        ----------
        acc_interval : string, optional
            Target interval (e.g. 'day', 'hour'); prefixed with 'dt' for
            datetime timeids.
        timeid : string, optional
            Time id column; falls back to the previously formatted one.
        timeseries : string or list of strings, optional
            Series column(s) to accumulate.
        groupby : string or list of strings, optional
            BY-group variable(s).
        extra_num_columns : string or list of strings, optional
            Additional numeric columns to accumulate.
        default_ts_acc : string, optional
            Default accumulation method for series columns.
        default_col_acc : string, optional
            Default accumulation method for extra numeric columns.
        acc_method_byvar : dict, optional
            Per-column accumulation method overrides.
        '''
        if (timeid is None) and (self.timeid is None):
            raise DLPyError('''timeid is not specified, consider specifying
            and formatting it with timeseries_formatting''')
        elif (timeid is not None) and (timeid != self.timeid):
            warnings.warn('''timeid has not been formatted by timeseries_formatting,
            consider reload the data and use timeseries_formatting to format the data,
            unless the data has already been pre-formatted.''')
            self.timeid = timeid
        if timeseries is None:
            if ((hasattr(self, 'timeseries') and self.timeseries is None) or
                (not hasattr(self, 'timeseries'))):
                raise DLPyError('''timeseries is not specified, consider specifying
                and formatting it with timeseries_formatting''')
        else:
            if not isinstance(timeseries, list):
                timeseries = [timeseries]
            if ((hasattr(self, 'timeseries') and (self.timeseries is None)) or
                (not hasattr(self, 'timeseries'))):
                warnings.warn('''timeseries has not been formatted by timeseries_formatting,
                consider reload the data and use timeseries_formatting to format the data,
                unless the data has already been pre-formatted.''')
            elif not set(timeseries).issubset(self.timeseries):
                warnings.warn('''timeseries contains variable(s) that has not been
                formatted by timeseries_formatting, consider reload the data and use
                timeseries_formatting to format the data,
                unless the data has already been pre-formatted.''')
            self.timeseries = timeseries
        self.groupby_var = groupby
        self.extra_num_columns = extra_num_columns
        input_tbl_params = self.to_outtable_params()
        input_tbl_name = input_tbl_params['name']
        conn = self.get_connection()
        conn.loadactionset('timeData')
        tbl_colinfo = self.columninfo().ColumnInfo
        # BY variables must be numeric for the timeseries action.
        if self.groupby_var is None:
            self.groupby_var = []
        elif not isinstance(self.groupby_var, list):
            self.groupby_var = [self.groupby_var]
        if set(self.groupby_var).issubset(tbl_colinfo.Column):
            int_to_double(conn, tbl_colinfo, input_tbl_name,
                          input_tbl_name, self.groupby_var)
        else:
            raise ValueError('''One or more variables specified in 'groupby'
            do not exist in the input table.
            ''')
        tbl_colinfo = self.columninfo().ColumnInfo
        if self.timeid not in tbl_colinfo.Column.values:
            raise ValueError('''variable 'timeid' does not exist in input table.
            ''')
        if not isinstance(self.timeseries, list):
            self.timeseries = [self.timeseries]
        if not set(self.timeseries).issubset(tbl_colinfo.Column):
            raise ValueError('''One or more variables specified in 'timeseries'
            do not exist in the input table.
            ''')
        if self.extra_num_columns is None:
            self.extra_num_columns = []
        elif not isinstance(self.extra_num_columns, list):
            self.extra_num_columns = [self.extra_num_columns]
        if not set(self.extra_num_columns).issubset(tbl_colinfo.Column):
            raise ValueError('''One or more variables specified in 'extra_num_columns'
            do not exist in the input table.
            ''')
        # Datetime timeids use 'dt'-prefixed intervals (dtday, dthour, ...);
        # sub-day intervals are impossible for date timeids.
        if self.timeid_type == 'datetime':
            acc_interval = 'dt' + acc_interval
        elif ((self.timeid_type == 'date')
              and (acc_interval.lower() in ['hour', 'minute', 'second'])):
            raise ValueError('''the acc_interval has higher frequency than day,
            yet the timeid variable is in the date format.
            ''')
        if acc_method_byvar is None:
            acc_method_byvar = {}
        # Build the per-column accumulation spec; explicit overrides win over
        # the defaults, and extra columns duplicated in timeseries are skipped.
        serieslist = []
        for ts in self.timeseries:
            if ts in acc_method_byvar.keys():
                method_dict = {'acc':acc_method_byvar[ts],'name':ts}
                serieslist.append(method_dict)
            else:
                method_dict = {'acc':default_ts_acc,'name':ts}
                serieslist.append(method_dict)
        for extra_col in self.extra_num_columns:
            if extra_col in self.timeseries:
                warnings.warn('''
                columns in extra_num_columns are also found in
                timeseries, and will be ignored.
                ''')
                continue
            elif extra_col in acc_method_byvar.keys():
                method_dict = {'acc':acc_method_byvar[extra_col],'name':extra_col}
                serieslist.append(method_dict)
            else:
                method_dict = {'acc':default_col_acc,'name':extra_col}
                serieslist.append(method_dict)
        # The accumulated series replace the input table in place.
        acc_result = conn.retrieve('timedata.timeseries', _messagelevel='error',
                                   table={'groupby':self.groupby_var,'name': input_tbl_name},
                                   series=serieslist,
                                   timeid=self.timeid,
                                   interval=acc_interval,
                                   trimid='BOTH',
                                   sumout=dict(name=input_tbl_name + '_summary', replace=True),
                                   casout=dict(name=input_tbl_name, replace=True))
        if acc_interval.startswith('dt'):
            print('NOTE: Timeseries are accumulated to the frequency of {}'.format(acc_interval[2:]))
        else:
            print('NOTE: Timeseries are accumulated to the frequency of {}'.format(acc_interval))
    def prepare_subsequences(self, seq_len, target, predictor_timeseries=None,
                             timeid=None, groupby=None,
                             input_length_name='xlen', target_length_name='ylen',
                             missing_handling='drop'):
        '''
        Build fixed-length RNN subsequences by adding lagged copies of the
        predictor (and optionally target) columns, plus the sequence-length
        columns expected by downstream sequence models.

        Parameters
        ----------
        seq_len : int
            Subsequence length (>= 1).
        target : string or single-element list
            Target column (univariate only).
        predictor_timeseries : string or list of strings, optional
            Predictor columns; defaults to the target (autoregressive).
        timeid : string, optional
            Time id column; falls back to the stored one.
        groupby : string or list of strings, optional
            BY-group variable(s); lags are reset at group boundaries.
        input_length_name : string, optional
            Name of the generated input-length column.
        target_length_name : string, optional
            Name of the generated target-length column.
        missing_handling : string, optional
            'drop' removes rows with any missing value (the lag warm-up rows).
        '''
        tbl_colinfo = self.columninfo().ColumnInfo
        input_tbl_params = self.to_outtable_params()
        input_tbl_name = input_tbl_params['name']
        conn = self.get_connection()
        if timeid is not None:
            self.timeid = timeid
        elif self.timeid is None:
            raise ValueError('''timeid is not specified''')
        if self.timeid not in tbl_colinfo.Column.values:
            raise ValueError('''timeid does not exist in the input table''')
        if groupby is not None:
            self.groupby_var = groupby
        if self.groupby_var is None:
            self.groupby_var = []
        elif not isinstance(self.groupby_var, list):
            self.groupby_var = [self.groupby_var]
        if set(self.groupby_var).issubset(tbl_colinfo.Column):
            int_to_double(conn, tbl_colinfo, input_tbl_name,
                          input_tbl_name, self.groupby_var)
        else:
            raise ValueError('''One or more variables specified in 'groupby'
            do not exist in the input table.
            ''')
        if isinstance(target, list):
            if len(target) > 1:
                raise DLPyError('''currently only support univariate target''')
        else:
            target = [target]
        if predictor_timeseries is None:
            predictor_timeseries = target
        elif not isinstance(predictor_timeseries, list):
            predictor_timeseries = [predictor_timeseries]
        # Autoregressive when the target is among its own predictors; then
        # the target is lagged separately from the independent predictors.
        if set(target).issubset(predictor_timeseries):
            independent_pred = [var for var in predictor_timeseries
                                if var not in target]
            self.auto_regressive = True
        else:
            independent_pred = predictor_timeseries
            self.auto_regressive = False
        if not set(target).issubset(tbl_colinfo.Column):
            raise ValueError('''invalid target variable''')
        if len(independent_pred) > 0:
            if not set(independent_pred).issubset(tbl_colinfo.Column):
                raise ValueError('''columns in predictor_timeseries are absent from
                the accumulated timeseriest table.''')
        if self.timeseries is None:
            warnings.warn('''timeseries has not been formatted by timeseries_formatting,
            consider reload the data and use timeseries_formatting to format the data,
            unless the data has already been pre-formatted.''')
        else:
            if not set(target).issubset(self.timeseries):
                warnings.warn('''target is not in pre-formatted timeseries,
                consider reload the data and use timeseries_formatting to format the data,
                unless the data has already been pre-formatted.''')
            if len(independent_pred) > 0:
                if not set(independent_pred).issubset(self.timeseries):
                    warnings.warn('''
                    some of predictor_timeseries are not in pre-accumulated timeseries,\n
                    consider reload the data and use timeseries_accumulation to accumulate the data,\n
                    unless the data has already been pre-formatted.
                    ''')
        self.target = target[0]
        self.independent_pred = independent_pred
        self.seq_len = seq_len
        if self.seq_len < 1:
            raise ValueError('''RNN sequence length at least need to be 1''')
        # Generate one data step that adds all lag columns; BY ordering makes
        # first.<var> flags available so lags reset at group boundaries.
        sasCode = 'data {0}; set {0}; by {1} {2};'.format(
            input_tbl_name, ' '.join(self.groupby_var), self.timeid)
        if self.seq_len > 1:
            for var in self.independent_pred:
                sasCode += self.create_lags(var, self.seq_len - 1, self.groupby_var)
        if self.auto_regressive:
            # The target needs one extra lag since its current value is the label.
            sasCode += self.create_lags(self.target, self.seq_len, self.groupby_var)
        sasCode += '{0} = {1};'.format(input_length_name, self.seq_len)
        # Single-timestep numeric output.
        sasCode += '{} = 1;'.format(target_length_name)
        if missing_handling == 'drop':
            sasCode += 'if not cmiss(of _all_) then output {};'.format(input_tbl_name)
        sasCode += 'run;'
        # Without BY groups, run single-threaded so lag() sees a global order.
        if len(self.groupby_var) == 0:
            conn.retrieve('dataStep.runCode', _messagelevel='error', code=sasCode,
                          single='Yes')
        else:
            conn.retrieve('dataStep.runCode', _messagelevel='error', code=sasCode)
        # Assemble the model input column list, oldest timestep first.
        self.input_vars = []
        for i in range(self.seq_len):
            if self.auto_regressive:
                self.input_vars.append('{0}_lag{1}'.format(self.target, i+1))
            for var in self.independent_pred:
                if i == 0:
                    self.input_vars.append(var)
                else:
                    self.input_vars.append('{0}_lag{1}'.format(var, i))
        self.input_vars.reverse()
        self.tokensize = len(predictor_timeseries)
        self.sequence_opt = dict(input_length=input_length_name,
                                 target_length=target_length_name,
                                 token_size=self.tokensize)
        self.inputs_target = dict(inputs=self.input_vars,
                                  target=self.target)
        print('NOTE: timeseries subsequences are prepared with subsequence length = {}'.format(seq_len))
@property
def timeid_type(self):
tbl_colinfo = self.columninfo().ColumnInfo
timeid_type = self.identify_coltype(self.timeid, tbl_colinfo)
return timeid_type
    @staticmethod
    def identify_coltype(col, tbl_colinfo):
        '''
        Classify column `col` as 'date', 'datetime', or its raw CAS type by
        matching its SAS display format (when present in `tbl_colinfo`)
        against swat's registered date/datetime format patterns.

        Raises ValueError when the column does not exist, and DLPyError when
        the format matches both a date and a datetime pattern.
        '''
        if col not in tbl_colinfo.Column.values:
            raise ValueError('''variable {} does not exist in input table.
            '''.format(col))
        # The Format column may be absent from the column info entirely.
        if 'Format' in tbl_colinfo.columns:
            cas_timeid_fmt = tbl_colinfo.Format[tbl_colinfo.Column == col].values[0]
        else:
            cas_timeid_fmt = None
        col_type = tbl_colinfo.Type[tbl_colinfo.Column == col].values[0]
        if cas_timeid_fmt:
            # \Z anchors the pattern so the whole format name must match.
            for pattern in swat.options.cas.dataset.date_formats:
                if re.match(r'{}\Z'.format(pattern), cas_timeid_fmt):
                    col_type = 'date'
                    break
            for pattern in swat.options.cas.dataset.datetime_formats:
                if re.match(r'{}\Z'.format(pattern), cas_timeid_fmt):
                    if col_type == 'date':
                        raise DLPyError('''{} format in CASTable is ambiguous,
                        and can match both sas date and sas datetime format'''.format(col))
                    else:
                        col_type = 'datetime'
                        break
        return col_type
    def timeseries_partition(self, training_start=None, validation_start=None,
                             testing_start=None, end_time=None,
                             partition_var_name='split_id',
                             traintbl_suffix='train',
                             validtbl_suffix='valid',
                             testtbl_suffix='test'):
        '''
        Split the table into training, validation and testing sets by timeid.

        Each boundary is a float / datetime.date / datetime.datetime (or
        None); the training set ends right before validation_start, the
        validation set right before testing_start, and the testing set runs
        through end_time.  A None validation_start / testing_start makes the
        corresponding set empty.

        Returns
        -------
        ( training TimeseriesTable, validation TimeseriesTable, testing TimeseriesTable )
        '''
        self.partition_var_name = partition_var_name
        conn = self.get_connection()
        # Convert the boundaries into SAS expressions matching the timeid type.
        training_start = self.convert_to_sas_time_format(training_start, self.timeid_type)
        validation_start = self.convert_to_sas_time_format(validation_start, self.timeid_type)
        testing_start = self.convert_to_sas_time_format(testing_start, self.timeid_type)
        end_time = self.convert_to_sas_time_format(end_time, self.timeid_type)
        # Missing boundaries collapse onto the next one so the generated IF
        # statements cover contiguous, non-overlapping ranges; only the last
        # non-empty split is right-inclusive.
        if testing_start is None:
            testing_start = end_time
            test_statement = ';'
        else:
            test_statement = self.generate_splitting_code(
                self.timeid, testing_start, end_time,
                True, self.partition_var_name, 'test')
        if validation_start is None:
            validation_start = testing_start
            valid_statement = ';'
        else:
            if testing_start == end_time:
                valid_statement = self.generate_splitting_code(
                    self.timeid, validation_start, testing_start,
                    True, self.partition_var_name, 'valid')
            else:
                valid_statement = self.generate_splitting_code(
                    self.timeid, validation_start, testing_start,
                    False, self.partition_var_name, 'valid')
        if validation_start == end_time:
            train_statement = self.generate_splitting_code(
                self.timeid, training_start, validation_start,
                True, self.partition_var_name, 'train')
        else:
            train_statement = self.generate_splitting_code(
                self.timeid, training_start, validation_start,
                False, self.partition_var_name, 'train')
        input_tbl_params = self.to_outtable_params()
        input_tbl_name = input_tbl_params['name']
        traintbl_name = '_'.join([input_tbl_name, traintbl_suffix])
        validtbl_name = '_'.join([input_tbl_name, validtbl_suffix])
        testtbl_name = '_'.join([input_tbl_name, testtbl_suffix])
        # One data step routes each observation to at most one output table.
        splitting_code = '''
        data {4} {5} {6};
        set {0};
        {1}
        {2}
        {3}
        if {7} = 'train' then output {4};
        if {7} = 'valid' then output {5};
        if {7} = 'test' then output {6};
        run;
        '''.format(input_tbl_name, train_statement, valid_statement, test_statement,
                   traintbl_name, validtbl_name, testtbl_name, self.partition_var_name)
        conn.retrieve('dataStep.runCode', _messagelevel='error', code=splitting_code)
        # Propagate the timeseries metadata to the three result tables.
        train_out = dict(name=traintbl_name, timeid=self.timeid, groupby_var=self.groupby_var,
                         sequence_opt=self.sequence_opt, inputs_target=self.inputs_target)
        valid_out = dict(name=validtbl_name, timeid=self.timeid, groupby_var=self.groupby_var,
                         sequence_opt=self.sequence_opt, inputs_target=self.inputs_target)
        test_out = dict(name=testtbl_name, timeid=self.timeid, groupby_var=self.groupby_var,
                        sequence_opt=self.sequence_opt, inputs_target=self.inputs_target)
        train_out_tbl = TimeseriesTable(**train_out)
        train_out_tbl.set_connection(conn)
        valid_out_tbl = TimeseriesTable(**valid_out)
        valid_out_tbl.set_connection(conn)
        test_out_tbl = TimeseriesTable(**test_out)
        test_out_tbl.set_connection(conn)
        print('NOTE: Training set has {} observations'.format(train_out_tbl.shape[0]))
        print('NOTE: Validation set has {} observations'.format(valid_out_tbl.shape[0]))
        print('NOTE: Testing set has {} observations'.format(test_out_tbl.shape[0]))
        return train_out_tbl, valid_out_tbl, test_out_tbl
@staticmethod
def generate_splitting_code(timeid, start, end, right_inclusive,
                            partition_var_name, partition_val):
    '''
    Build a SAS data-step statement that labels an observation's partition.

    Parameters
    ----------
    timeid : string
        Name of the timeid column compared against the bounds.
    start : string or None
        SAS expression for the (inclusive) lower bound, or None for no
        lower bound.
    end : string or None
        SAS expression for the upper bound, or None for no upper bound.
    right_inclusive : bool
        Whether the upper bound uses '<=' (True) or '<' (False). Only
        consulted when ``end`` is given.
    partition_var_name : string
        Name of the variable that receives the partition label.
    partition_val : string
        Label to assign (e.g. 'train', 'valid', 'test').

    Returns
    -------
    string
        A SAS statement of the form ``if <bounds> then var = 'val';`` or an
        unconditional assignment when no bound is supplied.
    '''
    assign = '''{0} = '{1}';'''.format(partition_var_name, partition_val)
    bounds = []
    if start is not None:
        bounds.append('{0} >= {1}'.format(timeid, start))
    if end is not None:
        comparator = '<=' if right_inclusive else '<'
        bounds.append('{0} {1} {2}'.format(timeid, comparator, end))
    if not bounds:
        # Neither bound given: label every observation unconditionally.
        return assign
    return 'if {0} then {1}'.format(' and '.join(bounds), assign)
@staticmethod
def convert_to_sas_time_format(python_time, sas_format_type):
    """
    Convert a Python time value into the matching SAS literal expression.

    :param python_time: a ``datetime.date``/``datetime.datetime``, a real
        number (for 'double' timeids), or None. None is passed through.
    :param sas_format_type: the timeid column's format in the CASTable —
        one of 'date', 'datetime' or 'double'.
    :return: a SAS expression string (``mdy(...)`` / ``dhms(...)``), the
        number itself for 'double', or None when ``python_time`` is None.
    :raises ValueError: when ``python_time`` does not match the format type.
    :raises DLPyError: when ``sas_format_type`` is not a recognized format.
    """
    if sas_format_type == 'date':
        # datetime.datetime subclasses datetime.date, so both are accepted
        # here; only the calendar part is used.
        if isinstance(python_time, datetime.date):
            sas_time_str = 'mdy({0},{1},{2})'.format(python_time.month,
                python_time.day, python_time.year)
            return sas_time_str
        elif python_time is None:
            return None
        else:
            raise ValueError('''The timeid type is date format, so the input
            python time variable should be date or datetime format''')
    elif sas_format_type == 'datetime':
        if isinstance(python_time, datetime.datetime):
            sas_time_str = 'dhms(mdy({0},{1},{2}), {3}, {4}, {5})'.format(
                python_time.month, python_time.day, python_time.year,
                python_time.hour, python_time.minute, python_time.second)
            return sas_time_str
        elif isinstance(python_time, datetime.date):
            # A plain date maps to midnight (0:0:0) of that day.
            sas_time_str = 'dhms(mdy({0},{1},{2}), 0, 0, 0)'.format(
                python_time.month, python_time.day, python_time.year)
            return sas_time_str
        elif python_time is None:
            return None
        else:
            raise ValueError('''The timeid type is datetime format, so the input
            python time variable should be date or datetime format''')
    elif sas_format_type == 'double':
        if isinstance(python_time, numbers.Real):
            # Numeric timeids need no conversion.
            return python_time
        elif python_time is None:
            return None
        else:
            raise ValueError('''The timeid type is double, so the input
            python time variable should be int or float''')
    else:
        raise DLPyError('''timeid format in CASTable is wrong, consider reload
        the table and formatting it with timeseries_formatting''')
@staticmethod
def create_lags(varname, nlags, byvar):
    '''
    Generate SAS data-step code creating ``nlags`` lag columns of a variable.

    Each lag ``k`` is named ``<varname>_lagk`` and is computed from the
    previous lag (lag 1 from the variable itself). When by-group variables
    are supplied, every lag is reset to missing ('.') on the first row of a
    group so values never leak across groups.

    Parameters
    ----------
    varname : string
        Name of the column to lag.
    nlags : int
        Number of lag columns to generate (0 yields an empty string).
    byvar : string or list-of-strings
        By-group variable name(s); an empty list disables the group reset.

    Returns
    -------
    string
        Concatenated SAS statements.
    '''
    if not isinstance(byvar, list):
        byvar = [byvar]
    # Condition that is true on the first observation of any by-group.
    reset_condition = ' or '.join('first.{}'.format(v) for v in byvar)
    pieces = []
    prev = varname
    for lag_no in range(1, nlags + 1):
        lagged = '{0}_lag{1}'.format(varname, lag_no)
        pieces.append('{0} = lag({1});'.format(lagged, prev))
        if byvar:
            pieces.append('if {0} then {1} = .;'.format(reset_condition, lagged))
        prev = lagged
    return ''.join(pieces)
@staticmethod
def find_file_caslib(conn, path):
    '''
    Locate the caslib that can access an absolute server-side file path.

    Parameters
    ----------
    conn : CAS
        The CAS connection object.
    path : string
        Absolute path to the file on the CAS server.

    Returns
    -------
    (string, string)
        ``(caslib_name, path_relative_to_caslib)``, or ``(None, None)``
        when no caslib covers the path.
    '''
    # Fetch the caslib table once; the original issued three identical
    # conn.caslibinfo() server round-trips (for Path, Name and Subdirs).
    caslib_info = conn.caslibinfo().CASLibInfo
    paths = caslib_info.Path.tolist()
    caslibs = caslib_info.Name.tolist()
    subdirs = caslib_info.Subdirs.tolist()

    # Path separator depends on the CAS host OS (POSIX vs Windows).
    server_type = get_cas_host_type(conn).lower()
    if server_type.startswith(("lin", "osx")):
        sep = '/'
    else:
        sep = '\\'

    for i, directory in enumerate(paths):
        if not path.startswith(directory):
            continue
        rest_path = path[len(directory):]
        if subdirs[i] == 1:
            # Caslib allows subdirectories: any path under it qualifies.
            return (caslibs[i], rest_path)
        if sep not in rest_path:
            # No-subdir caslib only serves files directly inside it.
            return (caslibs[i], rest_path)
    return (None, None)
| true | true |
f720ffac3d7e28046fdffc89dc587da7ce834892 | 9,152 | py | Python | tests/utils_tests/test_functional.py | Lord-Elrond/django | 178109c1734ccc16386c3e3cbae1465c7a1b8ed8 | [
"BSD-3-Clause",
"0BSD"
] | 61,676 | 2015-01-01T00:05:13.000Z | 2022-03-31T20:37:54.000Z | tests/utils_tests/test_functional.py | Lord-Elrond/django | 178109c1734ccc16386c3e3cbae1465c7a1b8ed8 | [
"BSD-3-Clause",
"0BSD"
] | 8,884 | 2015-01-01T00:12:05.000Z | 2022-03-31T19:53:11.000Z | tests/utils_tests/test_functional.py | Lord-Elrond/django | 178109c1734ccc16386c3e3cbae1465c7a1b8ed8 | [
"BSD-3-Clause",
"0BSD"
] | 33,143 | 2015-01-01T02:04:52.000Z | 2022-03-31T19:42:46.000Z | from unittest import mock
from django.test import SimpleTestCase
from django.test.utils import ignore_warnings
from django.utils.deprecation import RemovedInDjango50Warning
from django.utils.functional import cached_property, classproperty, lazy
class FunctionalTests(SimpleTestCase):
    """Tests for django.utils.functional: lazy(), cached_property and
    classproperty."""

    def test_lazy(self):
        # lazy() defers evaluation; calling the proxy yields the real values.
        t = lazy(lambda: tuple(range(3)), list, tuple)
        for a, b in zip(t(), range(3)):
            self.assertEqual(a, b)

    def test_lazy_base_class(self):
        """lazy also finds base class methods in the proxy object"""
        class Base:
            def base_method(self):
                pass

        class Klazz(Base):
            pass

        t = lazy(lambda: Klazz(), Klazz)()
        self.assertIn('base_method', dir(t))

    def test_lazy_base_class_override(self):
        """lazy finds the correct (overridden) method implementation"""
        class Base:
            def method(self):
                return 'Base'

        class Klazz(Base):
            def method(self):
                return 'Klazz'

        t = lazy(lambda: Klazz(), Base)()
        self.assertEqual(t.method(), 'Klazz')

    def test_lazy_object_to_string(self):
        # str()/bytes() on the proxy delegate to the wrapped object's
        # __str__/__bytes__ (non-ASCII round-trips included).
        class Klazz:
            def __str__(self):
                return "Î am ā Ǩlâzz."

            def __bytes__(self):
                return b"\xc3\x8e am \xc4\x81 binary \xc7\xa8l\xc3\xa2zz."

        t = lazy(lambda: Klazz(), Klazz)()
        self.assertEqual(str(t), "Î am ā Ǩlâzz.")
        self.assertEqual(bytes(t), b"\xc3\x8e am \xc4\x81 binary \xc7\xa8l\xc3\xa2zz.")

    def assertCachedPropertyWorks(self, attr, Class):
        # Shared helper: verifies the full cached_property contract for the
        # attribute named ``attr`` on ``Class`` (and on a subclass).
        with self.subTest(attr=attr):
            def get(source):
                return getattr(source, attr)

            obj = Class()

            class SubClass(Class):
                pass

            subobj = SubClass()
            # Docstring is preserved.
            self.assertEqual(get(Class).__doc__, 'Here is the docstring...')
            self.assertEqual(get(SubClass).__doc__, 'Here is the docstring...')
            # It's cached.
            self.assertEqual(get(obj), get(obj))
            self.assertEqual(get(subobj), get(subobj))
            # The correct value is returned.
            self.assertEqual(get(obj)[0], 1)
            self.assertEqual(get(subobj)[0], 1)
            # State isn't shared between instances.
            obj2 = Class()
            subobj2 = SubClass()
            self.assertNotEqual(get(obj), get(obj2))
            self.assertNotEqual(get(subobj), get(subobj2))
            # It behaves like a property when there's no instance.
            self.assertIsInstance(get(Class), cached_property)
            self.assertIsInstance(get(SubClass), cached_property)
            # 'other_value' doesn't become a property.
            self.assertTrue(callable(obj.other_value))
            self.assertTrue(callable(subobj.other_value))

    def test_cached_property(self):
        """cached_property caches its value and behaves like a property."""
        class Class:
            @cached_property
            def value(self):
                """Here is the docstring..."""
                return 1, object()

            @cached_property
            def __foo__(self):
                """Here is the docstring..."""
                return 1, object()

            def other_value(self):
                """Here is the docstring..."""
                return 1, object()

            other = cached_property(other_value)

        attrs = ['value', 'other', '__foo__']
        for attr in attrs:
            self.assertCachedPropertyWorks(attr, Class)

    @ignore_warnings(category=RemovedInDjango50Warning)
    def test_cached_property_name(self):
        # Deprecated ``name`` kwarg: still works, but the explicit name is
        # ignored in favor of the one bound via __set_name__.
        class Class:
            def other_value(self):
                """Here is the docstring..."""
                return 1, object()

            other = cached_property(other_value, name='other')
            other2 = cached_property(other_value, name='different_name')

        self.assertCachedPropertyWorks('other', Class)
        # An explicit name is ignored.
        obj = Class()
        obj.other2
        self.assertFalse(hasattr(obj, 'different_name'))

    def test_cached_property_name_deprecation_warning(self):
        def value(self):
            return 1

        msg = "The name argument is deprecated as it's unnecessary as of Python 3.6."
        with self.assertWarnsMessage(RemovedInDjango50Warning, msg):
            cached_property(value, name='other_name')

    def test_cached_property_auto_name(self):
        """
        cached_property caches its value and behaves like a property
        on mangled methods or when the name kwarg isn't set.
        """
        class Class:
            @cached_property
            def __value(self):
                """Here is the docstring..."""
                return 1, object()

            def other_value(self):
                """Here is the docstring..."""
                return 1, object()

            other = cached_property(other_value)

        # '__value' is name-mangled to '_Class__value' on the class.
        attrs = ['_Class__value', 'other']
        for attr in attrs:
            self.assertCachedPropertyWorks(attr, Class)

    def test_cached_property_reuse_different_names(self):
        """Disallow this case because the decorated function wouldn't be cached."""
        with self.assertRaises(RuntimeError) as ctx:
            class ReusedCachedProperty:
                @cached_property
                def a(self):
                    pass

                b = a

        self.assertEqual(
            str(ctx.exception.__context__),
            str(TypeError(
                "Cannot assign the same cached_property to two different "
                "names ('a' and 'b')."
            ))
        )

    def test_cached_property_reuse_same_name(self):
        """
        Reusing a cached_property on different classes under the same name is
        allowed.
        """
        counter = 0

        @cached_property
        def _cp(_self):
            nonlocal counter
            counter += 1
            return counter

        class A:
            cp = _cp

        class B:
            cp = _cp

        a = A()
        b = B()
        self.assertEqual(a.cp, 1)
        self.assertEqual(b.cp, 2)
        self.assertEqual(a.cp, 1)

    def test_cached_property_set_name_not_called(self):
        # Assigning the descriptor after class creation skips __set_name__,
        # which cached_property requires.
        cp = cached_property(lambda s: None)

        class Foo:
            pass

        Foo.cp = cp
        msg = 'Cannot use cached_property instance without calling __set_name__() on it.'
        with self.assertRaisesMessage(TypeError, msg):
            Foo().cp

    def test_lazy_add(self):
        lazy_4 = lazy(lambda: 4, int)
        lazy_5 = lazy(lambda: 5, int)
        self.assertEqual(lazy_4() + lazy_5(), 9)

    def test_lazy_equality(self):
        """
        == and != work correctly for Promises.
        """
        lazy_a = lazy(lambda: 4, int)
        lazy_b = lazy(lambda: 4, int)
        lazy_c = lazy(lambda: 5, int)
        self.assertEqual(lazy_a(), lazy_b())
        self.assertNotEqual(lazy_b(), lazy_c())

    def test_lazy_repr_text(self):
        original_object = 'Lazy translation text'
        lazy_obj = lazy(lambda: original_object, str)
        self.assertEqual(repr(original_object), repr(lazy_obj()))

    def test_lazy_repr_int(self):
        original_object = 15
        lazy_obj = lazy(lambda: original_object, int)
        self.assertEqual(repr(original_object), repr(lazy_obj()))

    def test_lazy_repr_bytes(self):
        original_object = b'J\xc3\xbcst a str\xc3\xadng'
        lazy_obj = lazy(lambda: original_object, bytes)
        self.assertEqual(repr(original_object), repr(lazy_obj()))

    def test_lazy_class_preparation_caching(self):
        # lazy() should prepare the proxy class only once i.e. the first time
        # it's used.
        lazified = lazy(lambda: 0, int)
        __proxy__ = lazified().__class__
        with mock.patch.object(__proxy__, '__prepare_class__') as mocked:
            lazified()
            mocked.assert_not_called()

    def test_lazy_bytes_and_str_result_classes(self):
        lazy_obj = lazy(lambda: 'test', str, bytes)
        msg = 'Cannot call lazy() with both bytes and text return types.'
        with self.assertRaisesMessage(ValueError, msg):
            lazy_obj()

    def test_classproperty_getter(self):
        class Foo:
            foo_attr = 123

            def __init__(self):
                self.foo_attr = 456

            @classproperty
            def foo(cls):
                return cls.foo_attr

        class Bar:
            bar = classproperty()

            @bar.getter
            def bar(cls):
                return 123

        # classproperty reads the class attribute even through an instance.
        self.assertEqual(Foo.foo, 123)
        self.assertEqual(Foo().foo, 123)
        self.assertEqual(Bar.bar, 123)
        self.assertEqual(Bar().bar, 123)

    def test_classproperty_override_getter(self):
        class Foo:
            @classproperty
            def foo(cls):
                return 123

            @foo.getter
            def foo(cls):
                return 456

        self.assertEqual(Foo.foo, 456)
        self.assertEqual(Foo().foo, 456)
| 31.777778 | 89 | 0.573864 | from unittest import mock
from django.test import SimpleTestCase
from django.test.utils import ignore_warnings
from django.utils.deprecation import RemovedInDjango50Warning
from django.utils.functional import cached_property, classproperty, lazy
class FunctionalTests(SimpleTestCase):
def test_lazy(self):
t = lazy(lambda: tuple(range(3)), list, tuple)
for a, b in zip(t(), range(3)):
self.assertEqual(a, b)
def test_lazy_base_class(self):
class Base:
def base_method(self):
pass
class Klazz(Base):
pass
t = lazy(lambda: Klazz(), Klazz)()
self.assertIn('base_method', dir(t))
def test_lazy_base_class_override(self):
class Base:
def method(self):
return 'Base'
class Klazz(Base):
def method(self):
return 'Klazz'
t = lazy(lambda: Klazz(), Base)()
self.assertEqual(t.method(), 'Klazz')
def test_lazy_object_to_string(self):
class Klazz:
def __str__(self):
return "Î am ā Ǩlâzz."
def __bytes__(self):
return b"\xc3\x8e am \xc4\x81 binary \xc7\xa8l\xc3\xa2zz."
t = lazy(lambda: Klazz(), Klazz)()
self.assertEqual(str(t), "Î am ā Ǩlâzz.")
self.assertEqual(bytes(t), b"\xc3\x8e am \xc4\x81 binary \xc7\xa8l\xc3\xa2zz.")
def assertCachedPropertyWorks(self, attr, Class):
with self.subTest(attr=attr):
def get(source):
return getattr(source, attr)
obj = Class()
class SubClass(Class):
pass
subobj = SubClass()
self.assertEqual(get(Class).__doc__, 'Here is the docstring...')
self.assertEqual(get(SubClass).__doc__, 'Here is the docstring...')
self.assertEqual(get(obj), get(obj))
self.assertEqual(get(subobj), get(subobj))
# The correct value is returned.
self.assertEqual(get(obj)[0], 1)
self.assertEqual(get(subobj)[0], 1)
# State isn't shared between instances.
obj2 = Class()
subobj2 = SubClass()
self.assertNotEqual(get(obj), get(obj2))
self.assertNotEqual(get(subobj), get(subobj2))
self.assertIsInstance(get(Class), cached_property)
self.assertIsInstance(get(SubClass), cached_property)
# 'other_value' doesn't become a property.
self.assertTrue(callable(obj.other_value))
self.assertTrue(callable(subobj.other_value))
def test_cached_property(self):
class Class:
@cached_property
def value(self):
return 1, object()
@cached_property
def __foo__(self):
return 1, object()
def other_value(self):
return 1, object()
other = cached_property(other_value)
attrs = ['value', 'other', '__foo__']
for attr in attrs:
self.assertCachedPropertyWorks(attr, Class)
@ignore_warnings(category=RemovedInDjango50Warning)
def test_cached_property_name(self):
class Class:
def other_value(self):
return 1, object()
other = cached_property(other_value, name='other')
other2 = cached_property(other_value, name='different_name')
self.assertCachedPropertyWorks('other', Class)
obj = Class()
obj.other2
self.assertFalse(hasattr(obj, 'different_name'))
def test_cached_property_name_deprecation_warning(self):
def value(self):
return 1
msg = "The name argument is deprecated as it's unnecessary as of Python 3.6."
with self.assertWarnsMessage(RemovedInDjango50Warning, msg):
cached_property(value, name='other_name')
def test_cached_property_auto_name(self):
class Class:
@cached_property
def __value(self):
return 1, object()
def other_value(self):
return 1, object()
other = cached_property(other_value)
attrs = ['_Class__value', 'other']
for attr in attrs:
self.assertCachedPropertyWorks(attr, Class)
def test_cached_property_reuse_different_names(self):
with self.assertRaises(RuntimeError) as ctx:
class ReusedCachedProperty:
@cached_property
def a(self):
pass
b = a
self.assertEqual(
str(ctx.exception.__context__),
str(TypeError(
"Cannot assign the same cached_property to two different "
"names ('a' and 'b')."
))
)
def test_cached_property_reuse_same_name(self):
counter = 0
@cached_property
def _cp(_self):
nonlocal counter
counter += 1
return counter
class A:
cp = _cp
class B:
cp = _cp
a = A()
b = B()
self.assertEqual(a.cp, 1)
self.assertEqual(b.cp, 2)
self.assertEqual(a.cp, 1)
def test_cached_property_set_name_not_called(self):
cp = cached_property(lambda s: None)
class Foo:
pass
Foo.cp = cp
msg = 'Cannot use cached_property instance without calling __set_name__() on it.'
with self.assertRaisesMessage(TypeError, msg):
Foo().cp
def test_lazy_add(self):
lazy_4 = lazy(lambda: 4, int)
lazy_5 = lazy(lambda: 5, int)
self.assertEqual(lazy_4() + lazy_5(), 9)
def test_lazy_equality(self):
lazy_a = lazy(lambda: 4, int)
lazy_b = lazy(lambda: 4, int)
lazy_c = lazy(lambda: 5, int)
self.assertEqual(lazy_a(), lazy_b())
self.assertNotEqual(lazy_b(), lazy_c())
def test_lazy_repr_text(self):
original_object = 'Lazy translation text'
lazy_obj = lazy(lambda: original_object, str)
self.assertEqual(repr(original_object), repr(lazy_obj()))
def test_lazy_repr_int(self):
original_object = 15
lazy_obj = lazy(lambda: original_object, int)
self.assertEqual(repr(original_object), repr(lazy_obj()))
def test_lazy_repr_bytes(self):
original_object = b'J\xc3\xbcst a str\xc3\xadng'
lazy_obj = lazy(lambda: original_object, bytes)
self.assertEqual(repr(original_object), repr(lazy_obj()))
def test_lazy_class_preparation_caching(self):
# lazy() should prepare the proxy class only once i.e. the first time
# it's used.
lazified = lazy(lambda: 0, int)
__proxy__ = lazified().__class__
with mock.patch.object(__proxy__, '__prepare_class__') as mocked:
lazified()
mocked.assert_not_called()
def test_lazy_bytes_and_str_result_classes(self):
lazy_obj = lazy(lambda: 'test', str, bytes)
msg = 'Cannot call lazy() with both bytes and text return types.'
with self.assertRaisesMessage(ValueError, msg):
lazy_obj()
def test_classproperty_getter(self):
class Foo:
foo_attr = 123
def __init__(self):
self.foo_attr = 456
@classproperty
def foo(cls):
return cls.foo_attr
class Bar:
bar = classproperty()
@bar.getter
def bar(cls):
return 123
self.assertEqual(Foo.foo, 123)
self.assertEqual(Foo().foo, 123)
self.assertEqual(Bar.bar, 123)
self.assertEqual(Bar().bar, 123)
def test_classproperty_override_getter(self):
class Foo:
@classproperty
def foo(cls):
return 123
@foo.getter
def foo(cls):
return 456
self.assertEqual(Foo.foo, 456)
self.assertEqual(Foo().foo, 456)
| true | true |
f7210110e7084f60ae5367f63c7dbd932a3b569e | 4,446 | py | Python | examples/batch_mode/14-burning_ship-deeper_DEM.py | GBillotey/Fractalshades | e100b12db031f016bf1a8a1f4fad9ca1c64a0302 | [
"MIT"
] | null | null | null | examples/batch_mode/14-burning_ship-deeper_DEM.py | GBillotey/Fractalshades | e100b12db031f016bf1a8a1f4fad9ca1c64a0302 | [
"MIT"
] | 1 | 2021-11-01T14:55:57.000Z | 2021-11-01T14:55:57.000Z | examples/batch_mode/14-burning_ship-deeper_DEM.py | GBillotey/Fractalshades | e100b12db031f016bf1a8a1f4fad9ca1c64a0302 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
============================
14 - Burning ship deeper DEM
============================
Plotting of a distance estimation for the Burning ship (power-2).
This zoom is deeper, featuring a miniship at 1.e-101
Reference:
`fractalshades.models.Perturbation_burning_ship`
"""
import os
import numpy as np
import fractalshades as fs
import fractalshades.models as fsm
import fractalshades.colors as fscolors
from fractalshades.postproc import (
Postproc_batch,
Continuous_iter_pp,
DEM_normal_pp,
DEM_pp,
Raw_pp,
)
from fractalshades.colors.layers import (
Color_layer,
Bool_layer,
Normal_map_layer,
Virtual_layer,
Blinn_lighting,
)
def plot(plot_dir):
    """Compute and render the deep Burning-ship DEM zoom into *plot_dir*.

    Runs the perturbation-based calculation, builds the post-processing
    layers (continuous iteration, distance estimation, interior mask,
    normal map) and writes the shaded image.
    """
    fs.settings.enable_multithreading = True
    fs.settings.inspect_calc = True

    # A simple showcase using perturbation technique.
    # x/y: center of the view (high-precision decimal strings);
    # dx: width of the view — ~7e-101, the deep zoom from the module header.
    x = '0.533551593577038561769721161491702555962775680136595415306315189524970818968817900068355227861158570104764433694'
    y = '1.26175074578870311547721223871955368990255513054155186351034363459852900933566891849764050954410207620093433856'
    dx = '7.072814368784043e-101'
    precision = 150
    nx = 2400
    xy_ratio = 1.8
    sign = 1.0
    # DEM_min clips the distance estimate before taking the log (below);
    # zmin/zmax are the colormap probe positions.
    DEM_min = 5.e-5
    zmin = 0.0
    zmax = 1.0

    # As this formula is non-analytic, we will 'unskew' based on the
    # influencing miniship "size estimate" matrix.
    has_skew = True
    skew_00 = 1.3141410612942215
    skew_01 = 0.8651590600810832
    skew_10 = 0.6372176654581702
    skew_11 = 1.1804627997751416

    calc_name = "Burning_ship"
    colormap = fscolors.cmap_register["dawn"]

    # Run the calculation
    f = fsm.Perturbation_burning_ship(plot_dir)
    # f.clean_up()  # uncomment to discard cached results and recompute

    f.zoom(
        precision=precision,
        x=x,
        y=y,
        dx=dx,
        nx=nx,
        xy_ratio=xy_ratio,
        theta_deg=-2.,
        projection="cartesian",
        antialiasing=False,
        has_skew=has_skew,
        skew_00=skew_00,
        skew_01=skew_01,
        skew_10=skew_10,
        skew_11=skew_11
    )

    f.calc_std_div(
        calc_name=calc_name,
        subset=None,
        max_iter=50000,
        M_divergence=1.e3,
        BLA_params={"eps": 1.e-6},
    )
    f.run()
    print("has been run")

    # Plot the image
    pp = Postproc_batch(f, calc_name)
    pp.add_postproc("continuous_iter", Continuous_iter_pp())
    pp.add_postproc("distance_estimation", DEM_pp())
    # stop_reason != 1 marks non-diverging (interior) pixels.
    pp.add_postproc("interior", Raw_pp("stop_reason", func="x != 1."))
    pp.add_postproc("DEM_map", DEM_normal_pp(kind="potential"))

    plotter = fs.Fractal_plotter(pp)
    plotter.add_layer(Bool_layer("interior", output=False))
    plotter.add_layer(Normal_map_layer("DEM_map", max_slope=50, output=False))
    plotter.add_layer(
        Virtual_layer("continuous_iter", func=None, output=False)
    )
    # Log of the (clipped) distance estimate; infinities map to the floor.
    cmap_func = lambda x: sign * np.where(
        np.isinf(x),
        np.log(DEM_min),
        np.log(np.clip(x, DEM_min, None))
    )
    plotter.add_layer(Color_layer(
        "distance_estimation",
        func=cmap_func,
        colormap=colormap,
        probes_z=[zmin, zmax],
        probes_kind="relative",
        output=True
    ))

    # Interior pixels get a flat fill color; the normal map masks to black.
    plotter["distance_estimation"].set_mask(plotter["interior"],
        mask_color=(0.0, 0.22745098173618317, 0.9803921580314636))
    plotter["DEM_map"].set_mask(plotter["interior"], mask_color=(0., 0., 0.))

    # define the lighting and apply the shading
    light = Blinn_lighting(0.4, np.array([1., 1., 1.]))
    light.add_light_source(
        k_diffuse=0.4,
        k_specular=3.,
        shininess=100.,
        angles=(45., 40.),
        coords=None,
        color=np.array([1.0, 1.0, 0.98]))
    # Alternative second light source, kept for experimentation:
    # light.add_light_source(
    #     k_diffuse=0.8,
    #     k_specular=1.,
    #     shininess=40.,
    #     angles=(90., 20.),
    #     coords=None,
    #     color=np.array([1., 1., 1.]))
    plotter["distance_estimation"].shade(plotter["DEM_map"], light)

    plotter.plot()
if __name__ == "__main__":
    # Some magic to get the directory for plotting: with a name that matches
    # the file or a temporary dir if we are building the documentation
    try:
        realpath = os.path.realpath(__file__)
        plot_dir = os.path.splitext(realpath)[0]
        plot(plot_dir)
    except NameError:
        # __file__ is undefined when the example is exec'd by the docs
        # builder, which raises NameError above — render into a throwaway
        # temporary directory instead, with output suppressed.
        import tempfile
        with tempfile.TemporaryDirectory() as plot_dir:
            fs.utils.exec_no_output(plot, plot_dir)
| 27.7875 | 123 | 0.639226 |
import os
import numpy as np
import fractalshades as fs
import fractalshades.models as fsm
import fractalshades.colors as fscolors
from fractalshades.postproc import (
Postproc_batch,
Continuous_iter_pp,
DEM_normal_pp,
DEM_pp,
Raw_pp,
)
from fractalshades.colors.layers import (
Color_layer,
Bool_layer,
Normal_map_layer,
Virtual_layer,
Blinn_lighting,
)
def plot(plot_dir):
fs.settings.enable_multithreading = True
fs.settings.inspect_calc = True
x = '0.533551593577038561769721161491702555962775680136595415306315189524970818968817900068355227861158570104764433694'
y = '1.26175074578870311547721223871955368990255513054155186351034363459852900933566891849764050954410207620093433856'
dx = '7.072814368784043e-101'
precision = 150
nx = 2400
xy_ratio = 1.8
sign = 1.0
DEM_min = 5.e-5
zmin = 0.0
zmax = 1.0
has_skew = True
skew_00 = 1.3141410612942215
skew_01 = 0.8651590600810832
skew_10 = 0.6372176654581702
skew_11 = 1.1804627997751416
calc_name="Burning_ship"
colormap = fscolors.cmap_register["dawn"]
f = fsm.Perturbation_burning_ship(plot_dir)
f.zoom(
precision=precision,
x=x,
y=y,
dx=dx,
nx=nx,
xy_ratio=xy_ratio,
theta_deg=-2.,
projection="cartesian",
antialiasing=False,
has_skew=has_skew,
skew_00=skew_00,
skew_01=skew_01,
skew_10=skew_10,
skew_11=skew_11
)
f.calc_std_div(
calc_name=calc_name,
subset=None,
max_iter=50000,
M_divergence=1.e3,
BLA_params={"eps": 1.e-6},
)
f.run()
print("has been run")
pp = Postproc_batch(f, calc_name)
pp.add_postproc("continuous_iter", Continuous_iter_pp())
pp.add_postproc("distance_estimation", DEM_pp())
pp.add_postproc("interior", Raw_pp("stop_reason", func="x != 1."))
pp.add_postproc("DEM_map", DEM_normal_pp(kind="potential"))
plotter = fs.Fractal_plotter(pp)
plotter.add_layer(Bool_layer("interior", output=False))
plotter.add_layer(Normal_map_layer("DEM_map", max_slope=50, output=False))
plotter.add_layer(
Virtual_layer("continuous_iter", func=None, output=False)
)
cmap_func = lambda x: sign * np.where(
np.isinf(x),
np.log(DEM_min),
np.log(np.clip(x, DEM_min, None))
)
plotter.add_layer(Color_layer(
"distance_estimation",
func=cmap_func,
colormap=colormap,
probes_z=[zmin, zmax],
probes_kind="relative",
output=True
))
plotter["distance_estimation"].set_mask(plotter["interior"],
mask_color=(0.0, 0.22745098173618317, 0.9803921580314636))
plotter["DEM_map"].set_mask(plotter["interior"], mask_color=(0., 0., 0.))
light = Blinn_lighting(0.4, np.array([1., 1., 1.]))
light.add_light_source(
k_diffuse=0.4,
k_specular=3.,
shininess=100.,
angles=(45., 40.),
coords=None,
color=np.array([1.0, 1.0, 0.98]))
plotter["distance_estimation"].shade(plotter["DEM_map"], light)
plotter.plot()
if __name__ == "__main__":
try:
realpath = os.path.realpath(__file__)
plot_dir = os.path.splitext(realpath)[0]
plot(plot_dir)
except NameError:
import tempfile
with tempfile.TemporaryDirectory() as plot_dir:
fs.utils.exec_no_output(plot, plot_dir)
| true | true |
f721011b4e470373ce2d983fc11e2f51ebcc9318 | 2,154 | py | Python | mean_var_std.py | jmacdonald2010/mean-variance-standard-deviation-calculator | badae42c099081610fd55ea5a788867c352da6c0 | [
"MIT"
] | null | null | null | mean_var_std.py | jmacdonald2010/mean-variance-standard-deviation-calculator | badae42c099081610fd55ea5a788867c352da6c0 | [
"MIT"
] | null | null | null | mean_var_std.py | jmacdonald2010/mean-variance-standard-deviation-calculator | badae42c099081610fd55ea5a788867c352da6c0 | [
"MIT"
] | null | null | null | import numpy as np
def calculate(list):
    """Compute summary statistics of nine numbers arranged as a 3x3 matrix.

    :param list: sequence of exactly nine numbers, filled row-wise into a
        3x3 matrix. (The parameter name shadows the ``list`` builtin, but is
        kept so keyword callers ``calculate(list=...)`` are not broken.)
    :return: dict mapping each statistic name ('mean', 'variance',
        'standard deviation', 'max', 'min', 'sum') to a 3-element list:
        [per-column values, per-row values, flattened value].
    :raises ValueError: if ``list`` does not contain exactly nine numbers.
    """
    if len(list) != 9:
        raise ValueError('List must contain nine numbers.')

    # reshape replaces the hand-built nested-list construction; the leftover
    # debug print of the array was removed.
    matrix = np.array(list).reshape(3, 3)

    # One (name, reducer) table instead of six copy-pasted blocks.
    stats = (
        ('mean', np.mean),
        ('variance', np.var),
        ('standard deviation', np.std),
        ('max', np.max),
        ('min', np.min),
        ('sum', np.sum),
    )
    # axis=0 -> column stats, axis=1 -> row stats, no axis -> flattened.
    # .tolist() converts numpy arrays/scalars to plain Python types so the
    # result is JSON-serializable and directly comparable.
    return {
        name: [reducer(matrix, axis=0).tolist(),
               reducer(matrix, axis=1).tolist(),
               reducer(matrix).tolist()]
        for name, reducer in stats
    }
# Manual demo of calculate(); guarded under __main__ so importing this module
# (e.g. from the project's real test runner) no longer executes the call and
# prints as an import-time side effect.
if __name__ == '__main__':
    print(calculate([0, 1, 2, 3, 4, 5, 6, 7, 8]))
    # Expected output:
    # {
    #     'mean': [[3.0, 4.0, 5.0], [1.0, 4.0, 7.0], 4.0],
    #     'variance': [[6.0, 6.0, 6.0], [0.6666666666666666, 0.6666666666666666,
    #                  0.6666666666666666], 6.666666666666667],
    #     'standard deviation': [[2.449489742783178, 2.449489742783178,
    #                  2.449489742783178], [0.816496580927726, 0.816496580927726,
    #                  0.816496580927726], 2.581988897471611],
    #     'max': [[6, 7, 8], [2, 5, 8], 8],
    #     'min': [[0, 1, 2], [0, 3, 6], 0],
    #     'sum': [[9, 12, 15], [3, 12, 21], 36]
    # }
def calculate(list):
if len(list) != 9:
raise ValueError('List must contain nine numbers.')
input_array = np.array([[list[0], list[1], list[2]], [list[3], list[4], list[5]], [list[6], list[7], list[8]]])
calculations = dict()
print(input_array)
c_mean = np.mean(input_array, axis=0)
r_mean = np.mean(input_array, axis=1)
f_mean = np.mean(input_array)
calculations['mean'] = [c_mean.tolist(), r_mean.tolist(), f_mean]
c_var = np.var(input_array, axis=0)
r_var = np.var(input_array, axis=1)
f_var = np.var(input_array)
calculations['variance'] = [c_var.tolist(), r_var.tolist(), f_var]
c_std = np.std(input_array, axis=0)
r_std = np.std(input_array, axis=1)
f_std = np.std(input_array)
calculations['standard deviation'] = [c_std.tolist(), r_std.tolist(), f_std]
c_max = np.amax(input_array, axis=0)
r_max = np.amax(input_array, axis=1)
f_max = np.amax(input_array)
calculations['max'] = [c_max.tolist(), r_max.tolist(), f_max]
c_min = np.amin(input_array, axis=0)
r_min = np.amin(input_array, axis=1)
f_min = np.amin(input_array)
calculations['min'] = [c_min.tolist(), r_min.tolist(), f_min]
c_sum = np.sum(input_array, axis=0)
r_sum = np.sum(input_array, axis=1)
f_sum = np.sum(input_array)
calculations['sum'] = [c_sum.tolist(), r_sum.tolist(), f_sum]
return calculations
print(calculate([0,1,2,3,4,5,6,7,8]))
| true | true |
f7210156036c5232eb883f6a274abc49ea56fb3e | 154 | py | Python | src/wsgi.py | mononobi/charma-server | ed90f5ec0b5ff3996232d5fe49a4f77f96d82ced | [
"BSD-3-Clause"
] | 1 | 2020-01-16T23:36:10.000Z | 2020-01-16T23:36:10.000Z | src/wsgi.py | mononobi/imovie-server | ed90f5ec0b5ff3996232d5fe49a4f77f96d82ced | [
"BSD-3-Clause"
] | 24 | 2020-06-08T18:27:04.000Z | 2021-06-06T12:01:39.000Z | src/wsgi.py | mononobi/charma-server | ed90f5ec0b5ff3996232d5fe49a4f77f96d82ced | [
"BSD-3-Clause"
] | 1 | 2020-12-20T05:29:04.000Z | 2020-12-20T05:29:04.000Z | # -*- coding: utf-8 -*-
"""
wsgi module.
"""
from charma import CharmaApplication
app = CharmaApplication()
if __name__ == '__main__':
app.run()
| 11 | 36 | 0.62987 |
from charma import CharmaApplication
app = CharmaApplication()
if __name__ == '__main__':
app.run()
| true | true |
f721018bc2069beaa9e6763bc79cdfced921521d | 667 | py | Python | examples/pipelayer_microservice/src/service/api/__init__.py | greater-than/PipeLayer | 569f43b65992f8a32079835585b864d5fe0bb251 | [
"BSD-2-Clause"
] | 61 | 2021-02-03T02:54:18.000Z | 2021-12-26T11:38:51.000Z | examples/pipelayer_microservice/src/service/api/__init__.py | greater-than/PipeLayer | 569f43b65992f8a32079835585b864d5fe0bb251 | [
"BSD-2-Clause"
] | 1 | 2021-02-16T13:58:33.000Z | 2021-02-18T12:56:32.000Z | examples/pipelayer_microservice/src/service/api/__init__.py | greater-than/PipeLayer | 569f43b65992f8a32079835585b864d5fe0bb251 | [
"BSD-2-Clause"
] | null | null | null | from logging import Logger
from typing import cast
from service.exception import ResponseException
def handle_exception(e: Exception, log: Logger = Logger("Error Logger")) -> dict:
    """Translate an exception into a JSON-style error payload and log it.

    :param e: the exception to translate.
    :param log: logger used for reporting; note the default is evaluated once
        at import time and shared across calls (standard default-arg caveat).
    :return: dict with ``statusCode`` (int) and ``message`` (str) keys —
        the exception's own status for ``ResponseException``, 500 otherwise.
    """
    if isinstance(e, ResponseException):
        # Fix: isinstance() takes a type or a tuple of types; the previous
        # code passed a list ([ResponseException]), which raised TypeError
        # on every call.
        err = cast(ResponseException, e)
        # Fix: the old call logged the literal text "{str(e)}" (missing
        # f-prefix) and passed http_status_code=..., a kwarg Logger.error()
        # rejects — the status code is folded into the message instead.
        log.error("%s (http_status_code=%s)", err, err.http_status_code,
                  exc_info=err)
        return {
            "statusCode": err.http_status_code,
            "message": str(err)
        }
    log.error("Unhandled Exception", exc_info=e)
    return {
        "statusCode": 500,
        # Spelling "occured" kept as-is: clients may match on this exact text.
        "message": "An unhandled exception occured"
    }
| 30.318182 | 81 | 0.626687 | from logging import Logger
from typing import cast
from service.exception import ResponseException
def handle_exception(e: Exception, log: Logger = Logger("Error Logger")) -> dict:
log.error("Error")
if isinstance(e, [ResponseException]):
e: ResponseException = cast(ResponseException, e)
log.error("{str(e)}", exc_info=e, http_status_code=e.http_status_code)
return {
"statusCode": e.http_status_code,
"message": str(e)
}
else:
log.error("Unhandled Exception", exc_info=e)
return {
"statusCode": 500,
"message": "An unhandled exception occured"
}
| true | true |
f72101a1cd9b75e2e0ec0b24d9f3753fae5048d3 | 70,980 | py | Python | azure-devops/azure/devops/v5_1/work/work_client.py | imafidon2020/azure-devops-python-api | ea9075f0c54dbc10115a23a8b7ad34feacbbdc14 | [
"MIT"
] | 248 | 2019-05-10T14:20:24.000Z | 2022-03-29T12:17:27.000Z | azure-devops/azure/devops/v5_1/work/work_client.py | AzureMentor/azure-devops-python-api | 3838e91d662dba1f77b43ad560ca23c1cb7e84e8 | [
"MIT"
] | 147 | 2019-05-08T14:20:49.000Z | 2022-03-28T19:36:21.000Z | azure-devops/azure/devops/v5_1/work/work_client.py | AzureMentor/azure-devops-python-api | 3838e91d662dba1f77b43ad560ca23c1cb7e84e8 | [
"MIT"
] | 121 | 2019-05-08T06:24:39.000Z | 2022-03-01T12:58:02.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest import Serializer, Deserializer
from ...client import Client
from . import models
class WorkClient(Client):
"""Work
:param str base_url: Service URL
:param Authentication creds: Authenticated credentials.
"""
def __init__(self, base_url=None, creds=None):
super(WorkClient, self).__init__(base_url, creds)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
resource_area_identifier = '1d4f49f9-02b9-4e26-b826-2cdb6195f2a9'
def get_backlog_configurations(self, team_context):
"""GetBacklogConfigurations.
Gets backlog configuration for a team
:param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
:rtype: :class:`<BacklogConfiguration> <azure.devops.v5_1.work.models.BacklogConfiguration>`
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
response = self._send(http_method='GET',
location_id='7799f497-3cb5-4f16-ad4f-5cd06012db64',
version='5.1',
route_values=route_values)
return self._deserialize('BacklogConfiguration', response)
def get_backlog_level_work_items(self, team_context, backlog_id):
"""GetBacklogLevelWorkItems.
[Preview API] Get a list of work items within a backlog level
:param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
:param str backlog_id:
:rtype: :class:`<BacklogLevelWorkItems> <azure.devops.v5_1.work.models.BacklogLevelWorkItems>`
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
if backlog_id is not None:
route_values['backlogId'] = self._serialize.url('backlog_id', backlog_id, 'str')
response = self._send(http_method='GET',
location_id='7c468d96-ab1d-4294-a360-92f07e9ccd98',
version='5.1-preview.1',
route_values=route_values)
return self._deserialize('BacklogLevelWorkItems', response)
def get_backlog(self, team_context, id):
"""GetBacklog.
[Preview API] Get a backlog level
:param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
:param str id: The id of the backlog level
:rtype: :class:`<BacklogLevelConfiguration> <azure.devops.v5_1.work.models.BacklogLevelConfiguration>`
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'str')
response = self._send(http_method='GET',
location_id='a93726f9-7867-4e38-b4f2-0bfafc2f6a94',
version='5.1-preview.1',
route_values=route_values)
return self._deserialize('BacklogLevelConfiguration', response)
def get_backlogs(self, team_context):
"""GetBacklogs.
[Preview API] List all backlog levels
:param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
:rtype: [BacklogLevelConfiguration]
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
response = self._send(http_method='GET',
location_id='a93726f9-7867-4e38-b4f2-0bfafc2f6a94',
version='5.1-preview.1',
route_values=route_values)
return self._deserialize('[BacklogLevelConfiguration]', self._unwrap_collection(response))
def get_column_suggested_values(self, project=None):
"""GetColumnSuggestedValues.
Get available board columns in a project
:param str project: Project ID or project name
:rtype: [BoardSuggestedValue]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
response = self._send(http_method='GET',
location_id='eb7ec5a3-1ba3-4fd1-b834-49a5a387e57d',
version='5.1',
route_values=route_values)
return self._deserialize('[BoardSuggestedValue]', self._unwrap_collection(response))
def get_board_mapping_parent_items(self, team_context, child_backlog_context_category_ref_name, workitem_ids):
"""GetBoardMappingParentItems.
[Preview API] Returns the list of parent field filter model for the given list of workitem ids
:param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
:param str child_backlog_context_category_ref_name:
:param [int] workitem_ids:
:rtype: [ParentChildWIMap]
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
query_parameters = {}
if child_backlog_context_category_ref_name is not None:
query_parameters['childBacklogContextCategoryRefName'] = self._serialize.query('child_backlog_context_category_ref_name', child_backlog_context_category_ref_name, 'str')
if workitem_ids is not None:
workitem_ids = ",".join(map(str, workitem_ids))
query_parameters['workitemIds'] = self._serialize.query('workitem_ids', workitem_ids, 'str')
response = self._send(http_method='GET',
location_id='186abea3-5c35-432f-9e28-7a15b4312a0e',
version='5.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[ParentChildWIMap]', self._unwrap_collection(response))
def get_row_suggested_values(self, project=None):
"""GetRowSuggestedValues.
Get available board rows in a project
:param str project: Project ID or project name
:rtype: [BoardSuggestedValue]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
response = self._send(http_method='GET',
location_id='bb494cc6-a0f5-4c6c-8dca-ea6912e79eb9',
version='5.1',
route_values=route_values)
return self._deserialize('[BoardSuggestedValue]', self._unwrap_collection(response))
def get_board(self, team_context, id):
"""GetBoard.
Get board
:param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
:param str id: identifier for board, either board's backlog level name (Eg:"Stories") or Id
:rtype: :class:`<Board> <azure.devops.v5_1.work.models.Board>`
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'str')
response = self._send(http_method='GET',
location_id='23ad19fc-3b8e-4877-8462-b3f92bc06b40',
version='5.1',
route_values=route_values)
return self._deserialize('Board', response)
def get_boards(self, team_context):
"""GetBoards.
Get boards
:param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
:rtype: [BoardReference]
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
response = self._send(http_method='GET',
location_id='23ad19fc-3b8e-4877-8462-b3f92bc06b40',
version='5.1',
route_values=route_values)
return self._deserialize('[BoardReference]', self._unwrap_collection(response))
def set_board_options(self, options, team_context, id):
"""SetBoardOptions.
Update board options
:param {str} options: options to updated
:param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
:param str id: identifier for board, either category plural name (Eg:"Stories") or guid
:rtype: {str}
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'str')
content = self._serialize.body(options, '{str}')
response = self._send(http_method='PUT',
location_id='23ad19fc-3b8e-4877-8462-b3f92bc06b40',
version='5.1',
route_values=route_values,
content=content)
return self._deserialize('{str}', self._unwrap_collection(response))
def get_board_user_settings(self, team_context, board):
"""GetBoardUserSettings.
[Preview API] Get board user settings for a board id
:param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
:param str board: Board ID or Name
:rtype: :class:`<BoardUserSettings> <azure.devops.v5_1.work.models.BoardUserSettings>`
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
if board is not None:
route_values['board'] = self._serialize.url('board', board, 'str')
response = self._send(http_method='GET',
location_id='b30d9f58-1891-4b0a-b168-c46408f919b0',
version='5.1-preview.1',
route_values=route_values)
return self._deserialize('BoardUserSettings', response)
def update_board_user_settings(self, board_user_settings, team_context, board):
"""UpdateBoardUserSettings.
[Preview API] Update board user settings for the board id
:param {str} board_user_settings:
:param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
:param str board:
:rtype: :class:`<BoardUserSettings> <azure.devops.v5_1.work.models.BoardUserSettings>`
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
if board is not None:
route_values['board'] = self._serialize.url('board', board, 'str')
content = self._serialize.body(board_user_settings, '{str}')
response = self._send(http_method='PATCH',
location_id='b30d9f58-1891-4b0a-b168-c46408f919b0',
version='5.1-preview.1',
route_values=route_values,
content=content)
return self._deserialize('BoardUserSettings', response)
def get_capacities_with_identity_ref(self, team_context, iteration_id):
"""GetCapacitiesWithIdentityRef.
Get a team's capacity
:param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
:param str iteration_id: ID of the iteration
:rtype: [TeamMemberCapacityIdentityRef]
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
if iteration_id is not None:
route_values['iterationId'] = self._serialize.url('iteration_id', iteration_id, 'str')
response = self._send(http_method='GET',
location_id='74412d15-8c1a-4352-a48d-ef1ed5587d57',
version='5.1',
route_values=route_values)
return self._deserialize('[TeamMemberCapacityIdentityRef]', self._unwrap_collection(response))
def get_capacity_with_identity_ref(self, team_context, iteration_id, team_member_id):
"""GetCapacityWithIdentityRef.
Get a team member's capacity
:param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
:param str iteration_id: ID of the iteration
:param str team_member_id: ID of the team member
:rtype: :class:`<TeamMemberCapacityIdentityRef> <azure.devops.v5_1.work.models.TeamMemberCapacityIdentityRef>`
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
if iteration_id is not None:
route_values['iterationId'] = self._serialize.url('iteration_id', iteration_id, 'str')
if team_member_id is not None:
route_values['teamMemberId'] = self._serialize.url('team_member_id', team_member_id, 'str')
response = self._send(http_method='GET',
location_id='74412d15-8c1a-4352-a48d-ef1ed5587d57',
version='5.1',
route_values=route_values)
return self._deserialize('TeamMemberCapacityIdentityRef', response)
def replace_capacities_with_identity_ref(self, capacities, team_context, iteration_id):
"""ReplaceCapacitiesWithIdentityRef.
Replace a team's capacity
:param [TeamMemberCapacityIdentityRef] capacities: Team capacity to replace
:param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
:param str iteration_id: ID of the iteration
:rtype: [TeamMemberCapacityIdentityRef]
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
if iteration_id is not None:
route_values['iterationId'] = self._serialize.url('iteration_id', iteration_id, 'str')
content = self._serialize.body(capacities, '[TeamMemberCapacityIdentityRef]')
response = self._send(http_method='PUT',
location_id='74412d15-8c1a-4352-a48d-ef1ed5587d57',
version='5.1',
route_values=route_values,
content=content)
return self._deserialize('[TeamMemberCapacityIdentityRef]', self._unwrap_collection(response))
def update_capacity_with_identity_ref(self, patch, team_context, iteration_id, team_member_id):
"""UpdateCapacityWithIdentityRef.
Update a team member's capacity
:param :class:`<CapacityPatch> <azure.devops.v5_1.work.models.CapacityPatch>` patch: Updated capacity
:param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
:param str iteration_id: ID of the iteration
:param str team_member_id: ID of the team member
:rtype: :class:`<TeamMemberCapacityIdentityRef> <azure.devops.v5_1.work.models.TeamMemberCapacityIdentityRef>`
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
if iteration_id is not None:
route_values['iterationId'] = self._serialize.url('iteration_id', iteration_id, 'str')
if team_member_id is not None:
route_values['teamMemberId'] = self._serialize.url('team_member_id', team_member_id, 'str')
content = self._serialize.body(patch, 'CapacityPatch')
response = self._send(http_method='PATCH',
location_id='74412d15-8c1a-4352-a48d-ef1ed5587d57',
version='5.1',
route_values=route_values,
content=content)
return self._deserialize('TeamMemberCapacityIdentityRef', response)
def get_board_card_rule_settings(self, team_context, board):
"""GetBoardCardRuleSettings.
Get board card Rule settings for the board id or board by name
:param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
:param str board:
:rtype: :class:`<BoardCardRuleSettings> <azure.devops.v5_1.work.models.BoardCardRuleSettings>`
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
if board is not None:
route_values['board'] = self._serialize.url('board', board, 'str')
response = self._send(http_method='GET',
location_id='b044a3d9-02ea-49c7-91a1-b730949cc896',
version='5.1',
route_values=route_values)
return self._deserialize('BoardCardRuleSettings', response)
def update_board_card_rule_settings(self, board_card_rule_settings, team_context, board):
"""UpdateBoardCardRuleSettings.
Update board card Rule settings for the board id or board by name
:param :class:`<BoardCardRuleSettings> <azure.devops.v5_1.work.models.BoardCardRuleSettings>` board_card_rule_settings:
:param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
:param str board:
:rtype: :class:`<BoardCardRuleSettings> <azure.devops.v5_1.work.models.BoardCardRuleSettings>`
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
if board is not None:
route_values['board'] = self._serialize.url('board', board, 'str')
content = self._serialize.body(board_card_rule_settings, 'BoardCardRuleSettings')
response = self._send(http_method='PATCH',
location_id='b044a3d9-02ea-49c7-91a1-b730949cc896',
version='5.1',
route_values=route_values,
content=content)
return self._deserialize('BoardCardRuleSettings', response)
def update_taskboard_card_rule_settings(self, board_card_rule_settings, team_context):
"""UpdateTaskboardCardRuleSettings.
[Preview API] Update taskboard card Rule settings
:param :class:`<BoardCardRuleSettings> <azure.devops.v5_1.work.models.BoardCardRuleSettings>` board_card_rule_settings:
:param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
content = self._serialize.body(board_card_rule_settings, 'BoardCardRuleSettings')
self._send(http_method='PATCH',
location_id='3f84a8d1-1aab-423e-a94b-6dcbdcca511f',
version='5.1-preview.2',
route_values=route_values,
content=content)
def get_board_card_settings(self, team_context, board):
"""GetBoardCardSettings.
Get board card settings for the board id or board by name
:param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
:param str board:
:rtype: :class:`<BoardCardSettings> <azure.devops.v5_1.work.models.BoardCardSettings>`
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
if board is not None:
route_values['board'] = self._serialize.url('board', board, 'str')
response = self._send(http_method='GET',
location_id='07c3b467-bc60-4f05-8e34-599ce288fafc',
version='5.1',
route_values=route_values)
return self._deserialize('BoardCardSettings', response)
def update_board_card_settings(self, board_card_settings_to_save, team_context, board):
"""UpdateBoardCardSettings.
Update board card settings for the board id or board by name
:param :class:`<BoardCardSettings> <azure.devops.v5_1.work.models.BoardCardSettings>` board_card_settings_to_save:
:param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
:param str board:
:rtype: :class:`<BoardCardSettings> <azure.devops.v5_1.work.models.BoardCardSettings>`
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
if board is not None:
route_values['board'] = self._serialize.url('board', board, 'str')
content = self._serialize.body(board_card_settings_to_save, 'BoardCardSettings')
response = self._send(http_method='PUT',
location_id='07c3b467-bc60-4f05-8e34-599ce288fafc',
version='5.1',
route_values=route_values,
content=content)
return self._deserialize('BoardCardSettings', response)
def update_taskboard_card_settings(self, board_card_settings_to_save, team_context):
"""UpdateTaskboardCardSettings.
[Preview API] Update taskboard card settings
:param :class:`<BoardCardSettings> <azure.devops.v5_1.work.models.BoardCardSettings>` board_card_settings_to_save:
:param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
content = self._serialize.body(board_card_settings_to_save, 'BoardCardSettings')
self._send(http_method='PUT',
location_id='0d63745f-31f3-4cf3-9056-2a064e567637',
version='5.1-preview.2',
route_values=route_values,
content=content)
def get_board_chart(self, team_context, board, name):
"""GetBoardChart.
Get a board chart
:param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
:param str board: Identifier for board, either board's backlog level name (Eg:"Stories") or Id
:param str name: The chart name
:rtype: :class:`<BoardChart> <azure.devops.v5_1.work.models.BoardChart>`
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
if board is not None:
route_values['board'] = self._serialize.url('board', board, 'str')
if name is not None:
route_values['name'] = self._serialize.url('name', name, 'str')
response = self._send(http_method='GET',
location_id='45fe888c-239e-49fd-958c-df1a1ab21d97',
version='5.1',
route_values=route_values)
return self._deserialize('BoardChart', response)
def get_board_charts(self, team_context, board):
"""GetBoardCharts.
Get board charts
:param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
:param str board: Identifier for board, either board's backlog level name (Eg:"Stories") or Id
:rtype: [BoardChartReference]
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
if board is not None:
route_values['board'] = self._serialize.url('board', board, 'str')
response = self._send(http_method='GET',
location_id='45fe888c-239e-49fd-958c-df1a1ab21d97',
version='5.1',
route_values=route_values)
return self._deserialize('[BoardChartReference]', self._unwrap_collection(response))
def update_board_chart(self, chart, team_context, board, name):
"""UpdateBoardChart.
Update a board chart
:param :class:`<BoardChart> <azure.devops.v5_1.work.models.BoardChart>` chart:
:param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
:param str board: Identifier for board, either board's backlog level name (Eg:"Stories") or Id
:param str name: The chart name
:rtype: :class:`<BoardChart> <azure.devops.v5_1.work.models.BoardChart>`
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
if board is not None:
route_values['board'] = self._serialize.url('board', board, 'str')
if name is not None:
route_values['name'] = self._serialize.url('name', name, 'str')
content = self._serialize.body(chart, 'BoardChart')
response = self._send(http_method='PATCH',
location_id='45fe888c-239e-49fd-958c-df1a1ab21d97',
version='5.1',
route_values=route_values,
content=content)
return self._deserialize('BoardChart', response)
def get_board_columns(self, team_context, board):
"""GetBoardColumns.
Get columns on a board
:param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
:param str board: Name or ID of the specific board
:rtype: [BoardColumn]
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
if board is not None:
route_values['board'] = self._serialize.url('board', board, 'str')
response = self._send(http_method='GET',
location_id='c555d7ff-84e1-47df-9923-a3fe0cd8751b',
version='5.1',
route_values=route_values)
return self._deserialize('[BoardColumn]', self._unwrap_collection(response))
def update_board_columns(self, board_columns, team_context, board):
"""UpdateBoardColumns.
Update columns on a board
:param [BoardColumn] board_columns: List of board columns to update
:param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
:param str board: Name or ID of the specific board
:rtype: [BoardColumn]
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
if board is not None:
route_values['board'] = self._serialize.url('board', board, 'str')
content = self._serialize.body(board_columns, '[BoardColumn]')
response = self._send(http_method='PUT',
location_id='c555d7ff-84e1-47df-9923-a3fe0cd8751b',
version='5.1',
route_values=route_values,
content=content)
return self._deserialize('[BoardColumn]', self._unwrap_collection(response))
def get_delivery_timeline_data(self, project, id, revision=None, start_date=None, end_date=None):
"""GetDeliveryTimelineData.
Get Delivery View Data
:param str project: Project ID or project name
:param str id: Identifier for delivery view
:param int revision: Revision of the plan for which you want data. If the current plan is a different revision you will get an ViewRevisionMismatchException exception. If you do not supply a revision you will get data for the latest revision.
:param datetime start_date: The start date of timeline
:param datetime end_date: The end date of timeline
:rtype: :class:`<DeliveryViewData> <azure.devops.v5_1.work.models.DeliveryViewData>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'str')
query_parameters = {}
if revision is not None:
query_parameters['revision'] = self._serialize.query('revision', revision, 'int')
if start_date is not None:
query_parameters['startDate'] = self._serialize.query('start_date', start_date, 'iso-8601')
if end_date is not None:
query_parameters['endDate'] = self._serialize.query('end_date', end_date, 'iso-8601')
response = self._send(http_method='GET',
location_id='bdd0834e-101f-49f0-a6ae-509f384a12b4',
version='5.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('DeliveryViewData', response)
    def delete_team_iteration(self, team_context, id):
        """DeleteTeamIteration.
        Delete a team's iteration by iterationId
        :param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
        :param str id: ID of the iteration
        """
        # Resolve project and team from the team context, preferring IDs
        # over names when both are set.
        project = None
        team = None
        if team_context is not None:
            if team_context.project_id:
                project = team_context.project_id
            else:
                project = team_context.project
            if team_context.team_id:
                team = team_context.team_id
            else:
                team = team_context.team
        # Build URL route values, including only the pieces that resolved.
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'string')
        if team is not None:
            route_values['team'] = self._serialize.url('team', team, 'string')
        if id is not None:
            route_values['id'] = self._serialize.url('id', id, 'str')
        # DELETE returns no body, so there is nothing to deserialize.
        self._send(http_method='DELETE',
                   location_id='c9175577-28a1-4b06-9197-8636af9f64ad',
                   version='5.1',
                   route_values=route_values)
    def get_team_iteration(self, team_context, id):
        """GetTeamIteration.
        Get team's iteration by iterationId
        :param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
        :param str id: ID of the iteration
        :rtype: :class:`<TeamSettingsIteration> <azure.devops.v5_1.work.models.TeamSettingsIteration>`
        """
        # Resolve project and team from the team context, preferring IDs
        # over names when both are set.
        project = None
        team = None
        if team_context is not None:
            if team_context.project_id:
                project = team_context.project_id
            else:
                project = team_context.project
            if team_context.team_id:
                team = team_context.team_id
            else:
                team = team_context.team
        # Build URL route values, including only the pieces that resolved.
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'string')
        if team is not None:
            route_values['team'] = self._serialize.url('team', team, 'string')
        if id is not None:
            route_values['id'] = self._serialize.url('id', id, 'str')
        response = self._send(http_method='GET',
                              location_id='c9175577-28a1-4b06-9197-8636af9f64ad',
                              version='5.1',
                              route_values=route_values)
        return self._deserialize('TeamSettingsIteration', response)
    def get_team_iterations(self, team_context, timeframe=None):
        """GetTeamIterations.
        Get a team's iterations using timeframe filter
        :param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
        :param str timeframe: A filter for which iterations are returned based on relative time. Only Current is supported currently.
        :rtype: [TeamSettingsIteration]
        """
        # Resolve project and team from the team context, preferring IDs
        # over names when both are set.
        project = None
        team = None
        if team_context is not None:
            if team_context.project_id:
                project = team_context.project_id
            else:
                project = team_context.project
            if team_context.team_id:
                team = team_context.team_id
            else:
                team = team_context.team
        # Build URL route values, including only the pieces that resolved.
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'string')
        if team is not None:
            route_values['team'] = self._serialize.url('team', team, 'string')
        query_parameters = {}
        if timeframe is not None:
            # Note the OData-style '$timeframe' query parameter name.
            query_parameters['$timeframe'] = self._serialize.query('timeframe', timeframe, 'str')
        response = self._send(http_method='GET',
                              location_id='c9175577-28a1-4b06-9197-8636af9f64ad',
                              version='5.1',
                              route_values=route_values,
                              query_parameters=query_parameters)
        return self._deserialize('[TeamSettingsIteration]', self._unwrap_collection(response))
    def post_team_iteration(self, iteration, team_context):
        """PostTeamIteration.
        Add an iteration to the team
        :param :class:`<TeamSettingsIteration> <azure.devops.v5_1.work.models.TeamSettingsIteration>` iteration: Iteration to add
        :param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
        :rtype: :class:`<TeamSettingsIteration> <azure.devops.v5_1.work.models.TeamSettingsIteration>`
        """
        # Resolve project and team from the team context, preferring IDs
        # over names when both are set.
        project = None
        team = None
        if team_context is not None:
            if team_context.project_id:
                project = team_context.project_id
            else:
                project = team_context.project
            if team_context.team_id:
                team = team_context.team_id
            else:
                team = team_context.team
        # Build URL route values, including only the pieces that resolved.
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'string')
        if team is not None:
            route_values['team'] = self._serialize.url('team', team, 'string')
        content = self._serialize.body(iteration, 'TeamSettingsIteration')
        response = self._send(http_method='POST',
                              location_id='c9175577-28a1-4b06-9197-8636af9f64ad',
                              version='5.1',
                              route_values=route_values,
                              content=content)
        return self._deserialize('TeamSettingsIteration', response)
def create_plan(self, posted_plan, project):
"""CreatePlan.
Add a new plan for the team
:param :class:`<CreatePlan> <azure.devops.v5_1.work.models.CreatePlan>` posted_plan: Plan definition
:param str project: Project ID or project name
:rtype: :class:`<Plan> <azure.devops.v5_1.work.models.Plan>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
content = self._serialize.body(posted_plan, 'CreatePlan')
response = self._send(http_method='POST',
location_id='0b42cb47-cd73-4810-ac90-19c9ba147453',
version='5.1',
route_values=route_values,
content=content)
return self._deserialize('Plan', response)
def delete_plan(self, project, id):
"""DeletePlan.
Delete the specified plan
:param str project: Project ID or project name
:param str id: Identifier of the plan
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'str')
self._send(http_method='DELETE',
location_id='0b42cb47-cd73-4810-ac90-19c9ba147453',
version='5.1',
route_values=route_values)
def get_plan(self, project, id):
"""GetPlan.
Get the information for the specified plan
:param str project: Project ID or project name
:param str id: Identifier of the plan
:rtype: :class:`<Plan> <azure.devops.v5_1.work.models.Plan>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'str')
response = self._send(http_method='GET',
location_id='0b42cb47-cd73-4810-ac90-19c9ba147453',
version='5.1',
route_values=route_values)
return self._deserialize('Plan', response)
def get_plans(self, project):
"""GetPlans.
Get the information for all the plans configured for the given team
:param str project: Project ID or project name
:rtype: [Plan]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
response = self._send(http_method='GET',
location_id='0b42cb47-cd73-4810-ac90-19c9ba147453',
version='5.1',
route_values=route_values)
return self._deserialize('[Plan]', self._unwrap_collection(response))
def update_plan(self, updated_plan, project, id):
"""UpdatePlan.
Update the information for the specified plan
:param :class:`<UpdatePlan> <azure.devops.v5_1.work.models.UpdatePlan>` updated_plan: Plan definition to be updated
:param str project: Project ID or project name
:param str id: Identifier of the plan
:rtype: :class:`<Plan> <azure.devops.v5_1.work.models.Plan>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'str')
content = self._serialize.body(updated_plan, 'UpdatePlan')
response = self._send(http_method='PUT',
location_id='0b42cb47-cd73-4810-ac90-19c9ba147453',
version='5.1',
route_values=route_values,
content=content)
return self._deserialize('Plan', response)
def get_process_configuration(self, project):
"""GetProcessConfiguration.
[Preview API] Get process configuration
:param str project: Project ID or project name
:rtype: :class:`<ProcessConfiguration> <azure.devops.v5_1.work.models.ProcessConfiguration>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
response = self._send(http_method='GET',
location_id='f901ba42-86d2-4b0c-89c1-3f86d06daa84',
version='5.1-preview.1',
route_values=route_values)
return self._deserialize('ProcessConfiguration', response)
    def get_board_rows(self, team_context, board):
        """GetBoardRows.
        Get rows on a board
        :param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
        :param str board: Name or ID of the specific board
        :rtype: [BoardRow]
        """
        # Resolve project and team from the team context, preferring IDs
        # over names when both are set.
        project = None
        team = None
        if team_context is not None:
            if team_context.project_id:
                project = team_context.project_id
            else:
                project = team_context.project
            if team_context.team_id:
                team = team_context.team_id
            else:
                team = team_context.team
        # Build URL route values, including only the pieces that resolved.
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'string')
        if team is not None:
            route_values['team'] = self._serialize.url('team', team, 'string')
        if board is not None:
            route_values['board'] = self._serialize.url('board', board, 'str')
        response = self._send(http_method='GET',
                              location_id='0863355d-aefd-4d63-8669-984c9b7b0e78',
                              version='5.1',
                              route_values=route_values)
        return self._deserialize('[BoardRow]', self._unwrap_collection(response))
    def update_board_rows(self, board_rows, team_context, board):
        """UpdateBoardRows.
        Update rows on a board
        :param [BoardRow] board_rows: List of board rows to update
        :param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
        :param str board: Name or ID of the specific board
        :rtype: [BoardRow]
        """
        # Resolve project and team from the team context, preferring IDs
        # over names when both are set.
        project = None
        team = None
        if team_context is not None:
            if team_context.project_id:
                project = team_context.project_id
            else:
                project = team_context.project
            if team_context.team_id:
                team = team_context.team_id
            else:
                team = team_context.team
        # Build URL route values, including only the pieces that resolved.
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'string')
        if team is not None:
            route_values['team'] = self._serialize.url('team', team, 'string')
        if board is not None:
            route_values['board'] = self._serialize.url('board', board, 'str')
        content = self._serialize.body(board_rows, '[BoardRow]')
        response = self._send(http_method='PUT',
                              location_id='0863355d-aefd-4d63-8669-984c9b7b0e78',
                              version='5.1',
                              route_values=route_values,
                              content=content)
        return self._deserialize('[BoardRow]', self._unwrap_collection(response))
    def get_team_days_off(self, team_context, iteration_id):
        """GetTeamDaysOff.
        Get team's days off for an iteration
        :param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
        :param str iteration_id: ID of the iteration
        :rtype: :class:`<TeamSettingsDaysOff> <azure.devops.v5_1.work.models.TeamSettingsDaysOff>`
        """
        # Resolve project and team from the team context, preferring IDs
        # over names when both are set.
        project = None
        team = None
        if team_context is not None:
            if team_context.project_id:
                project = team_context.project_id
            else:
                project = team_context.project
            if team_context.team_id:
                team = team_context.team_id
            else:
                team = team_context.team
        # Build URL route values, including only the pieces that resolved.
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'string')
        if team is not None:
            route_values['team'] = self._serialize.url('team', team, 'string')
        if iteration_id is not None:
            route_values['iterationId'] = self._serialize.url('iteration_id', iteration_id, 'str')
        response = self._send(http_method='GET',
                              location_id='2d4faa2e-9150-4cbf-a47a-932b1b4a0773',
                              version='5.1',
                              route_values=route_values)
        return self._deserialize('TeamSettingsDaysOff', response)
    def update_team_days_off(self, days_off_patch, team_context, iteration_id):
        """UpdateTeamDaysOff.
        Set a team's days off for an iteration
        :param :class:`<TeamSettingsDaysOffPatch> <azure.devops.v5_1.work.models.TeamSettingsDaysOffPatch>` days_off_patch: Team's days off patch containing a list of start and end dates
        :param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
        :param str iteration_id: ID of the iteration
        :rtype: :class:`<TeamSettingsDaysOff> <azure.devops.v5_1.work.models.TeamSettingsDaysOff>`
        """
        # Resolve project and team from the team context, preferring IDs
        # over names when both are set.
        project = None
        team = None
        if team_context is not None:
            if team_context.project_id:
                project = team_context.project_id
            else:
                project = team_context.project
            if team_context.team_id:
                team = team_context.team_id
            else:
                team = team_context.team
        # Build URL route values, including only the pieces that resolved.
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'string')
        if team is not None:
            route_values['team'] = self._serialize.url('team', team, 'string')
        if iteration_id is not None:
            route_values['iterationId'] = self._serialize.url('iteration_id', iteration_id, 'str')
        content = self._serialize.body(days_off_patch, 'TeamSettingsDaysOffPatch')
        response = self._send(http_method='PATCH',
                              location_id='2d4faa2e-9150-4cbf-a47a-932b1b4a0773',
                              version='5.1',
                              route_values=route_values,
                              content=content)
        return self._deserialize('TeamSettingsDaysOff', response)
    def get_team_field_values(self, team_context):
        """GetTeamFieldValues.
        Get a collection of team field values
        :param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
        :rtype: :class:`<TeamFieldValues> <azure.devops.v5_1.work.models.TeamFieldValues>`
        """
        # Resolve project and team from the team context, preferring IDs
        # over names when both are set.
        project = None
        team = None
        if team_context is not None:
            if team_context.project_id:
                project = team_context.project_id
            else:
                project = team_context.project
            if team_context.team_id:
                team = team_context.team_id
            else:
                team = team_context.team
        # Build URL route values, including only the pieces that resolved.
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'string')
        if team is not None:
            route_values['team'] = self._serialize.url('team', team, 'string')
        response = self._send(http_method='GET',
                              location_id='07ced576-58ed-49e6-9c1e-5cb53ab8bf2a',
                              version='5.1',
                              route_values=route_values)
        return self._deserialize('TeamFieldValues', response)
    def update_team_field_values(self, patch, team_context):
        """UpdateTeamFieldValues.
        Update team field values
        :param :class:`<TeamFieldValuesPatch> <azure.devops.v5_1.work.models.TeamFieldValuesPatch>` patch:
        :param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
        :rtype: :class:`<TeamFieldValues> <azure.devops.v5_1.work.models.TeamFieldValues>`
        """
        # Resolve project and team from the team context, preferring IDs
        # over names when both are set.
        project = None
        team = None
        if team_context is not None:
            if team_context.project_id:
                project = team_context.project_id
            else:
                project = team_context.project
            if team_context.team_id:
                team = team_context.team_id
            else:
                team = team_context.team
        # Build URL route values, including only the pieces that resolved.
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'string')
        if team is not None:
            route_values['team'] = self._serialize.url('team', team, 'string')
        content = self._serialize.body(patch, 'TeamFieldValuesPatch')
        response = self._send(http_method='PATCH',
                              location_id='07ced576-58ed-49e6-9c1e-5cb53ab8bf2a',
                              version='5.1',
                              route_values=route_values,
                              content=content)
        return self._deserialize('TeamFieldValues', response)
    def get_team_settings(self, team_context):
        """GetTeamSettings.
        Get a team's settings
        :param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
        :rtype: :class:`<TeamSetting> <azure.devops.v5_1.work.models.TeamSetting>`
        """
        # Resolve project and team from the team context, preferring IDs
        # over names when both are set.
        project = None
        team = None
        if team_context is not None:
            if team_context.project_id:
                project = team_context.project_id
            else:
                project = team_context.project
            if team_context.team_id:
                team = team_context.team_id
            else:
                team = team_context.team
        # Build URL route values, including only the pieces that resolved.
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'string')
        if team is not None:
            route_values['team'] = self._serialize.url('team', team, 'string')
        response = self._send(http_method='GET',
                              location_id='c3c1012b-bea7-49d7-b45e-1664e566f84c',
                              version='5.1',
                              route_values=route_values)
        return self._deserialize('TeamSetting', response)
    def update_team_settings(self, team_settings_patch, team_context):
        """UpdateTeamSettings.
        Update a team's settings
        :param :class:`<TeamSettingsPatch> <azure.devops.v5_1.work.models.TeamSettingsPatch>` team_settings_patch: TeamSettings changes
        :param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
        :rtype: :class:`<TeamSetting> <azure.devops.v5_1.work.models.TeamSetting>`
        """
        # Resolve project and team from the team context, preferring IDs
        # over names when both are set.
        project = None
        team = None
        if team_context is not None:
            if team_context.project_id:
                project = team_context.project_id
            else:
                project = team_context.project
            if team_context.team_id:
                team = team_context.team_id
            else:
                team = team_context.team
        # Build URL route values, including only the pieces that resolved.
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'string')
        if team is not None:
            route_values['team'] = self._serialize.url('team', team, 'string')
        content = self._serialize.body(team_settings_patch, 'TeamSettingsPatch')
        response = self._send(http_method='PATCH',
                              location_id='c3c1012b-bea7-49d7-b45e-1664e566f84c',
                              version='5.1',
                              route_values=route_values,
                              content=content)
        return self._deserialize('TeamSetting', response)
    def get_iteration_work_items(self, team_context, iteration_id):
        """GetIterationWorkItems.
        [Preview API] Get work items for iteration
        :param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
        :param str iteration_id: ID of the iteration
        :rtype: :class:`<IterationWorkItems> <azure.devops.v5_1.work.models.IterationWorkItems>`
        """
        # Resolve project and team from the team context, preferring IDs
        # over names when both are set.
        project = None
        team = None
        if team_context is not None:
            if team_context.project_id:
                project = team_context.project_id
            else:
                project = team_context.project
            if team_context.team_id:
                team = team_context.team_id
            else:
                team = team_context.team
        # Build URL route values, including only the pieces that resolved.
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'string')
        if team is not None:
            route_values['team'] = self._serialize.url('team', team, 'string')
        if iteration_id is not None:
            route_values['iterationId'] = self._serialize.url('iteration_id', iteration_id, 'str')
        response = self._send(http_method='GET',
                              location_id='5b3ef1a6-d3ab-44cd-bafd-c7f45db850fa',
                              version='5.1-preview.1',
                              route_values=route_values)
        return self._deserialize('IterationWorkItems', response)
    def reorder_backlog_work_items(self, operation, team_context):
        """ReorderBacklogWorkItems.
        [Preview API] Reorder Product Backlog/Boards Work Items
        :param :class:`<ReorderOperation> <azure.devops.v5_1.work.models.ReorderOperation>` operation:
        :param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
        :rtype: [ReorderResult]
        """
        # Resolve project and team from the team context, preferring IDs
        # over names when both are set.
        project = None
        team = None
        if team_context is not None:
            if team_context.project_id:
                project = team_context.project_id
            else:
                project = team_context.project
            if team_context.team_id:
                team = team_context.team_id
            else:
                team = team_context.team
        # Build URL route values, including only the pieces that resolved.
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'string')
        if team is not None:
            route_values['team'] = self._serialize.url('team', team, 'string')
        content = self._serialize.body(operation, 'ReorderOperation')
        response = self._send(http_method='PATCH',
                              location_id='1c22b714-e7e4-41b9-85e0-56ee13ef55ed',
                              version='5.1-preview.1',
                              route_values=route_values,
                              content=content)
        return self._deserialize('[ReorderResult]', self._unwrap_collection(response))
    def reorder_iteration_work_items(self, operation, team_context, iteration_id):
        """ReorderIterationWorkItems.
        [Preview API] Reorder Sprint Backlog/Taskboard Work Items
        :param :class:`<ReorderOperation> <azure.devops.v5_1.work.models.ReorderOperation>` operation:
        :param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
        :param str iteration_id: The id of the iteration
        :rtype: [ReorderResult]
        """
        # Resolve project and team from the team context, preferring IDs
        # over names when both are set.
        project = None
        team = None
        if team_context is not None:
            if team_context.project_id:
                project = team_context.project_id
            else:
                project = team_context.project
            if team_context.team_id:
                team = team_context.team_id
            else:
                team = team_context.team
        # Build URL route values, including only the pieces that resolved.
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'string')
        if team is not None:
            route_values['team'] = self._serialize.url('team', team, 'string')
        if iteration_id is not None:
            route_values['iterationId'] = self._serialize.url('iteration_id', iteration_id, 'str')
        content = self._serialize.body(operation, 'ReorderOperation')
        response = self._send(http_method='PATCH',
                              location_id='47755db2-d7eb-405a-8c25-675401525fc9',
                              version='5.1-preview.1',
                              route_values=route_values,
                              content=content)
        return self._deserialize('[ReorderResult]', self._unwrap_collection(response))
| 47.256991 | 250 | 0.591181 |
from msrest import Serializer, Deserializer
from ...client import Client
from . import models
class WorkClient(Client):
"""Work
:param str base_url: Service URL
:param Authentication creds: Authenticated credentials.
"""
    def __init__(self, base_url=None, creds=None):
        super(WorkClient, self).__init__(base_url, creds)
        # Register every class from the generated models module so the
        # serializer/deserializer can resolve model type names at runtime.
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)
resource_area_identifier = '1d4f49f9-02b9-4e26-b826-2cdb6195f2a9'
    def get_backlog_configurations(self, team_context):
        """GetBacklogConfigurations.
        Gets backlog configuration for a team
        :param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
        :rtype: :class:`<BacklogConfiguration> <azure.devops.v5_1.work.models.BacklogConfiguration>`
        """
        # Resolve project and team from the team context, preferring IDs
        # over names when both are set.
        project = None
        team = None
        if team_context is not None:
            if team_context.project_id:
                project = team_context.project_id
            else:
                project = team_context.project
            if team_context.team_id:
                team = team_context.team_id
            else:
                team = team_context.team
        # Build URL route values, including only the pieces that resolved.
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'string')
        if team is not None:
            route_values['team'] = self._serialize.url('team', team, 'string')
        response = self._send(http_method='GET',
                              location_id='7799f497-3cb5-4f16-ad4f-5cd06012db64',
                              version='5.1',
                              route_values=route_values)
        return self._deserialize('BacklogConfiguration', response)
    def get_backlog_level_work_items(self, team_context, backlog_id):
        """GetBacklogLevelWorkItems.
        [Preview API] Get a list of work items within a backlog level
        :param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
        :param str backlog_id:
        :rtype: :class:`<BacklogLevelWorkItems> <azure.devops.v5_1.work.models.BacklogLevelWorkItems>`
        """
        # Resolve project and team from the team context, preferring IDs
        # over names when both are set.
        project = None
        team = None
        if team_context is not None:
            if team_context.project_id:
                project = team_context.project_id
            else:
                project = team_context.project
            if team_context.team_id:
                team = team_context.team_id
            else:
                team = team_context.team
        # Build URL route values, including only the pieces that resolved.
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'string')
        if team is not None:
            route_values['team'] = self._serialize.url('team', team, 'string')
        if backlog_id is not None:
            route_values['backlogId'] = self._serialize.url('backlog_id', backlog_id, 'str')
        response = self._send(http_method='GET',
                              location_id='7c468d96-ab1d-4294-a360-92f07e9ccd98',
                              version='5.1-preview.1',
                              route_values=route_values)
        return self._deserialize('BacklogLevelWorkItems', response)
    def get_backlog(self, team_context, id):
        """GetBacklog.
        [Preview API] Get a backlog level
        :param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
        :param str id: The id of the backlog level
        :rtype: :class:`<BacklogLevelConfiguration> <azure.devops.v5_1.work.models.BacklogLevelConfiguration>`
        """
        # Resolve project and team from the team context, preferring IDs
        # over names when both are set.
        project = None
        team = None
        if team_context is not None:
            if team_context.project_id:
                project = team_context.project_id
            else:
                project = team_context.project
            if team_context.team_id:
                team = team_context.team_id
            else:
                team = team_context.team
        # Build URL route values, including only the pieces that resolved.
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'string')
        if team is not None:
            route_values['team'] = self._serialize.url('team', team, 'string')
        if id is not None:
            route_values['id'] = self._serialize.url('id', id, 'str')
        response = self._send(http_method='GET',
                              location_id='a93726f9-7867-4e38-b4f2-0bfafc2f6a94',
                              version='5.1-preview.1',
                              route_values=route_values)
        return self._deserialize('BacklogLevelConfiguration', response)
    def get_backlogs(self, team_context):
        """GetBacklogs.
        [Preview API] List all backlog levels
        :param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
        :rtype: [BacklogLevelConfiguration]
        """
        # Resolve project and team from the team context, preferring IDs
        # over names when both are set.
        project = None
        team = None
        if team_context is not None:
            if team_context.project_id:
                project = team_context.project_id
            else:
                project = team_context.project
            if team_context.team_id:
                team = team_context.team_id
            else:
                team = team_context.team
        # Build URL route values, including only the pieces that resolved.
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'string')
        if team is not None:
            route_values['team'] = self._serialize.url('team', team, 'string')
        response = self._send(http_method='GET',
                              location_id='a93726f9-7867-4e38-b4f2-0bfafc2f6a94',
                              version='5.1-preview.1',
                              route_values=route_values)
        return self._deserialize('[BacklogLevelConfiguration]', self._unwrap_collection(response))
def get_column_suggested_values(self, project=None):
"""GetColumnSuggestedValues.
Get available board columns in a project
:param str project: Project ID or project name
:rtype: [BoardSuggestedValue]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
response = self._send(http_method='GET',
location_id='eb7ec5a3-1ba3-4fd1-b834-49a5a387e57d',
version='5.1',
route_values=route_values)
return self._deserialize('[BoardSuggestedValue]', self._unwrap_collection(response))
    def get_board_mapping_parent_items(self, team_context, child_backlog_context_category_ref_name, workitem_ids):
        """GetBoardMappingParentItems.
        [Preview API] Returns the list of parent field filter model for the given list of workitem ids
        :param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
        :param str child_backlog_context_category_ref_name:
        :param [int] workitem_ids:
        :rtype: [ParentChildWIMap]
        """
        # Resolve project and team from the team context, preferring IDs
        # over names when both are set.
        project = None
        team = None
        if team_context is not None:
            if team_context.project_id:
                project = team_context.project_id
            else:
                project = team_context.project
            if team_context.team_id:
                team = team_context.team_id
            else:
                team = team_context.team
        # Build URL route values, including only the pieces that resolved.
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'string')
        if team is not None:
            route_values['team'] = self._serialize.url('team', team, 'string')
        query_parameters = {}
        if child_backlog_context_category_ref_name is not None:
            query_parameters['childBacklogContextCategoryRefName'] = self._serialize.query('child_backlog_context_category_ref_name', child_backlog_context_category_ref_name, 'str')
        if workitem_ids is not None:
            # The service expects the id list as a single comma-separated string.
            workitem_ids = ",".join(map(str, workitem_ids))
            query_parameters['workitemIds'] = self._serialize.query('workitem_ids', workitem_ids, 'str')
        response = self._send(http_method='GET',
                              location_id='186abea3-5c35-432f-9e28-7a15b4312a0e',
                              version='5.1-preview.1',
                              route_values=route_values,
                              query_parameters=query_parameters)
        return self._deserialize('[ParentChildWIMap]', self._unwrap_collection(response))
def get_row_suggested_values(self, project=None):
"""GetRowSuggestedValues.
Get available board rows in a project
:param str project: Project ID or project name
:rtype: [BoardSuggestedValue]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
response = self._send(http_method='GET',
location_id='bb494cc6-a0f5-4c6c-8dca-ea6912e79eb9',
version='5.1',
route_values=route_values)
return self._deserialize('[BoardSuggestedValue]', self._unwrap_collection(response))
    def get_board(self, team_context, id):
        """GetBoard.
        Get board
        :param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
        :param str id: identifier for board, either board's backlog level name (Eg:"Stories") or Id
        :rtype: :class:`<Board> <azure.devops.v5_1.work.models.Board>`
        """
        # Resolve project and team from the team context, preferring IDs
        # over names when both are set.
        project = None
        team = None
        if team_context is not None:
            if team_context.project_id:
                project = team_context.project_id
            else:
                project = team_context.project
            if team_context.team_id:
                team = team_context.team_id
            else:
                team = team_context.team
        # Build URL route values, including only the pieces that resolved.
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'string')
        if team is not None:
            route_values['team'] = self._serialize.url('team', team, 'string')
        if id is not None:
            route_values['id'] = self._serialize.url('id', id, 'str')
        response = self._send(http_method='GET',
                              location_id='23ad19fc-3b8e-4877-8462-b3f92bc06b40',
                              version='5.1',
                              route_values=route_values)
        return self._deserialize('Board', response)
    def get_boards(self, team_context):
        """GetBoards.
        Get boards
        :param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
        :rtype: [BoardReference]
        """
        # Resolve project and team from the team context, preferring IDs
        # over names when both are set.
        project = None
        team = None
        if team_context is not None:
            if team_context.project_id:
                project = team_context.project_id
            else:
                project = team_context.project
            if team_context.team_id:
                team = team_context.team_id
            else:
                team = team_context.team
        # Build URL route values, including only the pieces that resolved.
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'string')
        if team is not None:
            route_values['team'] = self._serialize.url('team', team, 'string')
        response = self._send(http_method='GET',
                              location_id='23ad19fc-3b8e-4877-8462-b3f92bc06b40',
                              version='5.1',
                              route_values=route_values)
        return self._deserialize('[BoardReference]', self._unwrap_collection(response))
    def set_board_options(self, options, team_context, id):
        """SetBoardOptions.
        Update board options
        :param {str} options: options to updated
        :param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
        :param str id: identifier for board, either category plural name (Eg:"Stories") or guid
        :rtype: {str}
        """
        # Resolve project and team from the team context, preferring IDs
        # over names when both are set.
        project = None
        team = None
        if team_context is not None:
            if team_context.project_id:
                project = team_context.project_id
            else:
                project = team_context.project
            if team_context.team_id:
                team = team_context.team_id
            else:
                team = team_context.team
        # Build URL route values, including only the pieces that resolved.
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'string')
        if team is not None:
            route_values['team'] = self._serialize.url('team', team, 'string')
        if id is not None:
            route_values['id'] = self._serialize.url('id', id, 'str')
        # Options are sent and returned as a plain string-to-string map.
        content = self._serialize.body(options, '{str}')
        response = self._send(http_method='PUT',
                              location_id='23ad19fc-3b8e-4877-8462-b3f92bc06b40',
                              version='5.1',
                              route_values=route_values,
                              content=content)
        return self._deserialize('{str}', self._unwrap_collection(response))
def get_board_user_settings(self, team_context, board):
        """GetBoardUserSettings.
        [Preview API] Get board user settings for a board id.
        :param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
        :param str board: Board ID or Name
        :rtype: :class:`<BoardUserSettings> <azure.devops.v5_1.work.models.BoardUserSettings>`
        """
        # Resolve project/team from the context; explicit ids win over names.
        project = team = None
        if team_context is not None:
            project = team_context.project_id or team_context.project
            team = team_context.team_id or team_context.team
        routes = {}
        for key, value, kind in (('project', project, 'string'),
                                 ('team', team, 'string'),
                                 ('board', board, 'str')):
            if value is not None:
                routes[key] = self._serialize.url(key, value, kind)
        resp = self._send(http_method='GET',
                          location_id='b30d9f58-1891-4b0a-b168-c46408f919b0',
                          version='5.1-preview.1',
                          route_values=routes)
        return self._deserialize('BoardUserSettings', resp)
def update_board_user_settings(self, board_user_settings, team_context, board):
        """UpdateBoardUserSettings.
        [Preview API] Update board user settings for the board id.
        :param {str} board_user_settings:
        :param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
        :param str board:
        :rtype: :class:`<BoardUserSettings> <azure.devops.v5_1.work.models.BoardUserSettings>`
        """
        # Resolve project/team from the context; explicit ids win over names.
        project = team = None
        if team_context is not None:
            project = team_context.project_id or team_context.project
            team = team_context.team_id or team_context.team
        routes = {}
        for key, value, kind in (('project', project, 'string'),
                                 ('team', team, 'string'),
                                 ('board', board, 'str')):
            if value is not None:
                routes[key] = self._serialize.url(key, value, kind)
        payload = self._serialize.body(board_user_settings, '{str}')
        resp = self._send(http_method='PATCH',
                          location_id='b30d9f58-1891-4b0a-b168-c46408f919b0',
                          version='5.1-preview.1',
                          route_values=routes,
                          content=payload)
        return self._deserialize('BoardUserSettings', resp)
def get_capacities_with_identity_ref(self, team_context, iteration_id):
        """GetCapacitiesWithIdentityRef.
        Get a team's capacity.
        :param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
        :param str iteration_id: ID of the iteration
        :rtype: [TeamMemberCapacityIdentityRef]
        """
        # Resolve project/team from the context; explicit ids win over names.
        project = team = None
        if team_context is not None:
            project = team_context.project_id or team_context.project
            team = team_context.team_id or team_context.team
        routes = {}
        for key, name, value, kind in (('project', 'project', project, 'string'),
                                       ('team', 'team', team, 'string'),
                                       ('iterationId', 'iteration_id', iteration_id, 'str')):
            if value is not None:
                routes[key] = self._serialize.url(name, value, kind)
        resp = self._send(http_method='GET',
                          location_id='74412d15-8c1a-4352-a48d-ef1ed5587d57',
                          version='5.1',
                          route_values=routes)
        return self._deserialize('[TeamMemberCapacityIdentityRef]', self._unwrap_collection(resp))
def get_capacity_with_identity_ref(self, team_context, iteration_id, team_member_id):
        """GetCapacityWithIdentityRef.
        Get a team member's capacity.
        :param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
        :param str iteration_id: ID of the iteration
        :param str team_member_id: ID of the team member
        :rtype: :class:`<TeamMemberCapacityIdentityRef> <azure.devops.v5_1.work.models.TeamMemberCapacityIdentityRef>`
        """
        # Resolve project/team from the context; explicit ids win over names.
        project = team = None
        if team_context is not None:
            project = team_context.project_id or team_context.project
            team = team_context.team_id or team_context.team
        routes = {}
        for key, name, value, kind in (('project', 'project', project, 'string'),
                                       ('team', 'team', team, 'string'),
                                       ('iterationId', 'iteration_id', iteration_id, 'str'),
                                       ('teamMemberId', 'team_member_id', team_member_id, 'str')):
            if value is not None:
                routes[key] = self._serialize.url(name, value, kind)
        resp = self._send(http_method='GET',
                          location_id='74412d15-8c1a-4352-a48d-ef1ed5587d57',
                          version='5.1',
                          route_values=routes)
        return self._deserialize('TeamMemberCapacityIdentityRef', resp)
def replace_capacities_with_identity_ref(self, capacities, team_context, iteration_id):
        """ReplaceCapacitiesWithIdentityRef.
        Replace a team's capacity.
        :param [TeamMemberCapacityIdentityRef] capacities: Team capacity to replace
        :param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
        :param str iteration_id: ID of the iteration
        :rtype: [TeamMemberCapacityIdentityRef]
        """
        # Resolve project/team from the context; explicit ids win over names.
        project = team = None
        if team_context is not None:
            project = team_context.project_id or team_context.project
            team = team_context.team_id or team_context.team
        routes = {}
        for key, name, value, kind in (('project', 'project', project, 'string'),
                                       ('team', 'team', team, 'string'),
                                       ('iterationId', 'iteration_id', iteration_id, 'str')):
            if value is not None:
                routes[key] = self._serialize.url(name, value, kind)
        payload = self._serialize.body(capacities, '[TeamMemberCapacityIdentityRef]')
        resp = self._send(http_method='PUT',
                          location_id='74412d15-8c1a-4352-a48d-ef1ed5587d57',
                          version='5.1',
                          route_values=routes,
                          content=payload)
        return self._deserialize('[TeamMemberCapacityIdentityRef]', self._unwrap_collection(resp))
def update_capacity_with_identity_ref(self, patch, team_context, iteration_id, team_member_id):
        """UpdateCapacityWithIdentityRef.
        Update a team member's capacity.
        :param :class:`<CapacityPatch> <azure.devops.v5_1.work.models.CapacityPatch>` patch: Updated capacity
        :param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
        :param str iteration_id: ID of the iteration
        :param str team_member_id: ID of the team member
        :rtype: :class:`<TeamMemberCapacityIdentityRef> <azure.devops.v5_1.work.models.TeamMemberCapacityIdentityRef>`
        """
        # Resolve project/team from the context; explicit ids win over names.
        project = team = None
        if team_context is not None:
            project = team_context.project_id or team_context.project
            team = team_context.team_id or team_context.team
        routes = {}
        for key, name, value, kind in (('project', 'project', project, 'string'),
                                       ('team', 'team', team, 'string'),
                                       ('iterationId', 'iteration_id', iteration_id, 'str'),
                                       ('teamMemberId', 'team_member_id', team_member_id, 'str')):
            if value is not None:
                routes[key] = self._serialize.url(name, value, kind)
        payload = self._serialize.body(patch, 'CapacityPatch')
        resp = self._send(http_method='PATCH',
                          location_id='74412d15-8c1a-4352-a48d-ef1ed5587d57',
                          version='5.1',
                          route_values=routes,
                          content=payload)
        return self._deserialize('TeamMemberCapacityIdentityRef', resp)
def get_board_card_rule_settings(self, team_context, board):
        """GetBoardCardRuleSettings.
        Get board card Rule settings for the board id or board by name.
        :param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
        :param str board:
        :rtype: :class:`<BoardCardRuleSettings> <azure.devops.v5_1.work.models.BoardCardRuleSettings>`
        """
        # Resolve project/team from the context; explicit ids win over names.
        project = team = None
        if team_context is not None:
            project = team_context.project_id or team_context.project
            team = team_context.team_id or team_context.team
        routes = {}
        for key, value, kind in (('project', project, 'string'),
                                 ('team', team, 'string'),
                                 ('board', board, 'str')):
            if value is not None:
                routes[key] = self._serialize.url(key, value, kind)
        resp = self._send(http_method='GET',
                          location_id='b044a3d9-02ea-49c7-91a1-b730949cc896',
                          version='5.1',
                          route_values=routes)
        return self._deserialize('BoardCardRuleSettings', resp)
def update_board_card_rule_settings(self, board_card_rule_settings, team_context, board):
        """UpdateBoardCardRuleSettings.
        Update board card Rule settings for the board id or board by name.
        :param :class:`<BoardCardRuleSettings> <azure.devops.v5_1.work.models.BoardCardRuleSettings>` board_card_rule_settings:
        :param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
        :param str board:
        :rtype: :class:`<BoardCardRuleSettings> <azure.devops.v5_1.work.models.BoardCardRuleSettings>`
        """
        # Resolve project/team from the context; explicit ids win over names.
        project = team = None
        if team_context is not None:
            project = team_context.project_id or team_context.project
            team = team_context.team_id or team_context.team
        routes = {}
        for key, value, kind in (('project', project, 'string'),
                                 ('team', team, 'string'),
                                 ('board', board, 'str')):
            if value is not None:
                routes[key] = self._serialize.url(key, value, kind)
        payload = self._serialize.body(board_card_rule_settings, 'BoardCardRuleSettings')
        resp = self._send(http_method='PATCH',
                          location_id='b044a3d9-02ea-49c7-91a1-b730949cc896',
                          version='5.1',
                          route_values=routes,
                          content=payload)
        return self._deserialize('BoardCardRuleSettings', resp)
def update_taskboard_card_rule_settings(self, board_card_rule_settings, team_context):
        """UpdateTaskboardCardRuleSettings.
        [Preview API] Update taskboard card Rule settings.
        :param :class:`<BoardCardRuleSettings> <azure.devops.v5_1.work.models.BoardCardRuleSettings>` board_card_rule_settings:
        :param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
        """
        # Resolve project/team from the context; explicit ids win over names.
        project = team = None
        if team_context is not None:
            project = team_context.project_id or team_context.project
            team = team_context.team_id or team_context.team
        routes = {}
        for key, value in (('project', project), ('team', team)):
            if value is not None:
                routes[key] = self._serialize.url(key, value, 'string')
        payload = self._serialize.body(board_card_rule_settings, 'BoardCardRuleSettings')
        # Fire-and-forget: the service returns no body for this operation.
        self._send(http_method='PATCH',
                   location_id='3f84a8d1-1aab-423e-a94b-6dcbdcca511f',
                   version='5.1-preview.2',
                   route_values=routes,
                   content=payload)
def get_board_card_settings(self, team_context, board):
        """GetBoardCardSettings.
        Get board card settings for the board id or board by name.
        :param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
        :param str board:
        :rtype: :class:`<BoardCardSettings> <azure.devops.v5_1.work.models.BoardCardSettings>`
        """
        # Resolve project/team from the context; explicit ids win over names.
        project = team = None
        if team_context is not None:
            project = team_context.project_id or team_context.project
            team = team_context.team_id or team_context.team
        routes = {}
        for key, value, kind in (('project', project, 'string'),
                                 ('team', team, 'string'),
                                 ('board', board, 'str')):
            if value is not None:
                routes[key] = self._serialize.url(key, value, kind)
        resp = self._send(http_method='GET',
                          location_id='07c3b467-bc60-4f05-8e34-599ce288fafc',
                          version='5.1',
                          route_values=routes)
        return self._deserialize('BoardCardSettings', resp)
def update_board_card_settings(self, board_card_settings_to_save, team_context, board):
        """UpdateBoardCardSettings.
        Update board card settings for the board id or board by name.
        :param :class:`<BoardCardSettings> <azure.devops.v5_1.work.models.BoardCardSettings>` board_card_settings_to_save:
        :param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
        :param str board:
        :rtype: :class:`<BoardCardSettings> <azure.devops.v5_1.work.models.BoardCardSettings>`
        """
        # Resolve project/team from the context; explicit ids win over names.
        project = team = None
        if team_context is not None:
            project = team_context.project_id or team_context.project
            team = team_context.team_id or team_context.team
        routes = {}
        for key, value, kind in (('project', project, 'string'),
                                 ('team', team, 'string'),
                                 ('board', board, 'str')):
            if value is not None:
                routes[key] = self._serialize.url(key, value, kind)
        payload = self._serialize.body(board_card_settings_to_save, 'BoardCardSettings')
        resp = self._send(http_method='PUT',
                          location_id='07c3b467-bc60-4f05-8e34-599ce288fafc',
                          version='5.1',
                          route_values=routes,
                          content=payload)
        return self._deserialize('BoardCardSettings', resp)
def update_taskboard_card_settings(self, board_card_settings_to_save, team_context):
        """UpdateTaskboardCardSettings.
        [Preview API] Update taskboard card settings.
        :param :class:`<BoardCardSettings> <azure.devops.v5_1.work.models.BoardCardSettings>` board_card_settings_to_save:
        :param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
        """
        # Resolve project/team from the context; explicit ids win over names.
        project = team = None
        if team_context is not None:
            project = team_context.project_id or team_context.project
            team = team_context.team_id or team_context.team
        routes = {}
        for key, value in (('project', project), ('team', team)):
            if value is not None:
                routes[key] = self._serialize.url(key, value, 'string')
        payload = self._serialize.body(board_card_settings_to_save, 'BoardCardSettings')
        # Fire-and-forget: the service returns no body for this operation.
        self._send(http_method='PUT',
                   location_id='0d63745f-31f3-4cf3-9056-2a064e567637',
                   version='5.1-preview.2',
                   route_values=routes,
                   content=payload)
def get_board_chart(self, team_context, board, name):
        """GetBoardChart.
        Get a board chart.
        :param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
        :param str board: Identifier for board, either board's backlog level name (Eg:"Stories") or Id
        :param str name: The chart name
        :rtype: :class:`<BoardChart> <azure.devops.v5_1.work.models.BoardChart>`
        """
        # Resolve project/team from the context; explicit ids win over names.
        project = team = None
        if team_context is not None:
            project = team_context.project_id or team_context.project
            team = team_context.team_id or team_context.team
        routes = {}
        for key, value, kind in (('project', project, 'string'),
                                 ('team', team, 'string'),
                                 ('board', board, 'str'),
                                 ('name', name, 'str')):
            if value is not None:
                routes[key] = self._serialize.url(key, value, kind)
        resp = self._send(http_method='GET',
                          location_id='45fe888c-239e-49fd-958c-df1a1ab21d97',
                          version='5.1',
                          route_values=routes)
        return self._deserialize('BoardChart', resp)
def get_board_charts(self, team_context, board):
        """GetBoardCharts.
        Get board charts.
        :param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
        :param str board: Identifier for board, either board's backlog level name (Eg:"Stories") or Id
        :rtype: [BoardChartReference]
        """
        # Resolve project/team from the context; explicit ids win over names.
        project = team = None
        if team_context is not None:
            project = team_context.project_id or team_context.project
            team = team_context.team_id or team_context.team
        routes = {}
        for key, value, kind in (('project', project, 'string'),
                                 ('team', team, 'string'),
                                 ('board', board, 'str')):
            if value is not None:
                routes[key] = self._serialize.url(key, value, kind)
        resp = self._send(http_method='GET',
                          location_id='45fe888c-239e-49fd-958c-df1a1ab21d97',
                          version='5.1',
                          route_values=routes)
        return self._deserialize('[BoardChartReference]', self._unwrap_collection(resp))
def update_board_chart(self, chart, team_context, board, name):
        """UpdateBoardChart.
        Update a board chart.
        :param :class:`<BoardChart> <azure.devops.v5_1.work.models.BoardChart>` chart:
        :param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
        :param str board: Identifier for board, either board's backlog level name (Eg:"Stories") or Id
        :param str name: The chart name
        :rtype: :class:`<BoardChart> <azure.devops.v5_1.work.models.BoardChart>`
        """
        # Resolve project/team from the context; explicit ids win over names.
        project = team = None
        if team_context is not None:
            project = team_context.project_id or team_context.project
            team = team_context.team_id or team_context.team
        routes = {}
        for key, value, kind in (('project', project, 'string'),
                                 ('team', team, 'string'),
                                 ('board', board, 'str'),
                                 ('name', name, 'str')):
            if value is not None:
                routes[key] = self._serialize.url(key, value, kind)
        payload = self._serialize.body(chart, 'BoardChart')
        resp = self._send(http_method='PATCH',
                          location_id='45fe888c-239e-49fd-958c-df1a1ab21d97',
                          version='5.1',
                          route_values=routes,
                          content=payload)
        return self._deserialize('BoardChart', resp)
def get_board_columns(self, team_context, board):
        """GetBoardColumns.
        Get columns on a board.
        :param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
        :param str board: Name or ID of the specific board
        :rtype: [BoardColumn]
        """
        # Resolve project/team from the context; explicit ids win over names.
        project = team = None
        if team_context is not None:
            project = team_context.project_id or team_context.project
            team = team_context.team_id or team_context.team
        routes = {}
        for key, value, kind in (('project', project, 'string'),
                                 ('team', team, 'string'),
                                 ('board', board, 'str')):
            if value is not None:
                routes[key] = self._serialize.url(key, value, kind)
        resp = self._send(http_method='GET',
                          location_id='c555d7ff-84e1-47df-9923-a3fe0cd8751b',
                          version='5.1',
                          route_values=routes)
        return self._deserialize('[BoardColumn]', self._unwrap_collection(resp))
def update_board_columns(self, board_columns, team_context, board):
        """UpdateBoardColumns.
        Update columns on a board.
        :param [BoardColumn] board_columns: List of board columns to update
        :param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
        :param str board: Name or ID of the specific board
        :rtype: [BoardColumn]
        """
        # Resolve project/team from the context; explicit ids win over names.
        project = team = None
        if team_context is not None:
            project = team_context.project_id or team_context.project
            team = team_context.team_id or team_context.team
        routes = {}
        for key, value, kind in (('project', project, 'string'),
                                 ('team', team, 'string'),
                                 ('board', board, 'str')):
            if value is not None:
                routes[key] = self._serialize.url(key, value, kind)
        payload = self._serialize.body(board_columns, '[BoardColumn]')
        resp = self._send(http_method='PUT',
                          location_id='c555d7ff-84e1-47df-9923-a3fe0cd8751b',
                          version='5.1',
                          route_values=routes,
                          content=payload)
        return self._deserialize('[BoardColumn]', self._unwrap_collection(resp))
def get_delivery_timeline_data(self, project, id, revision=None, start_date=None, end_date=None):
        """GetDeliveryTimelineData.
        Get Delivery View Data.
        :param str project: Project ID or project name
        :param str id: Identifier for delivery view
        :param int revision: Revision of the plan for which you want data. If the current plan is a different revision you will get an ViewRevisionMismatchException exception. If you do not supply a revision you will get data for the latest revision.
        :param datetime start_date: The start date of timeline
        :param datetime end_date: The end date of timeline
        :rtype: :class:`<DeliveryViewData> <azure.devops.v5_1.work.models.DeliveryViewData>`
        """
        routes = {}
        for key, value in (('project', project), ('id', id)):
            if value is not None:
                routes[key] = self._serialize.url(key, value, 'str')
        query = {}
        for key, name, value, kind in (('revision', 'revision', revision, 'int'),
                                       ('startDate', 'start_date', start_date, 'iso-8601'),
                                       ('endDate', 'end_date', end_date, 'iso-8601')):
            if value is not None:
                query[key] = self._serialize.query(name, value, kind)
        resp = self._send(http_method='GET',
                          location_id='bdd0834e-101f-49f0-a6ae-509f384a12b4',
                          version='5.1',
                          route_values=routes,
                          query_parameters=query)
        return self._deserialize('DeliveryViewData', resp)
def delete_team_iteration(self, team_context, id):
        """DeleteTeamIteration.
        Delete a team's iteration by iterationId.
        :param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
        :param str id: ID of the iteration
        """
        # Resolve project/team from the context; explicit ids win over names.
        project = team = None
        if team_context is not None:
            project = team_context.project_id or team_context.project
            team = team_context.team_id or team_context.team
        routes = {}
        for key, value, kind in (('project', project, 'string'),
                                 ('team', team, 'string'),
                                 ('id', id, 'str')):
            if value is not None:
                routes[key] = self._serialize.url(key, value, kind)
        # Fire-and-forget: DELETE returns no body for this operation.
        self._send(http_method='DELETE',
                   location_id='c9175577-28a1-4b06-9197-8636af9f64ad',
                   version='5.1',
                   route_values=routes)
def get_team_iteration(self, team_context, id):
        """GetTeamIteration.
        Get team's iteration by iterationId.
        :param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
        :param str id: ID of the iteration
        :rtype: :class:`<TeamSettingsIteration> <azure.devops.v5_1.work.models.TeamSettingsIteration>`
        """
        # Resolve project/team from the context; explicit ids win over names.
        project = team = None
        if team_context is not None:
            project = team_context.project_id or team_context.project
            team = team_context.team_id or team_context.team
        routes = {}
        for key, value, kind in (('project', project, 'string'),
                                 ('team', team, 'string'),
                                 ('id', id, 'str')):
            if value is not None:
                routes[key] = self._serialize.url(key, value, kind)
        resp = self._send(http_method='GET',
                          location_id='c9175577-28a1-4b06-9197-8636af9f64ad',
                          version='5.1',
                          route_values=routes)
        return self._deserialize('TeamSettingsIteration', resp)
def get_team_iterations(self, team_context, timeframe=None):
        """GetTeamIterations.
        Get a team's iterations using timeframe filter.
        :param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
        :param str timeframe: A filter for which iterations are returned based on relative time. Only Current is supported currently.
        :rtype: [TeamSettingsIteration]
        """
        # Resolve project/team from the context; explicit ids win over names.
        project = team = None
        if team_context is not None:
            project = team_context.project_id or team_context.project
            team = team_context.team_id or team_context.team
        routes = {}
        for key, value in (('project', project), ('team', team)):
            if value is not None:
                routes[key] = self._serialize.url(key, value, 'string')
        query = {}
        if timeframe is not None:
            query['$timeframe'] = self._serialize.query('timeframe', timeframe, 'str')
        resp = self._send(http_method='GET',
                          location_id='c9175577-28a1-4b06-9197-8636af9f64ad',
                          version='5.1',
                          route_values=routes,
                          query_parameters=query)
        return self._deserialize('[TeamSettingsIteration]', self._unwrap_collection(resp))
def post_team_iteration(self, iteration, team_context):
        """PostTeamIteration.
        Add an iteration to the team.
        :param :class:`<TeamSettingsIteration> <azure.devops.v5_1.work.models.TeamSettingsIteration>` iteration: Iteration to add
        :param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
        :rtype: :class:`<TeamSettingsIteration> <azure.devops.v5_1.work.models.TeamSettingsIteration>`
        """
        # Resolve project/team from the context; explicit ids win over names.
        project = team = None
        if team_context is not None:
            project = team_context.project_id or team_context.project
            team = team_context.team_id or team_context.team
        routes = {}
        for key, value in (('project', project), ('team', team)):
            if value is not None:
                routes[key] = self._serialize.url(key, value, 'string')
        payload = self._serialize.body(iteration, 'TeamSettingsIteration')
        resp = self._send(http_method='POST',
                          location_id='c9175577-28a1-4b06-9197-8636af9f64ad',
                          version='5.1',
                          route_values=routes,
                          content=payload)
        return self._deserialize('TeamSettingsIteration', resp)
def create_plan(self, posted_plan, project):
        """CreatePlan.
        Add a new plan for the team.
        :param :class:`<CreatePlan> <azure.devops.v5_1.work.models.CreatePlan>` posted_plan: Plan definition
        :param str project: Project ID or project name
        :rtype: :class:`<Plan> <azure.devops.v5_1.work.models.Plan>`
        """
        routes = {}
        if project is not None:
            routes['project'] = self._serialize.url('project', project, 'str')
        payload = self._serialize.body(posted_plan, 'CreatePlan')
        resp = self._send(http_method='POST',
                          location_id='0b42cb47-cd73-4810-ac90-19c9ba147453',
                          version='5.1',
                          route_values=routes,
                          content=payload)
        return self._deserialize('Plan', resp)
def delete_plan(self, project, id):
        """DeletePlan.
        Delete the specified plan.
        :param str project: Project ID or project name
        :param str id: Identifier of the plan
        """
        routes = {}
        for key, value in (('project', project), ('id', id)):
            if value is not None:
                routes[key] = self._serialize.url(key, value, 'str')
        # Fire-and-forget: DELETE returns no body for this operation.
        self._send(http_method='DELETE',
                   location_id='0b42cb47-cd73-4810-ac90-19c9ba147453',
                   version='5.1',
                   route_values=routes)
def get_plan(self, project, id):
        """GetPlan.
        Get the information for the specified plan.
        :param str project: Project ID or project name
        :param str id: Identifier of the plan
        :rtype: :class:`<Plan> <azure.devops.v5_1.work.models.Plan>`
        """
        routes = {}
        for key, value in (('project', project), ('id', id)):
            if value is not None:
                routes[key] = self._serialize.url(key, value, 'str')
        resp = self._send(http_method='GET',
                          location_id='0b42cb47-cd73-4810-ac90-19c9ba147453',
                          version='5.1',
                          route_values=routes)
        return self._deserialize('Plan', resp)
def get_plans(self, project):
        """GetPlans.
        Get the information for all the plans configured for the given team.
        :param str project: Project ID or project name
        :rtype: [Plan]
        """
        routes = {}
        if project is not None:
            routes['project'] = self._serialize.url('project', project, 'str')
        resp = self._send(http_method='GET',
                          location_id='0b42cb47-cd73-4810-ac90-19c9ba147453',
                          version='5.1',
                          route_values=routes)
        plans = self._unwrap_collection(resp)
        return self._deserialize('[Plan]', plans)
def update_plan(self, updated_plan, project, id):
        """UpdatePlan.
        Update the information for the specified plan.
        :param :class:`<UpdatePlan> <azure.devops.v5_1.work.models.UpdatePlan>` updated_plan: Plan definition to be updated
        :param str project: Project ID or project name
        :param str id: Identifier of the plan
        :rtype: :class:`<Plan> <azure.devops.v5_1.work.models.Plan>`
        """
        routes = {}
        for key, value in (('project', project), ('id', id)):
            if value is not None:
                routes[key] = self._serialize.url(key, value, 'str')
        payload = self._serialize.body(updated_plan, 'UpdatePlan')
        resp = self._send(http_method='PUT',
                          location_id='0b42cb47-cd73-4810-ac90-19c9ba147453',
                          version='5.1',
                          route_values=routes,
                          content=payload)
        return self._deserialize('Plan', resp)
def get_process_configuration(self, project):
        """GetProcessConfiguration.
        [Preview API] Get process configuration.
        :param str project: Project ID or project name
        :rtype: :class:`<ProcessConfiguration> <azure.devops.v5_1.work.models.ProcessConfiguration>`
        """
        routes = {}
        if project is not None:
            routes['project'] = self._serialize.url('project', project, 'str')
        resp = self._send(http_method='GET',
                          location_id='f901ba42-86d2-4b0c-89c1-3f86d06daa84',
                          version='5.1-preview.1',
                          route_values=routes)
        return self._deserialize('ProcessConfiguration', resp)
def get_board_rows(self, team_context, board):
        """GetBoardRows.
        Get rows on a board.
        :param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
        :param str board: Name or ID of the specific board
        :rtype: [BoardRow]
        """
        # Resolve project/team from the context; explicit ids win over names.
        project = team = None
        if team_context is not None:
            project = team_context.project_id or team_context.project
            team = team_context.team_id or team_context.team
        routes = {}
        for key, value, kind in (('project', project, 'string'),
                                 ('team', team, 'string'),
                                 ('board', board, 'str')):
            if value is not None:
                routes[key] = self._serialize.url(key, value, kind)
        resp = self._send(http_method='GET',
                          location_id='0863355d-aefd-4d63-8669-984c9b7b0e78',
                          version='5.1',
                          route_values=routes)
        return self._deserialize('[BoardRow]', self._unwrap_collection(resp))
def update_board_rows(self, board_rows, team_context, board):
        """UpdateBoardRows.
        Update rows on a board.
        :param [BoardRow] board_rows: List of board rows to update
        :param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
        :param str board: Name or ID of the specific board
        :rtype: [BoardRow]
        """
        # Resolve project/team from the context; explicit ids win over names.
        project = team = None
        if team_context is not None:
            project = team_context.project_id or team_context.project
            team = team_context.team_id or team_context.team
        routes = {}
        for key, value, kind in (('project', project, 'string'),
                                 ('team', team, 'string'),
                                 ('board', board, 'str')):
            if value is not None:
                routes[key] = self._serialize.url(key, value, kind)
        payload = self._serialize.body(board_rows, '[BoardRow]')
        resp = self._send(http_method='PUT',
                          location_id='0863355d-aefd-4d63-8669-984c9b7b0e78',
                          version='5.1',
                          route_values=routes,
                          content=payload)
        return self._deserialize('[BoardRow]', self._unwrap_collection(resp))
def get_team_days_off(self, team_context, iteration_id):
"""GetTeamDaysOff.
Get team's days off for an iteration
:param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
:param str iteration_id: ID of the iteration
:rtype: :class:`<TeamSettingsDaysOff> <azure.devops.v5_1.work.models.TeamSettingsDaysOff>`
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
if iteration_id is not None:
route_values['iterationId'] = self._serialize.url('iteration_id', iteration_id, 'str')
response = self._send(http_method='GET',
location_id='2d4faa2e-9150-4cbf-a47a-932b1b4a0773',
version='5.1',
route_values=route_values)
return self._deserialize('TeamSettingsDaysOff', response)
def update_team_days_off(self, days_off_patch, team_context, iteration_id):
"""UpdateTeamDaysOff.
Set a team's days off for an iteration
:param :class:`<TeamSettingsDaysOffPatch> <azure.devops.v5_1.work.models.TeamSettingsDaysOffPatch>` days_off_patch: Team's days off patch containing a list of start and end dates
:param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
:param str iteration_id: ID of the iteration
:rtype: :class:`<TeamSettingsDaysOff> <azure.devops.v5_1.work.models.TeamSettingsDaysOff>`
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
if iteration_id is not None:
route_values['iterationId'] = self._serialize.url('iteration_id', iteration_id, 'str')
content = self._serialize.body(days_off_patch, 'TeamSettingsDaysOffPatch')
response = self._send(http_method='PATCH',
location_id='2d4faa2e-9150-4cbf-a47a-932b1b4a0773',
version='5.1',
route_values=route_values,
content=content)
return self._deserialize('TeamSettingsDaysOff', response)
def get_team_field_values(self, team_context):
"""GetTeamFieldValues.
Get a collection of team field values
:param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
:rtype: :class:`<TeamFieldValues> <azure.devops.v5_1.work.models.TeamFieldValues>`
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
response = self._send(http_method='GET',
location_id='07ced576-58ed-49e6-9c1e-5cb53ab8bf2a',
version='5.1',
route_values=route_values)
return self._deserialize('TeamFieldValues', response)
def update_team_field_values(self, patch, team_context):
"""UpdateTeamFieldValues.
Update team field values
:param :class:`<TeamFieldValuesPatch> <azure.devops.v5_1.work.models.TeamFieldValuesPatch>` patch:
:param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
:rtype: :class:`<TeamFieldValues> <azure.devops.v5_1.work.models.TeamFieldValues>`
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
content = self._serialize.body(patch, 'TeamFieldValuesPatch')
response = self._send(http_method='PATCH',
location_id='07ced576-58ed-49e6-9c1e-5cb53ab8bf2a',
version='5.1',
route_values=route_values,
content=content)
return self._deserialize('TeamFieldValues', response)
def get_team_settings(self, team_context):
"""GetTeamSettings.
Get a team's settings
:param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
:rtype: :class:`<TeamSetting> <azure.devops.v5_1.work.models.TeamSetting>`
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
response = self._send(http_method='GET',
location_id='c3c1012b-bea7-49d7-b45e-1664e566f84c',
version='5.1',
route_values=route_values)
return self._deserialize('TeamSetting', response)
def update_team_settings(self, team_settings_patch, team_context):
"""UpdateTeamSettings.
Update a team's settings
:param :class:`<TeamSettingsPatch> <azure.devops.v5_1.work.models.TeamSettingsPatch>` team_settings_patch: TeamSettings changes
:param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
:rtype: :class:`<TeamSetting> <azure.devops.v5_1.work.models.TeamSetting>`
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
content = self._serialize.body(team_settings_patch, 'TeamSettingsPatch')
response = self._send(http_method='PATCH',
location_id='c3c1012b-bea7-49d7-b45e-1664e566f84c',
version='5.1',
route_values=route_values,
content=content)
return self._deserialize('TeamSetting', response)
def get_iteration_work_items(self, team_context, iteration_id):
"""GetIterationWorkItems.
[Preview API] Get work items for iteration
:param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
:param str iteration_id: ID of the iteration
:rtype: :class:`<IterationWorkItems> <azure.devops.v5_1.work.models.IterationWorkItems>`
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
if iteration_id is not None:
route_values['iterationId'] = self._serialize.url('iteration_id', iteration_id, 'str')
response = self._send(http_method='GET',
location_id='5b3ef1a6-d3ab-44cd-bafd-c7f45db850fa',
version='5.1-preview.1',
route_values=route_values)
return self._deserialize('IterationWorkItems', response)
def reorder_backlog_work_items(self, operation, team_context):
"""ReorderBacklogWorkItems.
[Preview API] Reorder Product Backlog/Boards Work Items
:param :class:`<ReorderOperation> <azure.devops.v5_1.work.models.ReorderOperation>` operation:
:param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
:rtype: [ReorderResult]
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
content = self._serialize.body(operation, 'ReorderOperation')
response = self._send(http_method='PATCH',
location_id='1c22b714-e7e4-41b9-85e0-56ee13ef55ed',
version='5.1-preview.1',
route_values=route_values,
content=content)
return self._deserialize('[ReorderResult]', self._unwrap_collection(response))
def reorder_iteration_work_items(self, operation, team_context, iteration_id):
"""ReorderIterationWorkItems.
[Preview API] Reorder Sprint Backlog/Taskboard Work Items
:param :class:`<ReorderOperation> <azure.devops.v5_1.work.models.ReorderOperation>` operation:
:param :class:`<TeamContext> <azure.devops.v5_1.work.models.TeamContext>` team_context: The team context for the operation
:param str iteration_id: The id of the iteration
:rtype: [ReorderResult]
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
if iteration_id is not None:
route_values['iterationId'] = self._serialize.url('iteration_id', iteration_id, 'str')
content = self._serialize.body(operation, 'ReorderOperation')
response = self._send(http_method='PATCH',
location_id='47755db2-d7eb-405a-8c25-675401525fc9',
version='5.1-preview.1',
route_values=route_values,
content=content)
return self._deserialize('[ReorderResult]', self._unwrap_collection(response))
| false | true |
f7210264f1cece9dc5803d333f7cdf0b48ec3e1d | 68,178 | py | Python | pymc3/tests/test_distributions.py | semohr/pymc3 | 198d13e2ed6f32b33fd8f4b591a47dc8dd8fe2df | [
"Apache-2.0"
] | null | null | null | pymc3/tests/test_distributions.py | semohr/pymc3 | 198d13e2ed6f32b33fd8f4b591a47dc8dd8fe2df | [
"Apache-2.0"
] | null | null | null | pymc3/tests/test_distributions.py | semohr/pymc3 | 198d13e2ed6f32b33fd8f4b591a47dc8dd8fe2df | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import sys
from .helpers import SeededTest, select_by_precision
from ..vartypes import continuous_types
from ..model import Model, Point, Deterministic
from ..blocking import DictToVarBijection
from ..distributions import (
DensityDist,
Categorical,
Multinomial,
VonMises,
Dirichlet,
MvStudentT,
MvNormal,
MatrixNormal,
ZeroInflatedPoisson,
ZeroInflatedNegativeBinomial,
Constant,
Poisson,
Bernoulli,
Beta,
BetaBinomial,
HalfStudentT,
StudentT,
Weibull,
Pareto,
InverseGamma,
Gamma,
Cauchy,
HalfCauchy,
Lognormal,
Laplace,
NegativeBinomial,
Geometric,
Exponential,
ExGaussian,
Normal,
TruncatedNormal,
Flat,
LKJCorr,
Wald,
ChiSquared,
HalfNormal,
DiscreteUniform,
Bound,
Uniform,
Triangular,
Binomial,
SkewNormal,
DiscreteWeibull,
Gumbel,
Logistic,
OrderedLogistic,
LogitNormal,
Interpolated,
ZeroInflatedBinomial,
HalfFlat,
AR1,
KroneckerNormal,
Rice,
Kumaraswamy,
Moyal,
HyperGeometric,
)
from ..distributions import continuous
from pymc3.theanof import floatX
import pymc3 as pm
from numpy import array, inf, log, exp
from numpy.testing import assert_almost_equal, assert_allclose, assert_equal
import numpy.random as nr
import numpy as np
import pytest
from scipy import integrate
import scipy.stats.distributions as sp
import scipy.stats
from scipy.special import logit
import theano
import theano.tensor as tt
from ..math import kronecker
def get_lkj_cases():
    """
    Log probabilities calculated using the formulas in:
    http://www.sciencedirect.com/science/article/pii/S0047259X09000876
    """
    # Each case is (lower-triangle values, eta, n, expected logp).
    tri = np.array([0.7, 0.0, -0.7])
    cases = [
        (tri, 1, 3, 1.5963125911388549),
        (tri, 3, 3, -7.7963493376312742),
        (tri, 0, 3, -np.inf),
        (np.array([1.1, 0.0, -0.7]), 1, 3, -np.inf),
        (np.array([0.7, 0.0, -1.1]), 1, 3, -np.inf),
    ]
    return cases
# Precomputed (tri, eta, n, logp) reference cases shared by the LKJCorr tests.
LKJ_CASES = get_lkj_cases()
class Domain:
    """A set of representative parameter values plus (excluded) edge points."""

    def __init__(self, vals, dtype=None, edges=None, shape=None):
        avals = array(vals, dtype=dtype)
        if dtype is None and not str(avals.dtype).startswith("int"):
            avals = avals.astype(theano.config.floatX)
        vals = [array(v, dtype=avals.dtype) for v in vals]
        if edges is None:
            # By convention the first and last entries are the open edges
            # and are removed from the interior values.
            edges = array(vals[0]), array(vals[-1])
            vals = vals[1:-1]
        if shape is None:
            shape = avals[0].shape
        self.vals = vals
        self.shape = shape
        self.lower, self.upper = edges
        self.dtype = avals.dtype

    def __add__(self, other):
        shifted = [v + other for v in self.vals]
        new_edges = (self.lower + other, self.upper + other)
        return Domain(shifted, self.dtype, new_edges, self.shape)

    def __mul__(self, other):
        scaled = [v * other for v in self.vals]
        try:
            # Edges may be None (unbounded); multiplying them raises TypeError.
            new_edges = (self.lower * other, self.upper * other)
        except TypeError:
            new_edges = (self.lower, self.upper)
        return Domain(scaled, self.dtype, new_edges, self.shape)

    def __neg__(self):
        negated = [-v for v in self.vals]
        return Domain(negated, self.dtype, (-self.lower, -self.upper), self.shape)
def product(domains, n_samples=-1):
    """Get an iterator over a product of domains.

    Args:
        domains: a dictionary of (name, object) pairs, where the objects
            must be "domain-like", as in, have a `.vals` property
        n_samples: int, maximum samples to return. -1 to return whole product

    Returns:
        list of the cartesian product of the domains
    """
    if not domains:
        return [{}]
    names, doms = zip(*domains.items())
    value_lists = [d.vals for d in doms]
    all_vals = [zip(names, combo) for combo in itertools.product(*value_lists)]
    if 0 < n_samples < len(all_vals):
        # Subsample without replacement to keep the test run bounded.
        picked = nr.choice(len(all_vals), n_samples, replace=False)
        return (all_vals[j] for j in picked)
    return all_vals
# Canonical parameter/value domains used throughout the tests.  By Domain
# convention the first and last entries are open edges excluded from `.vals`.
R = Domain([-inf, -2.1, -1, -0.01, 0.0, 0.01, 1, 2.1, inf])  # whole real line
Rplus = Domain([0, 0.01, 0.1, 0.9, 0.99, 1, 1.5, 2, 100, inf])  # positive reals
Rplusbig = Domain([0, 0.5, 0.9, 0.99, 1, 1.5, 2, 20, inf])
Rminusbig = Domain([-inf, -2, -1.5, -1, -0.99, -0.9, -0.5, -0.01, 0])
Unit = Domain([0, 0.001, 0.1, 0.5, 0.75, 0.99, 1])  # open unit interval
Circ = Domain([-np.pi, -2.1, -1, -0.01, 0.0, 0.01, 1, 2.1, np.pi])  # circular support
Runif = Domain([-1, -0.4, 0, 0.4, 1])
Rdunif = Domain([-10, 0, 10.0])
Rplusunif = Domain([0, 0.5, inf])
Rplusdunif = Domain([2, 10, 100], "int64")
I = Domain([-1000, -3, -2, -1, 0, 1, 2, 3, 1000], "int64")  # integers
NatSmall = Domain([0, 3, 4, 5, 1000], "int64")
Nat = Domain([0, 1, 2, 3, 2000], "int64")  # non-negative integers
NatBig = Domain([0, 1, 2, 3, 5000, 50000], "int64")
PosNat = Domain([1, 2, 3, 2000], "int64")  # strictly positive integers
Bool = Domain([0, 0, 1, 1], "int64")
def build_model(distfam, valuedomain, vardomains, extra_args=None):
    """Build a model with a Flat prior per parameter domain feeding an
    untransformed ``distfam("value", ...)`` node."""
    if extra_args is None:
        extra_args = {}
    with Model() as m:
        params = {
            name: Flat(name, dtype=dom.dtype, shape=dom.shape, testval=dom.vals[0])
            for name, dom in vardomains.items()
        }
        params.update(extra_args)
        distfam("value", shape=valuedomain.shape, transform=None, **params)
    return m
def integrate_nd(f, domain, shape, dtype):
    """Numerically integrate (continuous) or sum (discrete) `f` over `domain`
    for shapes (), (1,), (2,) or (3,)."""
    if shape == () or shape == (1,):
        if dtype in continuous_types:
            return integrate.quad(f, domain.lower, domain.upper, epsabs=1e-8)[0]
        # Discrete support: sum the pmf over the (inclusive) integer range.
        return sum(f(j) for j in range(domain.lower, domain.upper + 1))
    if shape == (2,):
        return integrate.dblquad(
            lambda a, b: f([a, b]),
            domain.lower[0],
            domain.upper[0],
            lambda _: domain.lower[1],
            lambda _: domain.upper[1],
        )[0]
    if shape == (3,):
        return integrate.tplquad(
            lambda a, b, c: f([a, b, c]),
            domain.lower[0],
            domain.upper[0],
            lambda _: domain.lower[1],
            lambda _: domain.upper[1],
            lambda _, __: domain.lower[2],
            lambda _, __: domain.upper[2],
        )[0]
    raise ValueError("Dont know how to integrate shape: " + str(shape))
def multinomial_logpdf(value, n, p):
    """Reference multinomial log-pmf; -inf outside the support."""
    in_support = value.sum() == n and (0 <= value).all() and (value <= n).all()
    if not in_support:
        return -inf
    logpdf = scipy.special.gammaln(n + 1)
    logpdf -= scipy.special.gammaln(value + 1).sum()
    logpdf += logpow(p, value).sum()
    return logpdf
def beta_mu_sigma(value, mu, sigma):
    """Beta logpdf under the (mu, sigma) parametrization; -inf when the
    implied concentration is not positive."""
    kappa = mu * (1 - mu) / sigma ** 2 - 1
    if kappa > 0:
        return sp.beta.logpdf(value, mu * kappa, (1 - mu) * kappa)
    return -inf
class ProductDomain:
    """Cartesian product of several domains, stacked along a leading axis."""

    def __init__(self, domains):
        self.vals = list(itertools.product(*(d.vals for d in domains)))
        self.shape = (len(domains),) + domains[0].shape
        self.lower = [d.lower for d in domains]
        self.upper = [d.upper for d in domains]
        self.dtype = domains[0].dtype
def Vector(D, n):
    """Domain of length-`n` vectors whose entries each range over `D`."""
    return ProductDomain(n * [D])
def SortedVector(n):
    """Domain of 10 sorted standard-normal vectors of length `n` (open edges)."""
    np.random.seed(42)
    vals = [np.sort(np.random.randn(n)) for _ in range(10)]
    return Domain(vals, edges=(None, None))
def UnitSortedVector(n):
    """Domain of 10 sorted uniform(0, 1) vectors of length `n` (open edges)."""
    np.random.seed(42)
    vals = [np.sort(np.random.rand(n)) for _ in range(10)]
    return Domain(vals, edges=(None, None))
def RealMatrix(n, m):
    """Domain of 10 random n-by-m real matrices (open edges)."""
    np.random.seed(42)
    vals = [np.random.randn(n, m) for _ in range(10)]
    return Domain(vals, edges=(None, None))
def simplex_values(n):
    """Yield points on the n-simplex, built recursively by stick-breaking
    over the `Unit` domain."""
    if n == 1:
        yield array([1.0])
        return
    for v in Unit.vals:
        for rest in simplex_values(n - 1):
            yield np.concatenate([[v], (1 - v) * rest])
def normal_logpdf_tau(value, mu, tau):
    """MvNormal logpdf parametrized by a precision matrix `tau`."""
    cov = np.linalg.inv(tau)
    return normal_logpdf_cov(value, mu, cov).sum()
def normal_logpdf_cov(value, mu, cov):
    """MvNormal logpdf summed over the rows of `value`."""
    logp = scipy.stats.multivariate_normal.logpdf(value, mu, cov)
    return logp.sum()
def normal_logpdf_chol(value, mu, chol):
    """MvNormal logpdf from a lower-triangular Cholesky factor."""
    cov = np.dot(chol, chol.T)
    return normal_logpdf_cov(value, mu, cov).sum()
def normal_logpdf_chol_upper(value, mu, chol):
    """MvNormal logpdf from an upper-triangular Cholesky factor."""
    cov = np.dot(chol.T, chol)
    return normal_logpdf_cov(value, mu, cov).sum()
def matrix_normal_logpdf_cov(value, mu, rowcov, colcov):
    """Matrix-normal logpdf with explicit row/column covariance matrices."""
    dist = scipy.stats.matrix_normal
    return dist.logpdf(value, mu, rowcov, colcov)
def matrix_normal_logpdf_chol(value, mu, rowchol, colchol):
    """Matrix-normal logpdf from Cholesky factors of the row/column covariances."""
    rowcov = np.dot(rowchol, rowchol.T)
    colcov = np.dot(colchol, colchol.T)
    return matrix_normal_logpdf_cov(value, mu, rowcov, colcov)
def kron_normal_logpdf_cov(value, mu, covs, sigma):
    """MvNormal logpdf whose covariance is the Kronecker product of `covs`,
    optionally plus `sigma**2 * I` white noise."""
    cov = kronecker(*covs).eval()
    if sigma is not None:
        cov = cov + sigma ** 2 * np.eye(*cov.shape)
    return scipy.stats.multivariate_normal.logpdf(value, mu, cov).sum()
def kron_normal_logpdf_chol(value, mu, chols, sigma):
    """Kronecker MvNormal logpdf where each factor is a Cholesky factor."""
    covs = [np.dot(c, c.T) for c in chols]
    return kron_normal_logpdf_cov(value, mu, covs, sigma=sigma)
def kron_normal_logpdf_evd(value, mu, evds, sigma):
    """Kronecker MvNormal logpdf where each factor is an (eigs, Q)
    eigendecomposition; entries may be theano expressions or numpy arrays."""

    def _as_numpy(x):
        # Theano expressions expose .eval(); plain arrays do not.
        try:
            return x.eval()
        except AttributeError:
            return x

    covs = []
    for eigs, Q in evds:
        eigs = _as_numpy(eigs)
        Q = _as_numpy(Q)
        covs.append(np.dot(Q, np.dot(np.diag(eigs), Q.T)))
    return kron_normal_logpdf_cov(value, mu, covs, sigma)
def betafn(a):
    """Log multivariate beta function along the last axis of `a`."""
    gammaln = scipy.special.gammaln
    return floatX(gammaln(a).sum(-1) - gammaln(a.sum(-1)))
def logpow(v, p):
    """Elementwise ``p * log(v)`` with the convention that ``v == 0``
    contributes 0 (avoids -inf leaking into the sum)."""
    raw = p * np.log(v)
    return np.choose(v == 0, [raw, 0])
def discrete_weibull_logpmf(value, q, beta):
    """Reference discrete-Weibull log-pmf: log of the difference of the
    survival function at `value` and `value + 1`."""
    qf = floatX(q)
    bf = floatX(beta)
    surv_lo = np.power(qf, np.power(floatX(value), bf))
    surv_hi = np.power(qf, np.power(floatX(value + 1), bf))
    return floatX(np.log(surv_lo - surv_hi))
def dirichlet_logpdf(value, a):
    """Reference Dirichlet logpdf, summed over leading dimensions."""
    logp = logpow(value, a - 1).sum(-1) - betafn(a)
    return floatX(logp.sum())
def categorical_logpdf(value, p):
    """Reference Categorical log-pmf.

    `value` indexes the last axis of `p` (the category axis); values outside
    the support yield -inf.
    """
    # Valid categories are 0 .. p.shape[-1] - 1.  The previous bound check
    # used `value <= len(p)`, which (a) let `value == len(p)` through and
    # crashed with an IndexError, and (b) measured axis 0 even though the
    # indexing below is on the (moved) last axis.
    if 0 <= value < p.shape[-1]:
        return floatX(np.log(np.moveaxis(p, -1, 0)[value]))
    else:
        return -inf
def mvt_logpdf(value, nu, Sigma, mu=0):
    """Reference multivariate Student-t logpdf, summed over rows of `value`."""
    d = len(Sigma)
    dev = np.atleast_2d(value) - mu
    chol = np.linalg.cholesky(Sigma)
    # Whiten the deviations with the Cholesky factor.
    std_dev = np.linalg.solve(chol, dev.T).T
    logdet = np.log(np.diag(chol)).sum()
    lgamma = scipy.special.gammaln
    norm_const = lgamma((nu + d) / 2.0) - 0.5 * d * np.log(nu * np.pi) - lgamma(nu / 2.0)
    quad = (std_dev * std_dev).sum(-1)
    logp = norm_const - logdet - (nu + d) / 2.0 * np.log1p(quad / nu)
    return logp.sum()
def AR1_logpdf(value, k, tau_e):
    """Reference AR(1) logpdf: stationary normal for the first observation,
    conditional normals for the remainder."""
    tau = tau_e * (1 - k ** 2)
    first = sp.norm(loc=0, scale=1 / np.sqrt(tau)).logpdf(value[0])
    rest = sp.norm(loc=k * value[:-1], scale=1 / np.sqrt(tau_e)).logpdf(value[1:]).sum()
    return first + rest
def invlogit(x, eps=sys.float_info.epsilon):
    """Logistic sigmoid squeezed into (eps, 1 - eps) for numerical safety."""
    return (1.0 - 2.0 * eps) / (1.0 + np.exp(-x)) + eps
def orderedlogistic_logpdf(value, eta, cutpoints):
    """Reference ordered-logistic log-pmf; -inf if any implied category
    probability is negative (i.e. cutpoints not sorted)."""
    edges = np.concatenate(([-np.inf], cutpoints, [np.inf]))
    probs = np.array(
        [invlogit(eta - lo) - invlogit(eta - hi) for lo, hi in zip(edges[:-1], edges[1:])]
    )
    return np.where(np.all(probs >= 0), np.log(probs[value]), -np.inf)
class Simplex:
    """Domain over all enumerated points of the n-simplex."""

    def __init__(self, n):
        self.vals = list(simplex_values(n))
        self.shape = (n,)
        self.dtype = Unit.dtype
class MultiSimplex:
    """Domain over stacks of `n_independent` points of the
    `n_dependent`-simplex."""

    def __init__(self, n_dependent, n_independent):
        points = itertools.product(simplex_values(n_dependent), repeat=n_independent)
        self.vals = [np.vstack(p) for p in points]
        self.shape = (n_independent, n_dependent)
        self.dtype = Unit.dtype
def PdMatrix(n):
    """Return the canonical positive-definite matrix domain of size n (1-3)."""
    if n == 1:
        return PdMatrix1
    if n == 2:
        return PdMatrix2
    if n == 3:
        return PdMatrix3
    raise ValueError("n out of bounds")
# Fixed positive-definite matrix domains; edges=(None, None) keeps every
# listed matrix as a test value (no open-edge trimming).
PdMatrix1 = Domain([np.eye(1), [[0.5]]], edges=(None, None))
PdMatrix2 = Domain([np.eye(2), [[0.5, 0.05], [0.05, 4.5]]], edges=(None, None))
PdMatrix3 = Domain([np.eye(3), [[0.5, 0.1, 0], [0.1, 1, 0], [0, 0, 2.5]]], edges=(None, None))
# Lower-triangular Cholesky-factor domains.
PdMatrixChol1 = Domain([np.eye(1), [[0.001]]], edges=(None, None))
PdMatrixChol2 = Domain([np.eye(2), [[0.1, 0], [10, 1]]], edges=(None, None))
PdMatrixChol3 = Domain([np.eye(3), [[0.1, 0, 0], [10, 100, 0], [0, 1, 10]]], edges=(None, None))
def PdMatrixChol(n):
    """Return the lower-triangular Cholesky-factor domain of size n (1-3)."""
    if n == 1:
        return PdMatrixChol1
    if n == 2:
        return PdMatrixChol2
    if n == 3:
        return PdMatrixChol3
    raise ValueError("n out of bounds")
# Upper-triangular Cholesky-factor domains (transposes of the lower ones).
PdMatrixCholUpper1 = Domain([np.eye(1), [[0.001]]], edges=(None, None))
PdMatrixCholUpper2 = Domain([np.eye(2), [[0.1, 10], [0, 1]]], edges=(None, None))
PdMatrixCholUpper3 = Domain(
    [np.eye(3), [[0.1, 10, 0], [0, 100, 1], [0, 0, 10]]], edges=(None, None)
)
def PdMatrixCholUpper(n):
    """Return the upper-triangular Cholesky-factor domain of size n (1-3)."""
    if n == 1:
        return PdMatrixCholUpper1
    if n == 2:
        return PdMatrixCholUpper2
    if n == 3:
        return PdMatrixCholUpper3
    raise ValueError("n out of bounds")
def RandomPdMatrix(n):
    """Draw a random symmetric positive-definite n x n matrix."""
    base = np.random.rand(n, n)
    # base @ base.T is PSD; adding n*I makes it strictly positive-definite.
    return np.dot(base, base.T) + n * np.identity(n)
class TestMatchesScipy(SeededTest):
def pymc3_matches_scipy(
self,
pymc3_dist,
domain,
paramdomains,
scipy_dist,
decimal=None,
extra_args=None,
scipy_args=None,
):
if extra_args is None:
extra_args = {}
if scipy_args is None:
scipy_args = {}
model = build_model(pymc3_dist, domain, paramdomains, extra_args)
value = model.named_vars["value"]
def logp(args):
args.update(scipy_args)
return scipy_dist(**args)
self.check_logp(model, value, domain, paramdomains, logp, decimal=decimal)
def check_logp(self, model, value, domain, paramdomains, logp_reference, decimal=None):
domains = paramdomains.copy()
domains["value"] = domain
logp = model.fastlogp
for pt in product(domains, n_samples=100):
pt = Point(pt, model=model)
if decimal is None:
decimal = select_by_precision(float64=6, float32=3)
assert_almost_equal(logp(pt), logp_reference(pt), decimal=decimal, err_msg=str(pt))
def check_logcdf(
self,
pymc3_dist,
domain,
paramdomains,
scipy_logcdf,
decimal=None,
n_samples=100,
):
domains = paramdomains.copy()
domains["value"] = domain
if decimal is None:
decimal = select_by_precision(float64=6, float32=3)
for pt in product(domains, n_samples=n_samples):
params = dict(pt)
scipy_cdf = scipy_logcdf(**params)
value = params.pop("value")
dist = pymc3_dist.dist(**params)
assert_almost_equal(
dist.logcdf(value).tag.test_value,
scipy_cdf,
decimal=decimal,
err_msg=str(pt),
)
def check_int_to_1(self, model, value, domain, paramdomains):
pdf = model.fastfn(exp(model.logpt))
for pt in product(paramdomains, n_samples=10):
pt = Point(pt, value=value.tag.test_value, model=model)
bij = DictToVarBijection(value, (), pt)
pdfx = bij.mapf(pdf)
area = integrate_nd(pdfx, domain, value.dshape, value.dtype)
assert_almost_equal(area, 1, err_msg=str(pt))
def checkd(self, distfam, valuedomain, vardomains, checks=None, extra_args=None):
if checks is None:
checks = (self.check_int_to_1,)
if extra_args is None:
extra_args = {}
m = build_model(distfam, valuedomain, vardomains, extra_args=extra_args)
for check in checks:
check(m, m.named_vars["value"], valuedomain, vardomains)
def test_uniform(self):
self.pymc3_matches_scipy(
Uniform,
Runif,
{"lower": -Rplusunif, "upper": Rplusunif},
lambda value, lower, upper: sp.uniform.logpdf(value, lower, upper - lower),
)
self.check_logcdf(
Uniform,
Runif,
{"lower": -Rplusunif, "upper": Rplusunif},
lambda value, lower, upper: sp.uniform.logcdf(value, lower, upper - lower),
)
def test_triangular(self):
self.pymc3_matches_scipy(
Triangular,
Runif,
{"lower": -Rplusunif, "c": Runif, "upper": Rplusunif},
lambda value, c, lower, upper: sp.triang.logpdf(value, c - lower, lower, upper - lower),
)
self.check_logcdf(
Triangular,
Runif,
{"lower": -Rplusunif, "c": Runif, "upper": Rplusunif},
lambda value, c, lower, upper: sp.triang.logcdf(value, c - lower, lower, upper - lower),
)
def test_bound_normal(self):
PositiveNormal = Bound(Normal, lower=0.0)
self.pymc3_matches_scipy(
PositiveNormal,
Rplus,
{"mu": Rplus, "sigma": Rplus},
lambda value, mu, sigma: sp.norm.logpdf(value, mu, sigma),
decimal=select_by_precision(float64=6, float32=-1),
)
with Model():
x = PositiveNormal("x", mu=0, sigma=1, transform=None)
assert np.isinf(x.logp({"x": -1}))
def test_discrete_unif(self):
self.pymc3_matches_scipy(
DiscreteUniform,
Rdunif,
{"lower": -Rplusdunif, "upper": Rplusdunif},
lambda value, lower, upper: sp.randint.logpmf(value, lower, upper + 1),
)
def test_flat(self):
self.pymc3_matches_scipy(Flat, Runif, {}, lambda value: 0)
with Model():
x = Flat("a")
assert_allclose(x.tag.test_value, 0)
self.check_logcdf(Flat, Runif, {}, lambda value: np.log(0.5))
# Check infinite cases individually.
assert 0.0 == Flat.dist().logcdf(np.inf).tag.test_value
assert -np.inf == Flat.dist().logcdf(-np.inf).tag.test_value
def test_half_flat(self):
self.pymc3_matches_scipy(HalfFlat, Rplus, {}, lambda value: 0)
with Model():
x = HalfFlat("a", shape=2)
assert_allclose(x.tag.test_value, 1)
assert x.tag.test_value.shape == (2,)
self.check_logcdf(HalfFlat, Runif, {}, lambda value: -np.inf)
# Check infinite cases individually.
assert 0.0 == HalfFlat.dist().logcdf(np.inf).tag.test_value
assert -np.inf == HalfFlat.dist().logcdf(-np.inf).tag.test_value
def test_normal(self):
self.pymc3_matches_scipy(
Normal,
R,
{"mu": R, "sigma": Rplus},
lambda value, mu, sigma: sp.norm.logpdf(value, mu, sigma),
decimal=select_by_precision(float64=6, float32=1),
)
self.check_logcdf(
Normal,
R,
{"mu": R, "sigma": Rplus},
lambda value, mu, sigma: sp.norm.logcdf(value, mu, sigma),
)
def test_truncated_normal(self):
def scipy_logp(value, mu, sigma, lower, upper):
return sp.truncnorm.logpdf(
value, (lower - mu) / sigma, (upper - mu) / sigma, loc=mu, scale=sigma
)
self.pymc3_matches_scipy(
TruncatedNormal,
R,
{"mu": R, "sigma": Rplusbig, "lower": -Rplusbig, "upper": Rplusbig},
scipy_logp,
decimal=select_by_precision(float64=6, float32=1),
)
def test_half_normal(self):
self.pymc3_matches_scipy(
HalfNormal,
Rplus,
{"sigma": Rplus},
lambda value, sigma: sp.halfnorm.logpdf(value, scale=sigma),
decimal=select_by_precision(float64=6, float32=-1),
)
self.check_logcdf(
HalfNormal,
Rplus,
{"sigma": Rplus},
lambda value, sigma: sp.halfnorm.logcdf(value, scale=sigma),
)
def test_chi_squared(self):
self.pymc3_matches_scipy(
ChiSquared,
Rplus,
{"nu": Rplusdunif},
lambda value, nu: sp.chi2.logpdf(value, df=nu),
)
@pytest.mark.xfail(reason="Poor CDF in SciPy. See scipy/scipy#869 for details.")
def test_wald_scipy(self):
self.pymc3_matches_scipy(
Wald,
Rplus,
{"mu": Rplus, "alpha": Rplus},
lambda value, mu, alpha: sp.invgauss.logpdf(value, mu=mu, loc=alpha),
decimal=select_by_precision(float64=6, float32=1),
)
self.check_logcdf(
Wald,
Rplus,
{"mu": Rplus, "alpha": Rplus},
lambda value, mu, alpha: sp.invgauss.logcdf(value, mu=mu, loc=alpha),
)
    @pytest.mark.parametrize(
        "value,mu,lam,phi,alpha,logp",
        [
            (0.5, 0.001, 0.5, None, 0.0, -124500.7257914),
            (1.0, 0.5, 0.001, None, 0.0, -4.3733162),
            (2.0, 1.0, None, None, 0.0, -2.2086593),
            (5.0, 2.0, 2.5, None, 0.0, -3.4374500),
            (7.5, 5.0, None, 1.0, 0.0, -3.2199074),
            (15.0, 10.0, None, 0.75, 0.0, -4.0360623),
            (50.0, 15.0, None, 0.66666, 0.0, -6.1801249),
            (0.5, 0.001, 0.5, None, 0.0, -124500.7257914),
            (1.0, 0.5, 0.001, None, 0.5, -3.3330954),
            (2.0, 1.0, None, None, 1.0, -0.9189385),
            (5.0, 2.0, 2.5, None, 2.0, -2.2128783),
            (7.5, 5.0, None, 1.0, 2.5, -2.5283764),
            (15.0, 10.0, None, 0.75, 5.0, -3.3653647),
            (50.0, 15.0, None, 0.666666, 10.0, -5.6481874),
        ],
    )
    def test_wald(self, value, mu, lam, phi, alpha, logp):
        """Wald logp matches fixed reference values under each parametrization."""
        # Log probabilities calculated using the dIG function from the R package gamlss.
        # See e.g., doi: 10.1111/j.1467-9876.2005.00510.x, or
        # http://www.gamlss.org/.
        with Model() as model:
            Wald("wald", mu=mu, lam=lam, phi=phi, alpha=alpha, transform=None)
        pt = {"wald": value}
        decimals = select_by_precision(float64=6, float32=1)
        assert_almost_equal(model.fastlogp(pt), logp, decimal=decimals, err_msg=str(pt))
def test_beta(self):
self.pymc3_matches_scipy(
Beta,
Unit,
{"alpha": Rplus, "beta": Rplus},
lambda value, alpha, beta: sp.beta.logpdf(value, alpha, beta),
)
self.pymc3_matches_scipy(Beta, Unit, {"mu": Unit, "sigma": Rplus}, beta_mu_sigma)
self.check_logcdf(
Beta,
Unit,
{"alpha": Rplus, "beta": Rplus},
lambda value, alpha, beta: sp.beta.logcdf(value, alpha, beta),
)
def test_kumaraswamy(self):
# Scipy does not have a built-in Kumaraswamy pdf
def scipy_log_pdf(value, a, b):
return (
np.log(a) + np.log(b) + (a - 1) * np.log(value) + (b - 1) * np.log(1 - value ** a)
)
self.pymc3_matches_scipy(Kumaraswamy, Unit, {"a": Rplus, "b": Rplus}, scipy_log_pdf)
def test_exponential(self):
self.pymc3_matches_scipy(
Exponential,
Rplus,
{"lam": Rplus},
lambda value, lam: sp.expon.logpdf(value, 0, 1 / lam),
)
self.check_logcdf(
Exponential,
Rplus,
{"lam": Rplus},
lambda value, lam: sp.expon.logcdf(value, 0, 1 / lam),
)
def test_geometric(self):
self.pymc3_matches_scipy(
Geometric, Nat, {"p": Unit}, lambda value, p: np.log(sp.geom.pmf(value, p))
)
def test_hypergeometric(self):
self.pymc3_matches_scipy(
HyperGeometric,
Nat,
{"N": NatSmall, "k": NatSmall, "n": NatSmall},
lambda value, N, k, n: sp.hypergeom.logpmf(value, N, k, n),
)
def test_negative_binomial(self):
def test_fun(value, mu, alpha):
return sp.nbinom.logpmf(value, alpha, 1 - mu / (mu + alpha))
self.pymc3_matches_scipy(NegativeBinomial, Nat, {"mu": Rplus, "alpha": Rplus}, test_fun)
self.pymc3_matches_scipy(
NegativeBinomial,
Nat,
{"p": Unit, "n": Rplus},
lambda value, p, n: sp.nbinom.logpmf(value, n, p),
)
    @pytest.mark.parametrize(
        "mu, p, alpha, n, expected",
        [
            (5, None, None, None, "Must specify either alpha or n."),
            (None, 0.5, None, None, "Must specify either alpha or n."),
            (None, None, None, None, "Must specify either alpha or n."),
            (5, None, 2, 2, "Can't specify both alpha and n."),
            (None, 0.5, 2, 2, "Can't specify both alpha and n."),
            (None, None, 2, 2, "Can't specify both alpha and n."),
            (None, None, 2, None, "Must specify either mu or p."),
            (None, None, None, 2, "Must specify either mu or p."),
            (5, 0.5, 2, None, "Can't specify both mu and p."),
            (5, 0.5, None, 2, "Can't specify both mu and p."),
        ],
    )
    def test_negative_binomial_init_fail(self, mu, p, alpha, n, expected):
        """Invalid parametrization combinations raise an informative ValueError."""
        with Model():
            with pytest.raises(ValueError, match=f"Incompatible parametrization. {expected}"):
                NegativeBinomial("x", mu=mu, p=p, alpha=alpha, n=n)
def test_laplace(self):
self.pymc3_matches_scipy(
Laplace,
R,
{"mu": R, "b": Rplus},
lambda value, mu, b: sp.laplace.logpdf(value, mu, b),
)
self.check_logcdf(
Laplace,
R,
{"mu": R, "b": Rplus},
lambda value, mu, b: sp.laplace.logcdf(value, mu, b),
)
def test_lognormal(self):
self.pymc3_matches_scipy(
Lognormal,
Rplus,
{"mu": R, "tau": Rplusbig},
lambda value, mu, tau: floatX(sp.lognorm.logpdf(value, tau ** -0.5, 0, np.exp(mu))),
)
self.check_logcdf(
Lognormal,
Rplus,
{"mu": R, "tau": Rplusbig},
lambda value, mu, tau: sp.lognorm.logcdf(value, tau ** -0.5, 0, np.exp(mu)),
)
def test_t(self):
self.pymc3_matches_scipy(
StudentT,
R,
{"nu": Rplus, "mu": R, "lam": Rplus},
lambda value, nu, mu, lam: sp.t.logpdf(value, nu, mu, lam ** -0.5),
)
self.check_logcdf(
StudentT,
R,
{"nu": Rplus, "mu": R, "lam": Rplus},
lambda value, nu, mu, lam: sp.t.logcdf(value, nu, mu, lam ** -0.5),
n_samples=10,
)
def test_cauchy(self):
self.pymc3_matches_scipy(
Cauchy,
R,
{"alpha": R, "beta": Rplusbig},
lambda value, alpha, beta: sp.cauchy.logpdf(value, alpha, beta),
)
self.check_logcdf(
Cauchy,
R,
{"alpha": R, "beta": Rplusbig},
lambda value, alpha, beta: sp.cauchy.logcdf(value, alpha, beta),
)
def test_half_cauchy(self):
self.pymc3_matches_scipy(
HalfCauchy,
Rplus,
{"beta": Rplusbig},
lambda value, beta: sp.halfcauchy.logpdf(value, scale=beta),
)
self.check_logcdf(
HalfCauchy,
Rplus,
{"beta": Rplusbig},
lambda value, beta: sp.halfcauchy.logcdf(value, scale=beta),
)
def test_gamma(self):
self.pymc3_matches_scipy(
Gamma,
Rplus,
{"alpha": Rplusbig, "beta": Rplusbig},
lambda value, alpha, beta: sp.gamma.logpdf(value, alpha, scale=1.0 / beta),
)
def test_fun(value, mu, sigma):
return sp.gamma.logpdf(value, mu ** 2 / sigma ** 2, scale=1.0 / (mu / sigma ** 2))
self.pymc3_matches_scipy(Gamma, Rplus, {"mu": Rplusbig, "sigma": Rplusbig}, test_fun)
self.check_logcdf(
Gamma,
Rplus,
{"alpha": Rplusbig, "beta": Rplusbig},
lambda value, alpha, beta: sp.gamma.logcdf(value, alpha, scale=1.0 / beta),
)
@pytest.mark.xfail(
condition=(theano.config.floatX == "float32"),
reason="Fails on float32 due to numerical issues",
)
def test_inverse_gamma(self):
self.pymc3_matches_scipy(
InverseGamma,
Rplus,
{"alpha": Rplus, "beta": Rplus},
lambda value, alpha, beta: sp.invgamma.logpdf(value, alpha, scale=beta),
)
self.check_logcdf(
InverseGamma,
Rplus,
{"alpha": Rplus, "beta": Rplus},
lambda value, alpha, beta: sp.invgamma.logcdf(value, alpha, scale=beta),
)
@pytest.mark.xfail(
condition=(theano.config.floatX == "float32"),
reason="Fails on float32 due to scaling issues",
)
def test_inverse_gamma_alt_params(self):
def test_fun(value, mu, sigma):
alpha, beta = InverseGamma._get_alpha_beta(None, None, mu, sigma)
return sp.invgamma.logpdf(value, alpha, scale=beta)
self.pymc3_matches_scipy(InverseGamma, Rplus, {"mu": Rplus, "sigma": Rplus}, test_fun)
def test_pareto(self):
self.pymc3_matches_scipy(
Pareto,
Rplus,
{"alpha": Rplusbig, "m": Rplusbig},
lambda value, alpha, m: sp.pareto.logpdf(value, alpha, scale=m),
)
self.check_logcdf(
Pareto,
Rplus,
{"alpha": Rplusbig, "m": Rplusbig},
lambda value, alpha, m: sp.pareto.logcdf(value, alpha, scale=m),
)
@pytest.mark.xfail(
condition=(theano.config.floatX == "float32"),
reason="Fails on float32 due to inf issues",
)
def test_weibull(self):
self.pymc3_matches_scipy(
Weibull,
Rplus,
{"alpha": Rplusbig, "beta": Rplusbig},
lambda value, alpha, beta: sp.exponweib.logpdf(value, 1, alpha, scale=beta),
)
self.check_logcdf(
Weibull,
Rplus,
{"alpha": Rplusbig, "beta": Rplusbig},
lambda value, alpha, beta: sp.exponweib.logcdf(value, 1, alpha, scale=beta),
)
def test_half_studentt(self):
# this is only testing for nu=1 (halfcauchy)
self.pymc3_matches_scipy(
HalfStudentT,
Rplus,
{"sigma": Rplus},
lambda value, sigma: sp.halfcauchy.logpdf(value, 0, sigma),
)
def test_skew_normal(self):
self.pymc3_matches_scipy(
SkewNormal,
R,
{"mu": R, "sigma": Rplusbig, "alpha": R},
lambda value, alpha, mu, sigma: sp.skewnorm.logpdf(value, alpha, mu, sigma),
)
def test_binomial(self):
self.pymc3_matches_scipy(
Binomial,
Nat,
{"n": NatSmall, "p": Unit},
lambda value, n, p: sp.binom.logpmf(value, n, p),
)
# Too lazy to propagate decimal parameter through the whole chain of deps
@pytest.mark.xfail(condition=(theano.config.floatX == "float32"), reason="Fails on float32")
def test_beta_binomial(self):
self.checkd(BetaBinomial, Nat, {"alpha": Rplus, "beta": Rplus, "n": NatSmall})
def test_bernoulli(self):
self.pymc3_matches_scipy(
Bernoulli,
Bool,
{"logit_p": R},
lambda value, logit_p: sp.bernoulli.logpmf(value, scipy.special.expit(logit_p)),
)
self.pymc3_matches_scipy(
Bernoulli, Bool, {"p": Unit}, lambda value, p: sp.bernoulli.logpmf(value, p)
)
def test_discrete_weibull(self):
self.pymc3_matches_scipy(
DiscreteWeibull,
Nat,
{"q": Unit, "beta": Rplusdunif},
discrete_weibull_logpmf,
)
def test_poisson(self):
self.pymc3_matches_scipy(
Poisson, Nat, {"mu": Rplus}, lambda value, mu: sp.poisson.logpmf(value, mu)
)
def test_bound_poisson(self):
NonZeroPoisson = Bound(Poisson, lower=1.0)
self.pymc3_matches_scipy(
NonZeroPoisson,
PosNat,
{"mu": Rplus},
lambda value, mu: sp.poisson.logpmf(value, mu),
)
with Model():
x = NonZeroPoisson("x", mu=4)
assert np.isinf(x.logp({"x": 0}))
def test_constantdist(self):
self.pymc3_matches_scipy(Constant, I, {"c": I}, lambda value, c: np.log(c == value))
# Too lazy to propagate decimal parameter through the whole chain of deps
@pytest.mark.xfail(condition=(theano.config.floatX == "float32"), reason="Fails on float32")
def test_zeroinflatedpoisson(self):
self.checkd(ZeroInflatedPoisson, Nat, {"theta": Rplus, "psi": Unit})
# Too lazy to propagate decimal parameter through the whole chain of deps
@pytest.mark.xfail(condition=(theano.config.floatX == "float32"), reason="Fails on float32")
def test_zeroinflatednegativebinomial(self):
self.checkd(
ZeroInflatedNegativeBinomial,
Nat,
{"mu": Rplusbig, "alpha": Rplusbig, "psi": Unit},
)
# Too lazy to propagate decimal parameter through the whole chain of deps
@pytest.mark.xfail(condition=(theano.config.floatX == "float32"), reason="Fails on float32")
def test_zeroinflatedbinomial(self):
self.checkd(ZeroInflatedBinomial, Nat, {"n": NatSmall, "p": Unit, "psi": Unit})
    # --- Multivariate distributions: logp checked against reference pdfs
    # --- (normal_logpdf_*, mvt_logpdf, dirichlet_logpdf, ...) for every
    # --- supported covariance parametrization (cov / tau / chol / kron).
    @pytest.mark.parametrize("n", [1, 2, 3])
    def test_mvnormal(self, n):
        # Vector-valued and matrix-valued (batched) inputs, for each of the
        # tau, cov and cholesky parametrizations.
        self.pymc3_matches_scipy(
            MvNormal,
            RealMatrix(5, n),
            {"mu": Vector(R, n), "tau": PdMatrix(n)},
            normal_logpdf_tau,
        )
        self.pymc3_matches_scipy(
            MvNormal,
            Vector(R, n),
            {"mu": Vector(R, n), "tau": PdMatrix(n)},
            normal_logpdf_tau,
        )
        self.pymc3_matches_scipy(
            MvNormal,
            RealMatrix(5, n),
            {"mu": Vector(R, n), "cov": PdMatrix(n)},
            normal_logpdf_cov,
        )
        self.pymc3_matches_scipy(
            MvNormal,
            Vector(R, n),
            {"mu": Vector(R, n), "cov": PdMatrix(n)},
            normal_logpdf_cov,
        )
        self.pymc3_matches_scipy(
            MvNormal,
            RealMatrix(5, n),
            {"mu": Vector(R, n), "chol": PdMatrixChol(n)},
            normal_logpdf_chol,
            decimal=select_by_precision(float64=6, float32=-1),
        )
        self.pymc3_matches_scipy(
            MvNormal,
            Vector(R, n),
            {"mu": Vector(R, n), "chol": PdMatrixChol(n)},
            normal_logpdf_chol,
            decimal=select_by_precision(float64=6, float32=0),
        )
        # Upper-triangular cholesky factor (lower=False) variant.
        def MvNormalUpper(*args, **kwargs):
            return MvNormal(lower=False, *args, **kwargs)
        self.pymc3_matches_scipy(
            MvNormalUpper,
            Vector(R, n),
            {"mu": Vector(R, n), "chol": PdMatrixCholUpper(n)},
            normal_logpdf_chol_upper,
            decimal=select_by_precision(float64=6, float32=0),
        )
    @pytest.mark.xfail(
        condition=(theano.config.floatX == "float32"),
        reason="Fails on float32 due to inf issues",
    )
    def test_mvnormal_indef(self):
        # An indefinite covariance/precision matrix must give logp == -inf
        # and a non-finite gradient, for both cov and tau parametrizations.
        cov_val = np.array([[1, 0.5], [0.5, -2]])
        cov = tt.matrix("cov")
        cov.tag.test_value = np.eye(2)
        mu = floatX(np.zeros(2))
        x = tt.vector("x")
        x.tag.test_value = np.zeros(2)
        logp = MvNormal.dist(mu=mu, cov=cov).logp(x)
        f_logp = theano.function([cov, x], logp)
        assert f_logp(cov_val, np.ones(2)) == -np.inf
        dlogp = tt.grad(logp, cov)
        f_dlogp = theano.function([cov, x], dlogp)
        assert not np.all(np.isfinite(f_dlogp(cov_val, np.ones(2))))
        logp = MvNormal.dist(mu=mu, tau=cov).logp(x)
        f_logp = theano.function([cov, x], logp)
        assert f_logp(cov_val, np.ones(2)) == -np.inf
        dlogp = tt.grad(logp, cov)
        f_dlogp = theano.function([cov, x], dlogp)
        assert not np.all(np.isfinite(f_dlogp(cov_val, np.ones(2))))
    def test_mvnormal_init_fail(self):
        # Exactly one of cov/tau/chol must be given: none or more than one raises.
        with Model():
            with pytest.raises(ValueError):
                x = MvNormal("x", mu=np.zeros(3), shape=3)
            with pytest.raises(ValueError):
                x = MvNormal("x", mu=np.zeros(3), cov=np.eye(3), tau=np.eye(3), shape=3)
    @pytest.mark.parametrize("n", [1, 2, 3])
    def test_matrixnormal(self, n):
        mat_scale = 1e3  # To reduce logp magnitude
        mean_scale = 0.1
        self.pymc3_matches_scipy(
            MatrixNormal,
            RealMatrix(n, n),
            {
                "mu": RealMatrix(n, n) * mean_scale,
                "rowcov": PdMatrix(n) * mat_scale,
                "colcov": PdMatrix(n) * mat_scale,
            },
            matrix_normal_logpdf_cov,
        )
        # Non-square value matrices exercise independent row/col dimensions.
        self.pymc3_matches_scipy(
            MatrixNormal,
            RealMatrix(2, n),
            {
                "mu": RealMatrix(2, n) * mean_scale,
                "rowcov": PdMatrix(2) * mat_scale,
                "colcov": PdMatrix(n) * mat_scale,
            },
            matrix_normal_logpdf_cov,
        )
        self.pymc3_matches_scipy(
            MatrixNormal,
            RealMatrix(3, n),
            {
                "mu": RealMatrix(3, n) * mean_scale,
                "rowchol": PdMatrixChol(3) * mat_scale,
                "colchol": PdMatrixChol(n) * mat_scale,
            },
            matrix_normal_logpdf_chol,
            decimal=select_by_precision(float64=6, float32=-1),
        )
        self.pymc3_matches_scipy(
            MatrixNormal,
            RealMatrix(n, 3),
            {
                "mu": RealMatrix(n, 3) * mean_scale,
                "rowchol": PdMatrixChol(n) * mat_scale,
                "colchol": PdMatrixChol(3) * mat_scale,
            },
            matrix_normal_logpdf_chol,
            decimal=select_by_precision(float64=6, float32=0),
        )
    @pytest.mark.parametrize("n", [2, 3])
    @pytest.mark.parametrize("m", [3])
    @pytest.mark.parametrize("sigma", [None, 1.0])
    def test_kroneckernormal(self, n, m, sigma):
        # Check the covs / chols / evds parametrizations, optionally with
        # additive noise sigma, for 1D and batched (2, N) values.
        np.random.seed(5)
        N = n * m
        covs = [RandomPdMatrix(n), RandomPdMatrix(m)]
        chols = list(map(np.linalg.cholesky, covs))
        evds = list(map(np.linalg.eigh, covs))
        dom = Domain([np.random.randn(N) * 0.1], edges=(None, None), shape=N)
        mu = Domain([np.random.randn(N) * 0.1], edges=(None, None), shape=N)
        std_args = {"mu": mu}
        cov_args = {"covs": covs}
        chol_args = {"chols": chols}
        evd_args = {"evds": evds}
        if sigma is not None and sigma != 0:
            std_args["sigma"] = Domain([sigma], edges=(None, None))
        else:
            for args in [cov_args, chol_args, evd_args]:
                args["sigma"] = sigma
        self.pymc3_matches_scipy(
            KroneckerNormal,
            dom,
            std_args,
            kron_normal_logpdf_cov,
            extra_args=cov_args,
            scipy_args=cov_args,
        )
        self.pymc3_matches_scipy(
            KroneckerNormal,
            dom,
            std_args,
            kron_normal_logpdf_chol,
            extra_args=chol_args,
            scipy_args=chol_args,
        )
        self.pymc3_matches_scipy(
            KroneckerNormal,
            dom,
            std_args,
            kron_normal_logpdf_evd,
            extra_args=evd_args,
            scipy_args=evd_args,
        )
        # Repeat with a batched value domain of shape (2, N).
        dom = Domain([np.random.randn(2, N) * 0.1], edges=(None, None), shape=(2, N))
        self.pymc3_matches_scipy(
            KroneckerNormal,
            dom,
            std_args,
            kron_normal_logpdf_cov,
            extra_args=cov_args,
            scipy_args=cov_args,
        )
        self.pymc3_matches_scipy(
            KroneckerNormal,
            dom,
            std_args,
            kron_normal_logpdf_chol,
            extra_args=chol_args,
            scipy_args=chol_args,
        )
        self.pymc3_matches_scipy(
            KroneckerNormal,
            dom,
            std_args,
            kron_normal_logpdf_evd,
            extra_args=evd_args,
            scipy_args=evd_args,
        )
    @pytest.mark.parametrize("n", [1, 2])
    def test_mvt(self, n):
        self.pymc3_matches_scipy(
            MvStudentT,
            Vector(R, n),
            {"nu": Rplus, "Sigma": PdMatrix(n), "mu": Vector(R, n)},
            mvt_logpdf,
        )
        self.pymc3_matches_scipy(
            MvStudentT,
            RealMatrix(2, n),
            {"nu": Rplus, "Sigma": PdMatrix(n), "mu": Vector(R, n)},
            mvt_logpdf,
        )
    @pytest.mark.parametrize("n", [2, 3, 4])
    def test_AR1(self, n):
        self.pymc3_matches_scipy(AR1, Vector(R, n), {"k": Unit, "tau_e": Rplus}, AR1_logpdf)
    @pytest.mark.parametrize("n", [2, 3])
    def test_wishart(self, n):
        # This check compares the autodiff gradient to the numdiff gradient.
        # However, due to the strict constraints of the wishart,
        # it is impossible to numerically determine the gradient as a small
        # pertubation breaks the symmetry. Thus disabling. Also, numdifftools was
        # removed in June 2019, so an alternative would be needed.
        #
        # self.checkd(Wishart, PdMatrix(n), {'n': Domain([2, 3, 4, 2000]), 'V': PdMatrix(n)},
        #             checks=[self.check_dlogp])
        pass
    @pytest.mark.parametrize("x,eta,n,lp", LKJ_CASES)
    def test_lkj(self, x, eta, n, lp):
        # LKJ_CASES holds precomputed reference logps (see get_lkj_cases).
        with Model() as model:
            LKJCorr("lkj", eta=eta, n=n, transform=None)
        pt = {"lkj": x}
        decimals = select_by_precision(float64=6, float32=4)
        assert_almost_equal(model.fastlogp(pt), lp, decimal=decimals, err_msg=str(pt))
    @pytest.mark.parametrize("n", [2, 3])
    def test_dirichlet(self, n):
        self.pymc3_matches_scipy(Dirichlet, Simplex(n), {"a": Vector(Rplus, n)}, dirichlet_logpdf)
    def test_dirichlet_shape(self):
        # 1D concentration gives shape (2,); a symbolic vector `a` must still
        # construct (with a DeprecationWarning for the implied shape).
        a = tt.as_tensor_variable(np.r_[1, 2])
        with pytest.warns(DeprecationWarning):
            dir_rv = Dirichlet.dist(a)
            assert dir_rv.shape == (2,)
        with pytest.warns(DeprecationWarning), theano.change_flags(compute_test_value="ignore"):
            dir_rv = Dirichlet.dist(tt.vector())
    def test_dirichlet_2D(self):
        self.pymc3_matches_scipy(
            Dirichlet,
            MultiSimplex(2, 2),
            {"a": Vector(Vector(Rplus, 2), 2)},
            dirichlet_logpdf,
        )
    @pytest.mark.parametrize("n", [2, 3])
    def test_multinomial(self, n):
        self.pymc3_matches_scipy(
            Multinomial, Vector(Nat, n), {"p": Simplex(n), "n": Nat}, multinomial_logpdf
        )
    # --- Multinomial broadcasting/batching behaviour, Categorical edge cases,
    # --- and remaining univariate distributions.
    @pytest.mark.parametrize(
        "p,n",
        [
            [[0.25, 0.25, 0.25, 0.25], 1],
            [[0.3, 0.6, 0.05, 0.05], 2],
            [[0.3, 0.6, 0.05, 0.05], 10],
        ],
    )
    def test_multinomial_mode(self, p, n):
        # The mode must sum to n along the last axis, for 1D and stacked p.
        _p = np.array(p)
        with Model() as model:
            m = Multinomial("m", n, _p, _p.shape)
        assert_allclose(m.distribution.mode.eval().sum(), n)
        _p = np.array([p, p])
        with Model() as model:
            m = Multinomial("m", n, _p, _p.shape)
        assert_allclose(m.distribution.mode.eval().sum(axis=-1), n)
    @pytest.mark.parametrize(
        "p, shape, n",
        [
            [[0.25, 0.25, 0.25, 0.25], 4, 2],
            [[0.25, 0.25, 0.25, 0.25], (1, 4), 3],
            # 3: expect to fail
            # [[.25, .25, .25, .25], (10, 4)],
            [[0.25, 0.25, 0.25, 0.25], (10, 1, 4), 5],
            # 5: expect to fail
            # [[[.25, .25, .25, .25]], (2, 4), [7, 11]],
            [[[0.25, 0.25, 0.25, 0.25], [0.25, 0.25, 0.25, 0.25]], (2, 4), 13],
            [[[0.25, 0.25, 0.25, 0.25], [0.25, 0.25, 0.25, 0.25]], (1, 2, 4), [23, 29]],
            [
                [[0.25, 0.25, 0.25, 0.25], [0.25, 0.25, 0.25, 0.25]],
                (10, 2, 4),
                [31, 37],
            ],
            [[[0.25, 0.25, 0.25, 0.25], [0.25, 0.25, 0.25, 0.25]], (2, 4), [17, 19]],
        ],
    )
    def test_multinomial_random(self, p, shape, n):
        # random() must run for every supported broadcast combination of p/shape/n.
        p = np.asarray(p)
        with Model() as model:
            m = Multinomial("m", n=n, p=p, shape=shape)
        m.random()
    def test_multinomial_mode_with_shape(self):
        n = [1, 10]
        p = np.asarray([[0.25, 0.25, 0.25, 0.25], [0.26, 0.26, 0.26, 0.22]])
        with Model() as model:
            m = Multinomial("m", n=n, p=p, shape=(2, 4))
        assert_allclose(m.distribution.mode.eval().sum(axis=-1), n)
    def test_multinomial_vec(self):
        # A batched Multinomial's logp must equal the sum of per-row logps
        # of an unbatched model, and match scipy row-wise.
        vals = np.array([[2, 4, 4], [3, 3, 4]])
        p = np.array([0.2, 0.3, 0.5])
        n = 10
        with Model() as model_single:
            Multinomial("m", n=n, p=p, shape=len(p))
        with Model() as model_many:
            Multinomial("m", n=n, p=p, shape=vals.shape)
        assert_almost_equal(
            scipy.stats.multinomial.logpmf(vals, n, p),
            np.asarray([model_single.fastlogp({"m": val}) for val in vals]),
            decimal=4,
        )
        assert_almost_equal(
            scipy.stats.multinomial.logpmf(vals, n, p),
            model_many.free_RVs[0].logp_elemwise({"m": vals}).squeeze(),
            decimal=4,
        )
        assert_almost_equal(
            sum([model_single.fastlogp({"m": val}) for val in vals]),
            model_many.fastlogp({"m": vals}),
            decimal=4,
        )
    def test_multinomial_vec_1d_n(self):
        vals = np.array([[2, 4, 4], [4, 3, 4]])
        p = np.array([0.2, 0.3, 0.5])
        ns = np.array([10, 11])
        with Model() as model:
            Multinomial("m", n=ns, p=p, shape=vals.shape)
        assert_almost_equal(
            sum([multinomial_logpdf(val, n, p) for val, n in zip(vals, ns)]),
            model.fastlogp({"m": vals}),
            decimal=4,
        )
    def test_multinomial_vec_1d_n_2d_p(self):
        vals = np.array([[2, 4, 4], [4, 3, 4]])
        ps = np.array([[0.2, 0.3, 0.5], [0.9, 0.09, 0.01]])
        ns = np.array([10, 11])
        with Model() as model:
            Multinomial("m", n=ns, p=ps, shape=vals.shape)
        assert_almost_equal(
            sum([multinomial_logpdf(val, n, p) for val, n, p in zip(vals, ns, ps)]),
            model.fastlogp({"m": vals}),
            decimal=4,
        )
    def test_multinomial_vec_2d_p(self):
        vals = np.array([[2, 4, 4], [3, 3, 4]])
        ps = np.array([[0.2, 0.3, 0.5], [0.3, 0.3, 0.4]])
        n = 10
        with Model() as model:
            Multinomial("m", n=n, p=ps, shape=vals.shape)
        assert_almost_equal(
            sum([multinomial_logpdf(val, n, p) for val, p in zip(vals, ps)]),
            model.fastlogp({"m": vals}),
            decimal=4,
        )
    def test_batch_multinomial(self):
        # Degenerate case: p puts all mass on one category per batch element,
        # so the pmf at the matching counts is exactly 1 (logp == 0).
        n = 10
        vals = np.zeros((4, 5, 3), dtype="int32")
        p = np.zeros_like(vals, dtype=theano.config.floatX)
        inds = np.random.randint(vals.shape[-1], size=vals.shape[:-1])[..., None]
        np.put_along_axis(vals, inds, n, axis=-1)
        np.put_along_axis(p, inds, 1, axis=-1)
        dist = Multinomial.dist(n=n, p=p, shape=vals.shape)
        value = tt.tensor3(dtype="int32")
        value.tag.test_value = np.zeros_like(vals, dtype="int32")
        logp = tt.exp(dist.logp(value))
        f = theano.function(inputs=[value], outputs=logp)
        assert_almost_equal(
            f(vals),
            np.ones(vals.shape[:-1] + (1,)),
            decimal=select_by_precision(float64=6, float32=3),
        )
        # Sampling from the degenerate distribution must reproduce vals.
        sample = dist.random(size=2)
        assert_allclose(sample, np.stack([vals, vals], axis=0))
    def test_categorical_bounds(self):
        # Values outside {0, ..., k-1} get logp == -inf.
        with Model():
            x = Categorical("x", p=np.array([0.2, 0.3, 0.5]))
            assert np.isinf(x.logp({"x": -1}))
            assert np.isinf(x.logp({"x": 3}))
    def test_categorical_valid_p(self):
        # Any negative entry in p invalidates the whole distribution
        # (logp == -inf everywhere), even if p sums to 1.
        with Model():
            x = Categorical("x", p=np.array([-0.2, 0.3, 0.5]))
            assert np.isinf(x.logp({"x": 0}))
            assert np.isinf(x.logp({"x": 1}))
            assert np.isinf(x.logp({"x": 2}))
        with Model():
            # A model where p sums to 1 but contains negative values
            x = Categorical("x", p=np.array([-0.2, 0.7, 0.5]))
            assert np.isinf(x.logp({"x": 0}))
            assert np.isinf(x.logp({"x": 1}))
            assert np.isinf(x.logp({"x": 2}))
        with Model():
            # Hard edge case from #2082
            # Early automatic normalization of p's sum would hide the negative
            # entries if there is a single or pair number of negative values
            # and the rest are zero
            x = Categorical("x", p=np.array([-1, -1, 0, 0]))
            assert np.isinf(x.logp({"x": 0}))
            assert np.isinf(x.logp({"x": 1}))
            assert np.isinf(x.logp({"x": 2}))
            assert np.isinf(x.logp({"x": 3}))
    @pytest.mark.parametrize("n", [2, 3, 4])
    def test_categorical(self, n):
        self.pymc3_matches_scipy(
            Categorical,
            Domain(range(n), "int64"),
            {"p": Simplex(n)},
            lambda value, p: categorical_logpdf(value, p),
        )
    @pytest.mark.parametrize("n", [2, 3, 4])
    def test_orderedlogistic(self, n):
        self.pymc3_matches_scipy(
            OrderedLogistic,
            Domain(range(n), "int64"),
            {"eta": R, "cutpoints": Vector(R, n - 1)},
            lambda value, eta, cutpoints: orderedlogistic_logpdf(value, eta, cutpoints),
        )
    def test_densitydist(self):
        # DensityDist wrapping a Laplace(0.5, 0.5) log-density.
        def logp(x):
            return -log(2 * 0.5) - abs(x - 0.5) / 0.5
        self.checkd(DensityDist, R, {}, extra_args={"logp": logp})
    def test_get_tau_sigma(self):
        sigma = np.array([2])
        assert_almost_equal(continuous.get_tau_sigma(sigma=sigma), [1.0 / sigma ** 2, sigma])
    @pytest.mark.parametrize(
        "value,mu,sigma,nu,logp",
        [
            (0.5, -50.000, 0.500, 0.500, -99.8068528),
            (1.0, -1.000, 0.001, 0.001, -1992.5922447),
            (2.0, 0.001, 1.000, 1.000, -1.6720416),
            (5.0, 0.500, 2.500, 2.500, -2.4543644),
            (7.5, 2.000, 5.000, 5.000, -2.8259429),
            (15.0, 5.000, 7.500, 7.500, -3.3093854),
            (50.0, 50.000, 10.000, 10.000, -3.6436067),
            (1000.0, 500.000, 10.000, 20.000, -27.8707323),
        ],
    )
    def test_ex_gaussian(self, value, mu, sigma, nu, logp):
        """Log probabilities calculated using the dexGAUS function from the R package gamlss.
        See e.g., doi: 10.1111/j.1467-9876.2005.00510.x, or http://www.gamlss.org/."""
        with Model() as model:
            ExGaussian("eg", mu=mu, sigma=sigma, nu=nu)
        pt = {"eg": value}
        assert_almost_equal(
            model.fastlogp(pt),
            logp,
            decimal=select_by_precision(float64=6, float32=2),
            err_msg=str(pt),
        )
    @pytest.mark.parametrize(
        "value,mu,sigma,nu,logcdf",
        [
            (0.5, -50.000, 0.500, 0.500, 0.0000000),
            (1.0, -1.000, 0.001, 0.001, 0.0000000),
            (2.0, 0.001, 1.000, 1.000, -0.2365674),
            (5.0, 0.500, 2.500, 2.500, -0.2886489),
            (7.5, 2.000, 5.000, 5.000, -0.5655104),
            (15.0, 5.000, 7.500, 7.500, -0.4545255),
            (50.0, 50.000, 10.000, 10.000, -1.433714),
            (1000.0, 500.000, 10.000, 20.000, -1.573708e-11),
        ],
    )
    def test_ex_gaussian_cdf(self, value, mu, sigma, nu, logcdf):
        """Log probabilities calculated using the pexGAUS function from the R package gamlss.
        See e.g., doi: 10.1111/j.1467-9876.2005.00510.x, or http://www.gamlss.org/."""
        assert_almost_equal(
            ExGaussian.dist(mu=mu, sigma=sigma, nu=nu).logcdf(value).tag.test_value,
            logcdf,
            decimal=select_by_precision(float64=6, float32=2),
            err_msg=str((value, mu, sigma, nu, logcdf)),
        )
    @pytest.mark.xfail(condition=(theano.config.floatX == "float32"), reason="Fails on float32")
    def test_vonmises(self):
        self.pymc3_matches_scipy(
            VonMises,
            R,
            {"mu": Circ, "kappa": Rplus},
            lambda value, mu, kappa: floatX(sp.vonmises.logpdf(value, kappa, loc=mu)),
        )
    def test_gumbel(self):
        def gumbel(value, mu, beta):
            return floatX(sp.gumbel_r.logpdf(value, loc=mu, scale=beta))
        self.pymc3_matches_scipy(Gumbel, R, {"mu": R, "beta": Rplusbig}, gumbel)
        def gumbellcdf(value, mu, beta):
            return floatX(sp.gumbel_r.logcdf(value, loc=mu, scale=beta))
        self.check_logcdf(Gumbel, R, {"mu": R, "beta": Rplusbig}, gumbellcdf)
    def test_logistic(self):
        self.pymc3_matches_scipy(
            Logistic,
            R,
            {"mu": R, "s": Rplus},
            lambda value, mu, s: sp.logistic.logpdf(value, mu, s),
            decimal=select_by_precision(float64=6, float32=1),
        )
        self.check_logcdf(
            Logistic,
            R,
            {"mu": R, "s": Rplus},
            lambda value, mu, s: sp.logistic.logcdf(value, mu, s),
            decimal=select_by_precision(float64=6, float32=1),
        )
    def test_logitnormal(self):
        # Reference density: normal pdf of logit(value) with Jacobian correction.
        self.pymc3_matches_scipy(
            LogitNormal,
            Unit,
            {"mu": R, "sigma": Rplus},
            lambda value, mu, sigma: (
                sp.norm.logpdf(logit(value), mu, sigma) - (np.log(value) + np.log1p(-value))
            ),
            decimal=select_by_precision(float64=6, float32=1),
        )
    def test_multidimensional_beta_construction(self):
        with Model():
            Beta("beta", alpha=1.0, beta=1.0, shape=(10, 20))
    def test_rice(self):
        # Rice accepts either nu (with b = nu/sigma) or b directly.
        self.pymc3_matches_scipy(
            Rice,
            Rplus,
            {"nu": Rplus, "sigma": Rplusbig},
            lambda value, nu, sigma: sp.rice.logpdf(value, b=nu / sigma, loc=0, scale=sigma),
        )
        self.pymc3_matches_scipy(
            Rice,
            Rplus,
            {"b": Rplus, "sigma": Rplusbig},
            lambda value, b, sigma: sp.rice.logpdf(value, b=b, loc=0, scale=sigma),
        )
    @pytest.mark.xfail(condition=(theano.config.floatX == "float32"), reason="Fails on float32")
    def test_moyal(self):
        self.pymc3_matches_scipy(
            Moyal,
            R,
            {"mu": R, "sigma": Rplusbig},
            lambda value, mu, sigma: floatX(sp.moyal.logpdf(value, mu, sigma)),
        )
        self.check_logcdf(
            Moyal,
            R,
            {"mu": R, "sigma": Rplusbig},
            lambda value, mu, sigma: floatX(sp.moyal.logcdf(value, mu, sigma)),
        )
    @pytest.mark.xfail(condition=(theano.config.floatX == "float32"), reason="Fails on float32")
    def test_interpolated(self):
        # Interpolated built from a dense grid of normal pdf points must
        # reproduce the normal logpdf inside the grid and -inf outside.
        for mu in R.vals:
            for sigma in Rplus.vals:
                # pylint: disable=cell-var-from-loop
                xmin = mu - 5 * sigma
                xmax = mu + 5 * sigma
                class TestedInterpolated(Interpolated):
                    def __init__(self, **kwargs):
                        x_points = np.linspace(xmin, xmax, 100000)
                        pdf_points = sp.norm.pdf(x_points, loc=mu, scale=sigma)
                        super().__init__(x_points=x_points, pdf_points=pdf_points, **kwargs)
                def ref_pdf(value):
                    return np.where(
                        np.logical_and(value >= xmin, value <= xmax),
                        sp.norm.logpdf(value, mu, sigma),
                        -np.inf * np.ones(value.shape),
                    )
                self.pymc3_matches_scipy(TestedInterpolated, R, {}, ref_pdf)
def test_bound():
    """Exercise the Bound() factory: transforms, defaults, logp masking and
    random() for scalar, array-valued and symbolic bounds, plus discrete
    distributions and both named/positional construction styles."""
    np.random.seed(42)
    # No bounds: behaves like the wrapped Normal, no transform applied.
    UnboundNormal = Bound(Normal)
    dist = UnboundNormal.dist(mu=0, sigma=1)
    assert dist.transform is None
    assert dist.default() == 0.0
    assert isinstance(dist.random(), np.ndarray)
    # Lower bound only: values below it get -inf logp; samples stay above.
    LowerNormal = Bound(Normal, lower=1)
    dist = LowerNormal.dist(mu=0, sigma=1)
    assert dist.logp(0).eval() == -np.inf
    assert dist.default() > 1
    assert dist.transform is not None
    assert np.all(dist.random() > 1)
    # Upper bound only.
    UpperNormal = Bound(Normal, upper=-1)
    dist = UpperNormal.dist(mu=0, sigma=1)
    assert dist.logp(-0.5).eval() == -np.inf
    assert dist.default() < -1
    assert dist.transform is not None
    assert np.all(dist.random() < -1)
    # Array-valued bounds: default is the per-element interval midpoint;
    # random() is unsupported and must raise.
    ArrayNormal = Bound(Normal, lower=[1, 2], upper=[2, 3])
    dist = ArrayNormal.dist(mu=0, sigma=1, shape=2)
    assert_equal(dist.logp([0.5, 3.5]).eval(), -np.array([np.inf, np.inf]))
    assert_equal(dist.default(), np.array([1.5, 2.5]))
    assert dist.transform is not None
    with pytest.raises(ValueError) as err:
        dist.random()
    err.match("Drawing samples from distributions with array-valued")
    with Model():
        a = ArrayNormal("c", shape=2)
        assert_equal(a.tag.test_value, np.array([1.5, 2.5]))
    # Symbolic (theano) lower bound combined with a scalar upper bound.
    lower = tt.vector("lower")
    lower.tag.test_value = np.array([1, 2]).astype(theano.config.floatX)
    upper = 3
    ArrayNormal = Bound(Normal, lower=lower, upper=upper)
    dist = ArrayNormal.dist(mu=0, sigma=1, shape=2)
    logp = dist.logp([0.5, 3.5]).eval({lower: lower.tag.test_value})
    assert_equal(logp, -np.array([np.inf, np.inf]))
    assert_equal(dist.default(), np.array([2, 2.5]))
    assert dist.transform is not None
    with Model():
        a = ArrayNormal("c", shape=2)
        assert_equal(a.tag.test_value, np.array([2, 2.5]))
    # Bounded discrete distributions keep an integer dtype and respect bounds.
    rand = Bound(Binomial, lower=10).dist(n=20, p=0.3).random()
    assert rand.dtype in [np.int16, np.int32, np.int64]
    assert rand >= 10
    rand = Bound(Binomial, upper=10).dist(n=20, p=0.8).random()
    assert rand.dtype in [np.int16, np.int32, np.int64]
    assert rand <= 10
    rand = Bound(Binomial, lower=5, upper=8).dist(n=10, p=0.6).random()
    assert rand.dtype in [np.int16, np.int32, np.int64]
    assert rand >= 5 and rand <= 8
    # Construction inside a model, with named and positional arguments.
    with Model():
        BoundPoisson = Bound(Poisson, upper=6)
        BoundPoisson(name="y", mu=1)
    with Model():
        BoundNormalNamedArgs = Bound(Normal, upper=6)("y", mu=2.0, sd=1.0)
        BoundNormalPositionalArgs = Bound(Normal, upper=6)("x", 2.0, 1.0)
    with Model():
        BoundPoissonNamedArgs = Bound(Poisson, upper=6)("y", mu=2.0)
        BoundPoissonPositionalArgs = Bound(Poisson, upper=6)("x", 2.0)
class TestStrAndLatexRepr:
    """Check the plain-text and LaTeX representations of variables and models.

    A model covering many distribution kinds (scalar, multivariate, bounded,
    observed, Deterministic) is built once; the expected repr strings are kept
    byte-exact in ``self.expected`` and compared against ``_repr_latex_``,
    ``__latex__``, ``__str__`` and ``str()``.
    """
    def setup_class(self):
        # True parameter values
        alpha, sigma = 1, 1
        beta = [1, 2.5]
        # Size of dataset
        size = 100
        # Predictor variable
        X = np.random.normal(size=(size, 2)).dot(np.array([[1, 0], [0, 0.2]]))
        # Simulate outcome variable
        Y = alpha + X.dot(beta) + np.random.randn(size) * sigma
        with Model() as self.model:
            # Priors for unknown model parameters
            alpha = Normal("alpha", mu=0, sigma=10)
            b = Normal("beta", mu=0, sigma=10, shape=(2,), observed=beta)
            sigma = HalfNormal("sigma", sigma=1)
            # Test Cholesky parameterization
            Z = MvNormal("Z", mu=np.zeros(2), chol=np.eye(2), shape=(2,))
            # NegativeBinomial representations to test issue 4186
            nb1 = pm.NegativeBinomial(
                "nb_with_mu_alpha", mu=pm.Normal("nbmu"), alpha=pm.Gamma("nbalpha", mu=6, sigma=1)
            )
            nb2 = pm.NegativeBinomial("nb_with_p_n", p=pm.Uniform("nbp"), n=10)
            # Expected value of outcome
            mu = Deterministic("mu", floatX(alpha + tt.dot(X, b)))
            # add a bounded variable as well
            bound_var = Bound(Normal, lower=1.0)("bound_var", mu=0, sigma=10)
            # KroneckerNormal
            n, m = 3, 4
            covs = [np.eye(n), np.eye(m)]
            kron_normal = KroneckerNormal("kron_normal", mu=np.zeros(n * m), covs=covs, shape=n * m)
            # MatrixNormal
            matrix_normal = MatrixNormal(
                "mat_normal",
                mu=np.random.normal(size=n),
                rowcov=np.eye(n),
                colchol=np.linalg.cholesky(np.eye(n)),
                shape=(n, n),
            )
            # Likelihood (sampling distribution) of observations
            Y_obs = Normal("Y_obs", mu=mu, sigma=sigma, observed=Y)
        self.distributions = [alpha, sigma, mu, b, Z, nb1, nb2, Y_obs, bound_var]
        # Expected reprs, byte-exact; keyed by format flavour.
        self.expected = {
            "latex": (
                r"$\text{alpha} \sim \text{Normal}$",
                r"$\text{sigma} \sim \text{HalfNormal}$",
                r"$\text{mu} \sim \text{Deterministic}$",
                r"$\text{beta} \sim \text{Normal}$",
                r"$\text{Z} \sim \text{MvNormal}$",
                r"$\text{nb_with_mu_alpha} \sim \text{NegativeBinomial}$",
                r"$\text{nb_with_p_n} \sim \text{NegativeBinomial}$",
                r"$\text{Y_obs} \sim \text{Normal}$",
                r"$\text{bound_var} \sim \text{Bound}$ -- \text{Normal}$",
                r"$\text{kron_normal} \sim \text{KroneckerNormal}$",
                r"$\text{mat_normal} \sim \text{MatrixNormal}$",
            ),
            "plain": (
                r"alpha ~ Normal",
                r"sigma ~ HalfNormal",
                r"mu ~ Deterministic",
                r"beta ~ Normal",
                r"Z ~ MvNormal",
                r"nb_with_mu_alpha ~ NegativeBinomial",
                r"nb_with_p_n ~ NegativeBinomial",
                r"Y_obs ~ Normal",
                r"bound_var ~ Bound-Normal",
                r"kron_normal ~ KroneckerNormal",
                r"mat_normal ~ MatrixNormal",
            ),
            "latex_with_params": (
                r"$\text{alpha} \sim \text{Normal}(\mathit{mu}=0.0,~\mathit{sigma}=10.0)$",
                r"$\text{sigma} \sim \text{HalfNormal}(\mathit{sigma}=1.0)$",
                r"$\text{mu} \sim \text{Deterministic}(\text{alpha},~\text{Constant},~\text{beta})$",
                r"$\text{beta} \sim \text{Normal}(\mathit{mu}=0.0,~\mathit{sigma}=10.0)$",
                r"$\text{Z} \sim \text{MvNormal}(\mathit{mu}=array,~\mathit{chol_cov}=array)$",
                r"$\text{nb_with_mu_alpha} \sim \text{NegativeBinomial}(\mathit{mu}=\text{nbmu},~\mathit{alpha}=\text{nbalpha})$",
                r"$\text{nb_with_p_n} \sim \text{NegativeBinomial}(\mathit{p}=\text{nbp},~\mathit{n}=10)$",
                r"$\text{Y_obs} \sim \text{Normal}(\mathit{mu}=\text{mu},~\mathit{sigma}=f(\text{sigma}))$",
                r"$\text{bound_var} \sim \text{Bound}(\mathit{lower}=1.0,~\mathit{upper}=\text{None})$ -- \text{Normal}(\mathit{mu}=0.0,~\mathit{sigma}=10.0)$",
                r"$\text{kron_normal} \sim \text{KroneckerNormal}(\mathit{mu}=array)$",
                r"$\text{mat_normal} \sim \text{MatrixNormal}(\mathit{mu}=array,~\mathit{rowcov}=array,~\mathit{colchol_cov}=array)$",
            ),
            "plain_with_params": (
                r"alpha ~ Normal(mu=0.0, sigma=10.0)",
                r"sigma ~ HalfNormal(sigma=1.0)",
                r"mu ~ Deterministic(alpha, Constant, beta)",
                r"beta ~ Normal(mu=0.0, sigma=10.0)",
                r"Z ~ MvNormal(mu=array, chol_cov=array)",
                r"nb_with_mu_alpha ~ NegativeBinomial(mu=nbmu, alpha=nbalpha)",
                r"nb_with_p_n ~ NegativeBinomial(p=nbp, n=10)",
                r"Y_obs ~ Normal(mu=mu, sigma=f(sigma))",
                r"bound_var ~ Bound(lower=1.0, upper=None)-Normal(mu=0.0, sigma=10.0)",
                r"kron_normal ~ KroneckerNormal(mu=array)",
                r"mat_normal ~ MatrixNormal(mu=array, rowcov=array, colchol_cov=array)",
            ),
        }
    def test__repr_latex_(self):
        # Per-variable latex repr must match exactly; the model-level repr
        # must contain every variable's name and distribution.
        for distribution, tex in zip(self.distributions, self.expected["latex_with_params"]):
            assert distribution._repr_latex_() == tex
        model_tex = self.model._repr_latex_()
        # make sure each variable is in the model
        for tex in self.expected["latex"]:
            for segment in tex.strip("$").split(r"\sim"):
                assert segment in model_tex
    def test___latex__(self):
        # __latex__ must be an alias of _repr_latex_.
        for distribution, tex in zip(self.distributions, self.expected["latex_with_params"]):
            assert distribution._repr_latex_() == distribution.__latex__()
        assert self.model._repr_latex_() == self.model.__latex__()
    def test___str__(self):
        for distribution, str_repr in zip(self.distributions, self.expected["plain"]):
            assert distribution.__str__() == str_repr
        model_str = self.model.__str__()
        for str_repr in self.expected["plain"]:
            assert str_repr in model_str
    def test_str(self):
        # str() must agree with __str__.
        for distribution, str_repr in zip(self.distributions, self.expected["plain"]):
            assert str(distribution) == str_repr
        model_str = str(self.model)
        for str_repr in self.expected["plain"]:
            assert str_repr in model_str
def test_discrete_trafo():
    """Passing a transform to a discrete distribution must raise ValueError,
    both via .dist() and via in-model construction."""
    with pytest.raises(ValueError) as err:
        Binomial.dist(n=5, p=0.5, transform="log")
    err.match("Transformations for discrete distributions")
    with Model():
        with pytest.raises(ValueError) as err:
            Binomial("a", n=5, p=0.5, transform="log")
        err.match("Transformations for discrete distributions")
@pytest.mark.parametrize("shape", [tuple(), (1,), (3, 1), (3, 2)], ids=str)
def test_orderedlogistic_dimensions(shape):
    # Test for issue #3535
    # With all observations equal to category 1 and uniform cutpoints/p,
    # OrderedLogistic and an equivalent Categorical must give the same
    # total logp of -(size * prod(shape)) in base-10 units.
    loge = np.log10(np.exp(1))  # conversion factor from nats to base-10
    size = 7
    p = np.ones(shape + (10,)) / 10
    cutpoints = np.tile(logit(np.linspace(0, 1, 11)[1:-1]), shape + (1,))
    obs = np.random.randint(0, 1, size=(size,) + shape)
    with Model():
        ol = OrderedLogistic(
            "ol", eta=np.zeros(shape), cutpoints=cutpoints, shape=shape, observed=obs
        )
        c = Categorical("c", p=p, shape=shape, observed=obs)
    ologp = ol.logp({"ol": 1}) * loge
    clogp = c.logp({"c": 1}) * loge
    expected = -np.prod((size,) + shape)
    assert c.distribution.p.ndim == (len(shape) + 1)
    assert np.allclose(clogp, expected)
    assert ol.distribution.p.ndim == (len(shape) + 1)
    assert np.allclose(ologp, expected)
class TestBugfixes:
    """Regression tests for previously reported issues."""

    @pytest.mark.parametrize(
        "dist_cls,kwargs", [(MvNormal, dict(mu=0)), (MvStudentT, dict(mu=0, nu=2))]
    )
    @pytest.mark.parametrize("dims", [1, 2, 4])
    def test_issue_3051(self, dims, dist_cls, kwargs):
        """logp of a multivariate distribution evaluated on a 2D input must
        return one value per row (issue #3051)."""
        d = dist_cls.dist(**kwargs, cov=np.eye(dims), shape=(dims,))
        X = np.random.normal(size=(20, dims))
        actual_t = d.logp(X)
        assert isinstance(actual_t, tt.TensorVariable)
        actual_a = actual_t.eval()
        assert isinstance(actual_a, np.ndarray)
        assert actual_a.shape == (X.shape[0],)
def test_serialize_density_dist():
    # A DensityDist wraps an arbitrary Python callable.  Sampling with the
    # "spawn" multiprocessing context forces the model through pickle, so this
    # exercises both sampling and serialization of the wrapped function.
    def func(x):
        return -2 * (x ** 2).sum()

    with pm.Model():
        pm.Normal("x")
        y = pm.DensityDist("y", func)
        pm.sample(draws=5, tune=1, mp_ctx="spawn")

    # The variable itself must also round-trip through pickle directly.
    import pickle

    pickle.loads(pickle.dumps(y))
| 34.46815 | 160 | 0.551014 |
import itertools
import sys
from .helpers import SeededTest, select_by_precision
from ..vartypes import continuous_types
from ..model import Model, Point, Deterministic
from ..blocking import DictToVarBijection
from ..distributions import (
DensityDist,
Categorical,
Multinomial,
VonMises,
Dirichlet,
MvStudentT,
MvNormal,
MatrixNormal,
ZeroInflatedPoisson,
ZeroInflatedNegativeBinomial,
Constant,
Poisson,
Bernoulli,
Beta,
BetaBinomial,
HalfStudentT,
StudentT,
Weibull,
Pareto,
InverseGamma,
Gamma,
Cauchy,
HalfCauchy,
Lognormal,
Laplace,
NegativeBinomial,
Geometric,
Exponential,
ExGaussian,
Normal,
TruncatedNormal,
Flat,
LKJCorr,
Wald,
ChiSquared,
HalfNormal,
DiscreteUniform,
Bound,
Uniform,
Triangular,
Binomial,
SkewNormal,
DiscreteWeibull,
Gumbel,
Logistic,
OrderedLogistic,
LogitNormal,
Interpolated,
ZeroInflatedBinomial,
HalfFlat,
AR1,
KroneckerNormal,
Rice,
Kumaraswamy,
Moyal,
HyperGeometric,
)
from ..distributions import continuous
from pymc3.theanof import floatX
import pymc3 as pm
from numpy import array, inf, log, exp
from numpy.testing import assert_almost_equal, assert_allclose, assert_equal
import numpy.random as nr
import numpy as np
import pytest
from scipy import integrate
import scipy.stats.distributions as sp
import scipy.stats
from scipy.special import logit
import theano
import theano.tensor as tt
from ..math import kronecker
def get_lkj_cases():
    """Build (point, eta, n, expected_logp) cases for the LKJCorr logp test.

    The first three cases share one valid correlation vector; the last two
    contain entries outside [-1, 1] and therefore expect -inf.
    """
    valid_tri = np.array([0.7, 0.0, -0.7])
    return [
        (valid_tri, 1, 3, 1.5963125911388549),
        (valid_tri, 3, 3, -7.7963493376312742),
        (valid_tri, 0, 3, -np.inf),
        (np.array([1.1, 0.0, -0.7]), 1, 3, -np.inf),
        (np.array([0.7, 0.0, -1.1]), 1, 3, -np.inf),
    ]


LKJ_CASES = get_lkj_cases()
class Domain:
    """A set of representative test values for one parameter.

    Holds the values (``vals``), the excluded boundary values
    (``lower``/``upper``), the per-value ``shape`` and the common ``dtype``.
    """

    def __init__(self, vals, dtype=None, edges=None, shape=None):
        # Infer a common dtype; non-integer data defaults to theano's floatX.
        as_array = array(vals, dtype=dtype)
        if dtype is None and not str(as_array.dtype).startswith("int"):
            as_array = as_array.astype(theano.config.floatX)
        typed_vals = [array(v, dtype=as_array.dtype) for v in vals]

        if edges is None:
            # The first and last entries act as (excluded) domain boundaries.
            edges = (array(typed_vals[0]), array(typed_vals[-1]))
            typed_vals = typed_vals[1:-1]
        if shape is None:
            shape = as_array[0].shape

        self.vals = typed_vals
        self.shape = shape
        self.lower, self.upper = edges
        self.dtype = as_array.dtype

    def __add__(self, other):
        # Shift every value and both edges by a constant.
        return Domain(
            [v + other for v in self.vals],
            self.dtype,
            (self.lower + other, self.upper + other),
            self.shape,
        )

    def __mul__(self, other):
        # Scale the values; if the edges cannot be scaled (e.g. they are
        # None), keep them unchanged.
        try:
            scaled_edges = (self.lower * other, self.upper * other)
        except TypeError:
            scaled_edges = (self.lower, self.upper)
        return Domain(
            [v * other for v in self.vals],
            self.dtype,
            scaled_edges,
            self.shape,
        )

    def __neg__(self):
        return Domain(
            [-v for v in self.vals], self.dtype, (-self.lower, -self.upper), self.shape
        )
def product(domains, n_samples=-1):
    """Cartesian product of a dict of Domains.

    Returns one single-use iterable of (name, value) pairs per combination.
    If ``n_samples`` is positive and smaller than the full product, a random
    subset of that size is returned instead.  An empty dict yields a single
    empty point.
    """
    try:
        names, domain_objs = zip(*domains.items())
    except ValueError:
        # zip(*...) of an empty dict cannot be unpacked into two names.
        return [{}]
    combos = itertools.product(*[d.vals for d in domain_objs])
    all_vals = [zip(names, combo) for combo in combos]
    if n_samples > 0 and len(all_vals) > n_samples:
        picked = nr.choice(len(all_vals), n_samples, replace=False)
        return (all_vals[j] for j in picked)
    return all_vals
# Canonical test domains shared by the distribution tests below.  Naming:
# R = reals, Rplus = non-negative reals, Unit = [0, 1], Circ = radians,
# Nat = naturals, I = integers, Bool = {0, 1}; *unif variants are coarser
# grids, *dunif are integer-valued.  Note that Domain treats the first and
# last listed values as excluded edges, not as test points.
R = Domain([-inf, -2.1, -1, -0.01, 0.0, 0.01, 1, 2.1, inf])
Rplus = Domain([0, 0.01, 0.1, 0.9, 0.99, 1, 1.5, 2, 100, inf])
Rplusbig = Domain([0, 0.5, 0.9, 0.99, 1, 1.5, 2, 20, inf])
Rminusbig = Domain([-inf, -2, -1.5, -1, -0.99, -0.9, -0.5, -0.01, 0])
Unit = Domain([0, 0.001, 0.1, 0.5, 0.75, 0.99, 1])
Circ = Domain([-np.pi, -2.1, -1, -0.01, 0.0, 0.01, 1, 2.1, np.pi])
Runif = Domain([-1, -0.4, 0, 0.4, 1])
Rdunif = Domain([-10, 0, 10.0])
Rplusunif = Domain([0, 0.5, inf])
Rplusdunif = Domain([2, 10, 100], "int64")
I = Domain([-1000, -3, -2, -1, 0, 1, 2, 3, 1000], "int64")
NatSmall = Domain([0, 3, 4, 5, 1000], "int64")
Nat = Domain([0, 1, 2, 3, 2000], "int64")
NatBig = Domain([0, 1, 2, 3, 5000, 50000], "int64")
PosNat = Domain([1, 2, 3, 2000], "int64")
Bool = Domain([0, 0, 1, 1], "int64")
def build_model(distfam, valuedomain, vardomains, extra_args=None):
    """Create a Model holding one untransformed 'value' RV of ``distfam``.

    Each entry of ``vardomains`` becomes a Flat hyper-parameter (so test
    points can be set freely); ``extra_args`` are forwarded verbatim to the
    distribution constructor.
    """
    extra_args = {} if extra_args is None else extra_args
    with Model() as m:
        params = {
            name: Flat(name, dtype=dom.dtype, shape=dom.shape, testval=dom.vals[0])
            for name, dom in vardomains.items()
        }
        params.update(extra_args)
        distfam("value", shape=valuedomain.shape, transform=None, **params)
    return m
def integrate_nd(f, domain, shape, dtype):
    """Numerically integrate (or sum) ``f`` over ``domain``.

    Continuous dtypes are integrated with scipy.integrate; other dtypes are
    summed over the inclusive integer range.  Only scalar and length-2/3
    vector shapes are supported.
    """
    if shape in ((), (1,)):
        if dtype in continuous_types:
            return integrate.quad(f, domain.lower, domain.upper, epsabs=1e-8)[0]
        # Discrete scalar: sum over the inclusive integer range.
        return sum(f(j) for j in range(domain.lower, domain.upper + 1))
    if shape == (2,):
        return integrate.dblquad(
            lambda a, b: f([a, b]),
            domain.lower[0],
            domain.upper[0],
            lambda _: domain.lower[1],
            lambda _: domain.upper[1],
        )[0]
    if shape == (3,):
        return integrate.tplquad(
            lambda a, b, c: f([a, b, c]),
            domain.lower[0],
            domain.upper[0],
            lambda _: domain.lower[1],
            lambda _: domain.upper[1],
            lambda _, __: domain.lower[2],
            lambda _, __: domain.upper[2],
        )[0]
    raise ValueError("Dont know how to integrate shape: " + str(shape))
def multinomial_logpdf(value, n, p):
    # Reference multinomial log-pmf:
    #   log n! - sum_i log(x_i!) + sum_i x_i * log(p_i)
    # Counts that do not sum to n or fall outside [0, n] have zero probability.
    if value.sum() == n and (0 <= value).all() and (value <= n).all():
        logpdf = scipy.special.gammaln(n + 1)
        logpdf -= scipy.special.gammaln(value + 1).sum()
        logpdf += logpow(p, value).sum()
        return logpdf
    else:
        return -inf
def beta_mu_sigma(value, mu, sigma):
    """Beta log-pdf under the (mu, sigma) parametrization; -inf if infeasible."""
    concentration = mu * (1 - mu) / sigma ** 2 - 1
    if concentration > 0:
        return sp.beta.logpdf(value, mu * concentration, (1 - mu) * concentration)
    # sigma too large for this mean: no valid (alpha, beta) pair exists.
    return -inf
class ProductDomain:
    """Cartesian product of several Domains, presented as one vector Domain."""

    def __init__(self, domains):
        value_lists = [d.vals for d in domains]
        self.vals = list(itertools.product(*value_lists))
        # One leading axis enumerating the component domains.
        self.shape = (len(domains),) + domains[0].shape
        self.lower = [d.lower for d in domains]
        self.upper = [d.upper for d in domains]
        self.dtype = domains[0].dtype
def Vector(D, n):
    # n independent copies of domain D -> length-n vector domain.
    return ProductDomain([D] * n)
def SortedVector(n):
    """Domain of 10 sorted standard-normal vectors of length n (fixed seed, no edges)."""
    np.random.seed(42)
    draws = [np.sort(np.random.randn(n)) for _ in range(10)]
    return Domain(draws, edges=(None, None))
def UnitSortedVector(n):
    """Domain of 10 sorted uniform(0, 1) vectors of length n (fixed seed, no edges)."""
    np.random.seed(42)
    draws = [np.sort(np.random.rand(n)) for _ in range(10)]
    return Domain(draws, edges=(None, None))
def RealMatrix(n, m):
    """Domain of 10 random n x m standard-normal matrices (fixed seed, no edges)."""
    np.random.seed(42)
    draws = [np.random.randn(n, m) for _ in range(10)]
    return Domain(draws, edges=(None, None))
def simplex_values(n):
    """Yield grid points on the n-simplex, built recursively from Unit.vals."""
    if n == 1:
        yield array([1.0])
    else:
        # First coordinate v; the remainder is a lower-dimensional simplex
        # point scaled so the whole vector sums to one.
        for v in Unit.vals:
            for rest in simplex_values(n - 1):
                yield np.concatenate([[v], (1 - v) * rest])
def normal_logpdf_tau(value, mu, tau):
    # Precision parametrization: invert tau to obtain the covariance.
    return normal_logpdf_cov(value, mu, np.linalg.inv(tau)).sum()
def normal_logpdf_cov(value, mu, cov):
    """Reference MvNormal log-density, summed over the rows of ``value``."""
    row_densities = scipy.stats.multivariate_normal.logpdf(value, mu, cov)
    return row_densities.sum()
def normal_logpdf_chol(value, mu, chol):
    # Lower-triangular Cholesky parametrization: cov = L @ L.T.
    return normal_logpdf_cov(value, mu, np.dot(chol, chol.T)).sum()
def normal_logpdf_chol_upper(value, mu, chol):
    # Upper-triangular Cholesky parametrization: cov = U.T @ U.
    return normal_logpdf_cov(value, mu, np.dot(chol.T, chol)).sum()
def matrix_normal_logpdf_cov(value, mu, rowcov, colcov):
    # Reference MatrixNormal log-density via scipy.
    return scipy.stats.matrix_normal.logpdf(value, mu, rowcov, colcov)
def matrix_normal_logpdf_chol(value, mu, rowchol, colchol):
    # Cholesky parametrization: rebuild both covariances as L @ L.T.
    return matrix_normal_logpdf_cov(
        value, mu, np.dot(rowchol, rowchol.T), np.dot(colchol, colchol.T)
    )
def kron_normal_logpdf_cov(value, mu, covs, sigma):
    # Dense reference for KroneckerNormal: materialize the full Kronecker
    # covariance (theano graph, hence .eval()), optionally add iid noise
    # sigma**2 * I, then defer to scipy's multivariate normal.
    cov = kronecker(*covs).eval()
    if sigma is not None:
        cov += sigma ** 2 * np.eye(*cov.shape)
    return scipy.stats.multivariate_normal.logpdf(value, mu, cov).sum()
def kron_normal_logpdf_chol(value, mu, chols, sigma):
    # Cholesky parametrization: rebuild each factor covariance as L @ L.T.
    covs = [np.dot(chol, chol.T) for chol in chols]
    return kron_normal_logpdf_cov(value, mu, covs, sigma=sigma)
def kron_normal_logpdf_evd(value, mu, evds, sigma):
    """Kronecker-normal reference logp from eigendecompositions (Q diag(w) Q^T)."""

    def as_numpy(x):
        # Eigenvalues/vectors may arrive as theano tensors; evaluate if so.
        try:
            return x.eval()
        except AttributeError:
            return x

    covs = []
    for eigs, Q in evds:
        w = as_numpy(eigs)
        Qn = as_numpy(Q)
        covs.append(np.dot(Qn, np.dot(np.diag(w), Qn.T)))
    return kron_normal_logpdf_cov(value, mu, covs, sigma)
def betafn(a):
    # log of the multivariate beta function along the last axis:
    # sum_i lgamma(a_i) - lgamma(sum_i a_i), cast to the working float dtype.
    return floatX(scipy.special.gammaln(a).sum(-1) - scipy.special.gammaln(a.sum(-1)))
def logpow(v, p):
    """Elementwise ``p * log(v)`` with the convention that it is 0 where v == 0.

    The previous ``np.choose(v == 0, [p * np.log(v), 0])`` produced the same
    values but always evaluated ``log(0)``, emitting a divide-by-zero
    RuntimeWarning; masking the zeros before taking the log avoids that while
    keeping the output identical.
    """
    v = np.asarray(v)
    p = np.asarray(p)
    return np.where(v == 0, 0, p * np.log(np.where(v == 0, 1, v)))
def discrete_weibull_logpmf(value, q, beta):
    """Reference discrete-Weibull log-pmf: log(q^(x^beta) - q^((x+1)^beta))."""
    q = floatX(q)
    beta = floatX(beta)
    survival_here = np.power(q, np.power(floatX(value), beta))
    survival_next = np.power(q, np.power(floatX(value + 1), beta))
    return floatX(np.log(survival_here - survival_next))
def dirichlet_logpdf(value, a):
    # Reference Dirichlet log-density: -log B(a) + sum_i (a_i - 1) log(x_i),
    # summed over any leading batch axes.
    return floatX((-betafn(a) + logpow(value, a - 1).sum(-1)).sum())
def categorical_logpdf(value, p):
    """Reference categorical log-pmf.

    Valid categories are 0 .. K-1 where K is the size of the last axis of
    ``p``; anything else has probability zero.  The previous bound
    ``value <= len(p)`` was off by one (value == K raised an IndexError
    instead of returning -inf) and used the first axis of a batched ``p``
    rather than the category axis it actually indexes via moveaxis.
    """
    if 0 <= value < np.shape(p)[-1]:
        return floatX(np.log(np.moveaxis(p, -1, 0)[value]))
    else:
        return -inf
def mvt_logpdf(value, nu, Sigma, mu=0):
    """Reference multivariate Student-t log-density, summed over rows of ``value``."""
    d = len(Sigma)
    resid = np.atleast_2d(value) - mu
    # Whiten the residuals through the Cholesky factor of Sigma.
    chol = np.linalg.cholesky(Sigma)
    whitened = np.linalg.solve(chol, resid.T).T
    logdet = np.log(np.diag(chol)).sum()
    lgamma = scipy.special.gammaln
    norm_const = lgamma((nu + d) / 2.0) - 0.5 * d * np.log(nu * np.pi) - lgamma(nu / 2.0)
    mahalanobis = (whitened * whitened).sum(-1)
    logp = norm_const - logdet - (nu + d) / 2.0 * np.log1p(mahalanobis / nu)
    return logp.sum()
def AR1_logpdf(value, k, tau_e):
    """Reference AR(1) logp: stationary marginal start, then one-step conditionals."""
    tau_stationary = tau_e * (1 - k ** 2)
    initial = sp.norm(loc=0, scale=1 / np.sqrt(tau_stationary)).logpdf(value[0])
    transitions = sp.norm(loc=k * value[:-1], scale=1 / np.sqrt(tau_e)).logpdf(value[1:])
    return initial + transitions.sum()
def invlogit(x, eps=sys.float_info.epsilon):
    """Logistic sigmoid squeezed into (eps, 1 - eps) to avoid exact 0/1."""
    scale = 1.0 - 2.0 * eps
    return scale / (1.0 + np.exp(-x)) + eps
def orderedlogistic_logpdf(value, eta, cutpoints):
    """Reference ordered-logistic log-pmf; -inf if any implied probability is negative."""
    padded = np.concatenate(([-np.inf], cutpoints, [np.inf]))
    # Class k has probability sigmoid(eta - c_k) - sigmoid(eta - c_{k+1}).
    probs = np.array(
        [invlogit(eta - lo) - invlogit(eta - hi) for lo, hi in zip(padded[:-1], padded[1:])]
    )
    p = probs[value]
    return np.where(np.all(probs >= 0), np.log(p), -np.inf)
class Simplex:
    # Domain of grid points on the n-simplex (no edge trimming; all points
    # are used as test values).
    def __init__(self, n):
        self.vals = list(simplex_values(n))
        self.shape = (n,)
        self.dtype = Unit.dtype
class MultiSimplex:
    # Domain of stacked simplex points: every combination of n_independent
    # simplex points of dimension n_dependent, stacked into one matrix.
    def __init__(self, n_dependent, n_independent):
        self.vals = []
        for simplex_value in itertools.product(simplex_values(n_dependent), repeat=n_independent):
            self.vals.append(np.vstack(simplex_value))
        self.shape = (n_independent, n_dependent)
        self.dtype = Unit.dtype
def PdMatrix(n):
    """Return the canned positive-definite matrix domain of size n (1, 2 or 3)."""
    domains = {1: PdMatrix1, 2: PdMatrix2, 3: PdMatrix3}
    if n not in domains:
        raise ValueError("n out of bounds")
    return domains[n]
# Fixed positive-definite matrices and lower-triangular Cholesky factors used
# as parameter domains for the multivariate tests.  edges=(None, None)
# disables Domain's first/last-value edge trimming, so both listed matrices
# are test points.
PdMatrix1 = Domain([np.eye(1), [[0.5]]], edges=(None, None))
PdMatrix2 = Domain([np.eye(2), [[0.5, 0.05], [0.05, 4.5]]], edges=(None, None))
PdMatrix3 = Domain([np.eye(3), [[0.5, 0.1, 0], [0.1, 1, 0], [0, 0, 2.5]]], edges=(None, None))
PdMatrixChol1 = Domain([np.eye(1), [[0.001]]], edges=(None, None))
PdMatrixChol2 = Domain([np.eye(2), [[0.1, 0], [10, 1]]], edges=(None, None))
PdMatrixChol3 = Domain([np.eye(3), [[0.1, 0, 0], [10, 100, 0], [0, 1, 10]]], edges=(None, None))
def PdMatrixChol(n):
    """Return the canned lower-Cholesky-factor domain of size n (1, 2 or 3)."""
    domains = {1: PdMatrixChol1, 2: PdMatrixChol2, 3: PdMatrixChol3}
    if n not in domains:
        raise ValueError("n out of bounds")
    return domains[n]
# Upper-triangular Cholesky factor domains (cov = U.T @ U), the transposed
# counterparts of the lower-triangular factors above; edges disabled as well.
PdMatrixCholUpper1 = Domain([np.eye(1), [[0.001]]], edges=(None, None))
PdMatrixCholUpper2 = Domain([np.eye(2), [[0.1, 10], [0, 1]]], edges=(None, None))
PdMatrixCholUpper3 = Domain(
    [np.eye(3), [[0.1, 10, 0], [0, 100, 1], [0, 0, 10]]], edges=(None, None)
)
def PdMatrixCholUpper(n):
    """Return the canned upper-Cholesky-factor domain of size n (1, 2 or 3)."""
    domains = {1: PdMatrixCholUpper1, 2: PdMatrixCholUpper2, 3: PdMatrixCholUpper3}
    if n not in domains:
        raise ValueError("n out of bounds")
    return domains[n]
def RandomPdMatrix(n):
    """Random symmetric positive-definite n x n matrix."""
    base = np.random.rand(n, n)
    gram = np.dot(base, base.T)  # symmetric PSD by construction
    return gram + n * np.identity(n)  # diagonal shift guarantees strict PD
class TestMatchesScipy(SeededTest):
    def pymc3_matches_scipy(
        self,
        pymc3_dist,
        domain,
        paramdomains,
        scipy_dist,
        decimal=None,
        extra_args=None,
        scipy_args=None,
    ):
        """Check ``pymc3_dist``'s logp against a scipy reference over a grid.

        ``domain`` is the support of the value variable; ``paramdomains`` maps
        parameter names to their Domains.  ``extra_args`` go only to the pymc3
        constructor, while ``scipy_args`` are merged into every call of
        ``scipy_dist``.
        """
        if extra_args is None:
            extra_args = {}
        if scipy_args is None:
            scipy_args = {}
        model = build_model(pymc3_dist, domain, paramdomains, extra_args)
        value = model.named_vars["value"]

        def logp(args):
            # Reference density: same test point plus the fixed scipy kwargs.
            args.update(scipy_args)
            return scipy_dist(**args)

        self.check_logp(model, value, domain, paramdomains, logp, decimal=decimal)
    def check_logp(self, model, value, domain, paramdomains, logp_reference, decimal=None):
        # Compare model.fastlogp with the reference at up to 100 points
        # sampled from the cartesian product of value and parameter domains.
        domains = paramdomains.copy()
        domains["value"] = domain
        logp = model.fastlogp
        for pt in product(domains, n_samples=100):
            pt = Point(pt, model=model)
            if decimal is None:
                # Looser tolerance under float32.
                decimal = select_by_precision(float64=6, float32=3)
            assert_almost_equal(logp(pt), logp_reference(pt), decimal=decimal, err_msg=str(pt))
    def check_logcdf(
        self,
        pymc3_dist,
        domain,
        paramdomains,
        scipy_logcdf,
        decimal=None,
        n_samples=100,
    ):
        # Compare dist.logcdf's test value against the scipy reference over
        # sampled (value, params) points.  Unlike check_logp this builds a
        # fresh .dist() object per point instead of a full Model.
        domains = paramdomains.copy()
        domains["value"] = domain
        if decimal is None:
            decimal = select_by_precision(float64=6, float32=3)
        for pt in product(domains, n_samples=n_samples):
            params = dict(pt)
            scipy_cdf = scipy_logcdf(**params)
            value = params.pop("value")
            dist = pymc3_dist.dist(**params)
            assert_almost_equal(
                dist.logcdf(value).tag.test_value,
                scipy_cdf,
                decimal=decimal,
                err_msg=str(pt),
            )
    def check_int_to_1(self, model, value, domain, paramdomains):
        # The density must integrate (or sum) to 1 over the value domain for
        # every sampled parameter combination.
        pdf = model.fastfn(exp(model.logpt))
        for pt in product(paramdomains, n_samples=10):
            pt = Point(pt, value=value.tag.test_value, model=model)
            # Turn the joint density into a function of the value alone.
            bij = DictToVarBijection(value, (), pt)
            pdfx = bij.mapf(pdf)
            area = integrate_nd(pdfx, domain, value.dshape, value.dtype)
            assert_almost_equal(area, 1, err_msg=str(pt))
    def checkd(self, distfam, valuedomain, vardomains, checks=None, extra_args=None):
        # Run a set of structural checks (default: density integrates to 1)
        # on a model built from ``distfam``.
        if checks is None:
            checks = (self.check_int_to_1,)
        if extra_args is None:
            extra_args = {}
        m = build_model(distfam, valuedomain, vardomains, extra_args=extra_args)
        for check in checks:
            check(m, m.named_vars["value"], valuedomain, vardomains)
def test_uniform(self):
self.pymc3_matches_scipy(
Uniform,
Runif,
{"lower": -Rplusunif, "upper": Rplusunif},
lambda value, lower, upper: sp.uniform.logpdf(value, lower, upper - lower),
)
self.check_logcdf(
Uniform,
Runif,
{"lower": -Rplusunif, "upper": Rplusunif},
lambda value, lower, upper: sp.uniform.logcdf(value, lower, upper - lower),
)
def test_triangular(self):
self.pymc3_matches_scipy(
Triangular,
Runif,
{"lower": -Rplusunif, "c": Runif, "upper": Rplusunif},
lambda value, c, lower, upper: sp.triang.logpdf(value, c - lower, lower, upper - lower),
)
self.check_logcdf(
Triangular,
Runif,
{"lower": -Rplusunif, "c": Runif, "upper": Rplusunif},
lambda value, c, lower, upper: sp.triang.logcdf(value, c - lower, lower, upper - lower),
)
    def test_bound_normal(self):
        # On the positive reals the bounded logp equals the plain (not
        # renormalized) normal logpdf, as the comparison below demonstrates...
        PositiveNormal = Bound(Normal, lower=0.0)
        self.pymc3_matches_scipy(
            PositiveNormal,
            Rplus,
            {"mu": Rplus, "sigma": Rplus},
            lambda value, mu, sigma: sp.norm.logpdf(value, mu, sigma),
            decimal=select_by_precision(float64=6, float32=-1),
        )
        # ... and values below the bound get -inf.
        with Model():
            x = PositiveNormal("x", mu=0, sigma=1, transform=None)
        assert np.isinf(x.logp({"x": -1}))
def test_discrete_unif(self):
self.pymc3_matches_scipy(
DiscreteUniform,
Rdunif,
{"lower": -Rplusdunif, "upper": Rplusdunif},
lambda value, lower, upper: sp.randint.logpmf(value, lower, upper + 1),
)
    def test_flat(self):
        # Flat has logp identically 0 and a default test value of 0.
        self.pymc3_matches_scipy(Flat, Runif, {}, lambda value: 0)
        with Model():
            x = Flat("a")
            assert_allclose(x.tag.test_value, 0)
        # Its (improper) logcdf is log(1/2) at every finite value, with the
        # conventional limits 0 at +inf and -inf at -inf.
        self.check_logcdf(Flat, Runif, {}, lambda value: np.log(0.5))
        assert 0.0 == Flat.dist().logcdf(np.inf).tag.test_value
        assert -np.inf == Flat.dist().logcdf(-np.inf).tag.test_value
def test_half_flat(self):
self.pymc3_matches_scipy(HalfFlat, Rplus, {}, lambda value: 0)
with Model():
x = HalfFlat("a", shape=2)
assert_allclose(x.tag.test_value, 1)
assert x.tag.test_value.shape == (2,)
self.check_logcdf(HalfFlat, Runif, {}, lambda value: -np.inf)
assert 0.0 == HalfFlat.dist().logcdf(np.inf).tag.test_value
assert -np.inf == HalfFlat.dist().logcdf(-np.inf).tag.test_value
def test_normal(self):
self.pymc3_matches_scipy(
Normal,
R,
{"mu": R, "sigma": Rplus},
lambda value, mu, sigma: sp.norm.logpdf(value, mu, sigma),
decimal=select_by_precision(float64=6, float32=1),
)
self.check_logcdf(
Normal,
R,
{"mu": R, "sigma": Rplus},
lambda value, mu, sigma: sp.norm.logcdf(value, mu, sigma),
)
def test_truncated_normal(self):
def scipy_logp(value, mu, sigma, lower, upper):
return sp.truncnorm.logpdf(
value, (lower - mu) / sigma, (upper - mu) / sigma, loc=mu, scale=sigma
)
self.pymc3_matches_scipy(
TruncatedNormal,
R,
{"mu": R, "sigma": Rplusbig, "lower": -Rplusbig, "upper": Rplusbig},
scipy_logp,
decimal=select_by_precision(float64=6, float32=1),
)
def test_half_normal(self):
self.pymc3_matches_scipy(
HalfNormal,
Rplus,
{"sigma": Rplus},
lambda value, sigma: sp.halfnorm.logpdf(value, scale=sigma),
decimal=select_by_precision(float64=6, float32=-1),
)
self.check_logcdf(
HalfNormal,
Rplus,
{"sigma": Rplus},
lambda value, sigma: sp.halfnorm.logcdf(value, scale=sigma),
)
def test_chi_squared(self):
self.pymc3_matches_scipy(
ChiSquared,
Rplus,
{"nu": Rplusdunif},
lambda value, nu: sp.chi2.logpdf(value, df=nu),
)
@pytest.mark.xfail(reason="Poor CDF in SciPy. See scipy/scipy#869 for details.")
def test_wald_scipy(self):
self.pymc3_matches_scipy(
Wald,
Rplus,
{"mu": Rplus, "alpha": Rplus},
lambda value, mu, alpha: sp.invgauss.logpdf(value, mu=mu, loc=alpha),
decimal=select_by_precision(float64=6, float32=1),
)
self.check_logcdf(
Wald,
Rplus,
{"mu": Rplus, "alpha": Rplus},
lambda value, mu, alpha: sp.invgauss.logcdf(value, mu=mu, loc=alpha),
)
@pytest.mark.parametrize(
"value,mu,lam,phi,alpha,logp",
[
(0.5, 0.001, 0.5, None, 0.0, -124500.7257914),
(1.0, 0.5, 0.001, None, 0.0, -4.3733162),
(2.0, 1.0, None, None, 0.0, -2.2086593),
(5.0, 2.0, 2.5, None, 0.0, -3.4374500),
(7.5, 5.0, None, 1.0, 0.0, -3.2199074),
(15.0, 10.0, None, 0.75, 0.0, -4.0360623),
(50.0, 15.0, None, 0.66666, 0.0, -6.1801249),
(0.5, 0.001, 0.5, None, 0.0, -124500.7257914),
(1.0, 0.5, 0.001, None, 0.5, -3.3330954),
(2.0, 1.0, None, None, 1.0, -0.9189385),
(5.0, 2.0, 2.5, None, 2.0, -2.2128783),
(7.5, 5.0, None, 1.0, 2.5, -2.5283764),
(15.0, 10.0, None, 0.75, 5.0, -3.3653647),
(50.0, 15.0, None, 0.666666, 10.0, -5.6481874),
],
)
def test_wald(self, value, mu, lam, phi, alpha, logp):
with Model() as model:
Wald("wald", mu=mu, lam=lam, phi=phi, alpha=alpha, transform=None)
pt = {"wald": value}
decimals = select_by_precision(float64=6, float32=1)
assert_almost_equal(model.fastlogp(pt), logp, decimal=decimals, err_msg=str(pt))
def test_beta(self):
self.pymc3_matches_scipy(
Beta,
Unit,
{"alpha": Rplus, "beta": Rplus},
lambda value, alpha, beta: sp.beta.logpdf(value, alpha, beta),
)
self.pymc3_matches_scipy(Beta, Unit, {"mu": Unit, "sigma": Rplus}, beta_mu_sigma)
self.check_logcdf(
Beta,
Unit,
{"alpha": Rplus, "beta": Rplus},
lambda value, alpha, beta: sp.beta.logcdf(value, alpha, beta),
)
def test_kumaraswamy(self):
def scipy_log_pdf(value, a, b):
return (
np.log(a) + np.log(b) + (a - 1) * np.log(value) + (b - 1) * np.log(1 - value ** a)
)
self.pymc3_matches_scipy(Kumaraswamy, Unit, {"a": Rplus, "b": Rplus}, scipy_log_pdf)
def test_exponential(self):
self.pymc3_matches_scipy(
Exponential,
Rplus,
{"lam": Rplus},
lambda value, lam: sp.expon.logpdf(value, 0, 1 / lam),
)
self.check_logcdf(
Exponential,
Rplus,
{"lam": Rplus},
lambda value, lam: sp.expon.logcdf(value, 0, 1 / lam),
)
def test_geometric(self):
self.pymc3_matches_scipy(
Geometric, Nat, {"p": Unit}, lambda value, p: np.log(sp.geom.pmf(value, p))
)
def test_hypergeometric(self):
self.pymc3_matches_scipy(
HyperGeometric,
Nat,
{"N": NatSmall, "k": NatSmall, "n": NatSmall},
lambda value, N, k, n: sp.hypergeom.logpmf(value, N, k, n),
)
def test_negative_binomial(self):
def test_fun(value, mu, alpha):
return sp.nbinom.logpmf(value, alpha, 1 - mu / (mu + alpha))
self.pymc3_matches_scipy(NegativeBinomial, Nat, {"mu": Rplus, "alpha": Rplus}, test_fun)
self.pymc3_matches_scipy(
NegativeBinomial,
Nat,
{"p": Unit, "n": Rplus},
lambda value, p, n: sp.nbinom.logpmf(value, n, p),
)
@pytest.mark.parametrize(
"mu, p, alpha, n, expected",
[
(5, None, None, None, "Must specify either alpha or n."),
(None, 0.5, None, None, "Must specify either alpha or n."),
(None, None, None, None, "Must specify either alpha or n."),
(5, None, 2, 2, "Can't specify both alpha and n."),
(None, 0.5, 2, 2, "Can't specify both alpha and n."),
(None, None, 2, 2, "Can't specify both alpha and n."),
(None, None, 2, None, "Must specify either mu or p."),
(None, None, None, 2, "Must specify either mu or p."),
(5, 0.5, 2, None, "Can't specify both mu and p."),
(5, 0.5, None, 2, "Can't specify both mu and p."),
],
)
def test_negative_binomial_init_fail(self, mu, p, alpha, n, expected):
with Model():
with pytest.raises(ValueError, match=f"Incompatible parametrization. {expected}"):
NegativeBinomial("x", mu=mu, p=p, alpha=alpha, n=n)
def test_laplace(self):
self.pymc3_matches_scipy(
Laplace,
R,
{"mu": R, "b": Rplus},
lambda value, mu, b: sp.laplace.logpdf(value, mu, b),
)
self.check_logcdf(
Laplace,
R,
{"mu": R, "b": Rplus},
lambda value, mu, b: sp.laplace.logcdf(value, mu, b),
)
def test_lognormal(self):
self.pymc3_matches_scipy(
Lognormal,
Rplus,
{"mu": R, "tau": Rplusbig},
lambda value, mu, tau: floatX(sp.lognorm.logpdf(value, tau ** -0.5, 0, np.exp(mu))),
)
self.check_logcdf(
Lognormal,
Rplus,
{"mu": R, "tau": Rplusbig},
lambda value, mu, tau: sp.lognorm.logcdf(value, tau ** -0.5, 0, np.exp(mu)),
)
def test_t(self):
self.pymc3_matches_scipy(
StudentT,
R,
{"nu": Rplus, "mu": R, "lam": Rplus},
lambda value, nu, mu, lam: sp.t.logpdf(value, nu, mu, lam ** -0.5),
)
self.check_logcdf(
StudentT,
R,
{"nu": Rplus, "mu": R, "lam": Rplus},
lambda value, nu, mu, lam: sp.t.logcdf(value, nu, mu, lam ** -0.5),
n_samples=10,
)
def test_cauchy(self):
self.pymc3_matches_scipy(
Cauchy,
R,
{"alpha": R, "beta": Rplusbig},
lambda value, alpha, beta: sp.cauchy.logpdf(value, alpha, beta),
)
self.check_logcdf(
Cauchy,
R,
{"alpha": R, "beta": Rplusbig},
lambda value, alpha, beta: sp.cauchy.logcdf(value, alpha, beta),
)
def test_half_cauchy(self):
self.pymc3_matches_scipy(
HalfCauchy,
Rplus,
{"beta": Rplusbig},
lambda value, beta: sp.halfcauchy.logpdf(value, scale=beta),
)
self.check_logcdf(
HalfCauchy,
Rplus,
{"beta": Rplusbig},
lambda value, beta: sp.halfcauchy.logcdf(value, scale=beta),
)
def test_gamma(self):
self.pymc3_matches_scipy(
Gamma,
Rplus,
{"alpha": Rplusbig, "beta": Rplusbig},
lambda value, alpha, beta: sp.gamma.logpdf(value, alpha, scale=1.0 / beta),
)
def test_fun(value, mu, sigma):
return sp.gamma.logpdf(value, mu ** 2 / sigma ** 2, scale=1.0 / (mu / sigma ** 2))
self.pymc3_matches_scipy(Gamma, Rplus, {"mu": Rplusbig, "sigma": Rplusbig}, test_fun)
self.check_logcdf(
Gamma,
Rplus,
{"alpha": Rplusbig, "beta": Rplusbig},
lambda value, alpha, beta: sp.gamma.logcdf(value, alpha, scale=1.0 / beta),
)
@pytest.mark.xfail(
condition=(theano.config.floatX == "float32"),
reason="Fails on float32 due to numerical issues",
)
def test_inverse_gamma(self):
self.pymc3_matches_scipy(
InverseGamma,
Rplus,
{"alpha": Rplus, "beta": Rplus},
lambda value, alpha, beta: sp.invgamma.logpdf(value, alpha, scale=beta),
)
self.check_logcdf(
InverseGamma,
Rplus,
{"alpha": Rplus, "beta": Rplus},
lambda value, alpha, beta: sp.invgamma.logcdf(value, alpha, scale=beta),
)
@pytest.mark.xfail(
condition=(theano.config.floatX == "float32"),
reason="Fails on float32 due to scaling issues",
)
def test_inverse_gamma_alt_params(self):
def test_fun(value, mu, sigma):
alpha, beta = InverseGamma._get_alpha_beta(None, None, mu, sigma)
return sp.invgamma.logpdf(value, alpha, scale=beta)
self.pymc3_matches_scipy(InverseGamma, Rplus, {"mu": Rplus, "sigma": Rplus}, test_fun)
def test_pareto(self):
self.pymc3_matches_scipy(
Pareto,
Rplus,
{"alpha": Rplusbig, "m": Rplusbig},
lambda value, alpha, m: sp.pareto.logpdf(value, alpha, scale=m),
)
self.check_logcdf(
Pareto,
Rplus,
{"alpha": Rplusbig, "m": Rplusbig},
lambda value, alpha, m: sp.pareto.logcdf(value, alpha, scale=m),
)
@pytest.mark.xfail(
condition=(theano.config.floatX == "float32"),
reason="Fails on float32 due to inf issues",
)
def test_weibull(self):
self.pymc3_matches_scipy(
Weibull,
Rplus,
{"alpha": Rplusbig, "beta": Rplusbig},
lambda value, alpha, beta: sp.exponweib.logpdf(value, 1, alpha, scale=beta),
)
self.check_logcdf(
Weibull,
Rplus,
{"alpha": Rplusbig, "beta": Rplusbig},
lambda value, alpha, beta: sp.exponweib.logcdf(value, 1, alpha, scale=beta),
)
def test_half_studentt(self):
# this is only testing for nu=1 (halfcauchy)
self.pymc3_matches_scipy(
HalfStudentT,
Rplus,
{"sigma": Rplus},
lambda value, sigma: sp.halfcauchy.logpdf(value, 0, sigma),
)
def test_skew_normal(self):
self.pymc3_matches_scipy(
SkewNormal,
R,
{"mu": R, "sigma": Rplusbig, "alpha": R},
lambda value, alpha, mu, sigma: sp.skewnorm.logpdf(value, alpha, mu, sigma),
)
def test_binomial(self):
self.pymc3_matches_scipy(
Binomial,
Nat,
{"n": NatSmall, "p": Unit},
lambda value, n, p: sp.binom.logpmf(value, n, p),
)
# Too lazy to propagate decimal parameter through the whole chain of deps
@pytest.mark.xfail(condition=(theano.config.floatX == "float32"), reason="Fails on float32")
def test_beta_binomial(self):
self.checkd(BetaBinomial, Nat, {"alpha": Rplus, "beta": Rplus, "n": NatSmall})
def test_bernoulli(self):
self.pymc3_matches_scipy(
Bernoulli,
Bool,
{"logit_p": R},
lambda value, logit_p: sp.bernoulli.logpmf(value, scipy.special.expit(logit_p)),
)
self.pymc3_matches_scipy(
Bernoulli, Bool, {"p": Unit}, lambda value, p: sp.bernoulli.logpmf(value, p)
)
def test_discrete_weibull(self):
self.pymc3_matches_scipy(
DiscreteWeibull,
Nat,
{"q": Unit, "beta": Rplusdunif},
discrete_weibull_logpmf,
)
def test_poisson(self):
self.pymc3_matches_scipy(
Poisson, Nat, {"mu": Rplus}, lambda value, mu: sp.poisson.logpmf(value, mu)
)
def test_bound_poisson(self):
NonZeroPoisson = Bound(Poisson, lower=1.0)
self.pymc3_matches_scipy(
NonZeroPoisson,
PosNat,
{"mu": Rplus},
lambda value, mu: sp.poisson.logpmf(value, mu),
)
with Model():
x = NonZeroPoisson("x", mu=4)
assert np.isinf(x.logp({"x": 0}))
def test_constantdist(self):
self.pymc3_matches_scipy(Constant, I, {"c": I}, lambda value, c: np.log(c == value))
# Too lazy to propagate decimal parameter through the whole chain of deps
@pytest.mark.xfail(condition=(theano.config.floatX == "float32"), reason="Fails on float32")
def test_zeroinflatedpoisson(self):
self.checkd(ZeroInflatedPoisson, Nat, {"theta": Rplus, "psi": Unit})
# Too lazy to propagate decimal parameter through the whole chain of deps
@pytest.mark.xfail(condition=(theano.config.floatX == "float32"), reason="Fails on float32")
def test_zeroinflatednegativebinomial(self):
self.checkd(
ZeroInflatedNegativeBinomial,
Nat,
{"mu": Rplusbig, "alpha": Rplusbig, "psi": Unit},
)
# Too lazy to propagate decimal parameter through the whole chain of deps
@pytest.mark.xfail(condition=(theano.config.floatX == "float32"), reason="Fails on float32")
def test_zeroinflatedbinomial(self):
self.checkd(ZeroInflatedBinomial, Nat, {"n": NatSmall, "p": Unit, "psi": Unit})
@pytest.mark.parametrize("n", [1, 2, 3])
def test_mvnormal(self, n):
self.pymc3_matches_scipy(
MvNormal,
RealMatrix(5, n),
{"mu": Vector(R, n), "tau": PdMatrix(n)},
normal_logpdf_tau,
)
self.pymc3_matches_scipy(
MvNormal,
Vector(R, n),
{"mu": Vector(R, n), "tau": PdMatrix(n)},
normal_logpdf_tau,
)
self.pymc3_matches_scipy(
MvNormal,
RealMatrix(5, n),
{"mu": Vector(R, n), "cov": PdMatrix(n)},
normal_logpdf_cov,
)
self.pymc3_matches_scipy(
MvNormal,
Vector(R, n),
{"mu": Vector(R, n), "cov": PdMatrix(n)},
normal_logpdf_cov,
)
self.pymc3_matches_scipy(
MvNormal,
RealMatrix(5, n),
{"mu": Vector(R, n), "chol": PdMatrixChol(n)},
normal_logpdf_chol,
decimal=select_by_precision(float64=6, float32=-1),
)
self.pymc3_matches_scipy(
MvNormal,
Vector(R, n),
{"mu": Vector(R, n), "chol": PdMatrixChol(n)},
normal_logpdf_chol,
decimal=select_by_precision(float64=6, float32=0),
)
def MvNormalUpper(*args, **kwargs):
return MvNormal(lower=False, *args, **kwargs)
self.pymc3_matches_scipy(
MvNormalUpper,
Vector(R, n),
{"mu": Vector(R, n), "chol": PdMatrixCholUpper(n)},
normal_logpdf_chol_upper,
decimal=select_by_precision(float64=6, float32=0),
)
@pytest.mark.xfail(
condition=(theano.config.floatX == "float32"),
reason="Fails on float32 due to inf issues",
)
def test_mvnormal_indef(self):
cov_val = np.array([[1, 0.5], [0.5, -2]])
cov = tt.matrix("cov")
cov.tag.test_value = np.eye(2)
mu = floatX(np.zeros(2))
x = tt.vector("x")
x.tag.test_value = np.zeros(2)
logp = MvNormal.dist(mu=mu, cov=cov).logp(x)
f_logp = theano.function([cov, x], logp)
assert f_logp(cov_val, np.ones(2)) == -np.inf
dlogp = tt.grad(logp, cov)
f_dlogp = theano.function([cov, x], dlogp)
assert not np.all(np.isfinite(f_dlogp(cov_val, np.ones(2))))
logp = MvNormal.dist(mu=mu, tau=cov).logp(x)
f_logp = theano.function([cov, x], logp)
assert f_logp(cov_val, np.ones(2)) == -np.inf
dlogp = tt.grad(logp, cov)
f_dlogp = theano.function([cov, x], dlogp)
assert not np.all(np.isfinite(f_dlogp(cov_val, np.ones(2))))
def test_mvnormal_init_fail(self):
with Model():
with pytest.raises(ValueError):
x = MvNormal("x", mu=np.zeros(3), shape=3)
with pytest.raises(ValueError):
x = MvNormal("x", mu=np.zeros(3), cov=np.eye(3), tau=np.eye(3), shape=3)
@pytest.mark.parametrize("n", [1, 2, 3])
def test_matrixnormal(self, n):
mat_scale = 1e3 # To reduce logp magnitude
mean_scale = 0.1
self.pymc3_matches_scipy(
MatrixNormal,
RealMatrix(n, n),
{
"mu": RealMatrix(n, n) * mean_scale,
"rowcov": PdMatrix(n) * mat_scale,
"colcov": PdMatrix(n) * mat_scale,
},
matrix_normal_logpdf_cov,
)
self.pymc3_matches_scipy(
MatrixNormal,
RealMatrix(2, n),
{
"mu": RealMatrix(2, n) * mean_scale,
"rowcov": PdMatrix(2) * mat_scale,
"colcov": PdMatrix(n) * mat_scale,
},
matrix_normal_logpdf_cov,
)
self.pymc3_matches_scipy(
MatrixNormal,
RealMatrix(3, n),
{
"mu": RealMatrix(3, n) * mean_scale,
"rowchol": PdMatrixChol(3) * mat_scale,
"colchol": PdMatrixChol(n) * mat_scale,
},
matrix_normal_logpdf_chol,
decimal=select_by_precision(float64=6, float32=-1),
)
self.pymc3_matches_scipy(
MatrixNormal,
RealMatrix(n, 3),
{
"mu": RealMatrix(n, 3) * mean_scale,
"rowchol": PdMatrixChol(n) * mat_scale,
"colchol": PdMatrixChol(3) * mat_scale,
},
matrix_normal_logpdf_chol,
decimal=select_by_precision(float64=6, float32=0),
)
@pytest.mark.parametrize("n", [2, 3])
@pytest.mark.parametrize("m", [3])
@pytest.mark.parametrize("sigma", [None, 1.0])
def test_kroneckernormal(self, n, m, sigma):
np.random.seed(5)
N = n * m
covs = [RandomPdMatrix(n), RandomPdMatrix(m)]
chols = list(map(np.linalg.cholesky, covs))
evds = list(map(np.linalg.eigh, covs))
dom = Domain([np.random.randn(N) * 0.1], edges=(None, None), shape=N)
mu = Domain([np.random.randn(N) * 0.1], edges=(None, None), shape=N)
std_args = {"mu": mu}
cov_args = {"covs": covs}
chol_args = {"chols": chols}
evd_args = {"evds": evds}
if sigma is not None and sigma != 0:
std_args["sigma"] = Domain([sigma], edges=(None, None))
else:
for args in [cov_args, chol_args, evd_args]:
args["sigma"] = sigma
self.pymc3_matches_scipy(
KroneckerNormal,
dom,
std_args,
kron_normal_logpdf_cov,
extra_args=cov_args,
scipy_args=cov_args,
)
self.pymc3_matches_scipy(
KroneckerNormal,
dom,
std_args,
kron_normal_logpdf_chol,
extra_args=chol_args,
scipy_args=chol_args,
)
self.pymc3_matches_scipy(
KroneckerNormal,
dom,
std_args,
kron_normal_logpdf_evd,
extra_args=evd_args,
scipy_args=evd_args,
)
dom = Domain([np.random.randn(2, N) * 0.1], edges=(None, None), shape=(2, N))
self.pymc3_matches_scipy(
KroneckerNormal,
dom,
std_args,
kron_normal_logpdf_cov,
extra_args=cov_args,
scipy_args=cov_args,
)
self.pymc3_matches_scipy(
KroneckerNormal,
dom,
std_args,
kron_normal_logpdf_chol,
extra_args=chol_args,
scipy_args=chol_args,
)
self.pymc3_matches_scipy(
KroneckerNormal,
dom,
std_args,
kron_normal_logpdf_evd,
extra_args=evd_args,
scipy_args=evd_args,
)
@pytest.mark.parametrize("n", [1, 2])
def test_mvt(self, n):
self.pymc3_matches_scipy(
MvStudentT,
Vector(R, n),
{"nu": Rplus, "Sigma": PdMatrix(n), "mu": Vector(R, n)},
mvt_logpdf,
)
self.pymc3_matches_scipy(
MvStudentT,
RealMatrix(2, n),
{"nu": Rplus, "Sigma": PdMatrix(n), "mu": Vector(R, n)},
mvt_logpdf,
)
@pytest.mark.parametrize("n", [2, 3, 4])
def test_AR1(self, n):
self.pymc3_matches_scipy(AR1, Vector(R, n), {"k": Unit, "tau_e": Rplus}, AR1_logpdf)
@pytest.mark.parametrize("n", [2, 3])
def test_wishart(self, n):
# This check compares the autodiff gradient to the numdiff gradient.
# However, due to the strict constraints of the wishart,
# it is impossible to numerically determine the gradient as a small
# pertubation breaks the symmetry. Thus disabling. Also, numdifftools was
# removed in June 2019, so an alternative would be needed.
#
# self.checkd(Wishart, PdMatrix(n), {'n': Domain([2, 3, 4, 2000]), 'V': PdMatrix(n)},
# checks=[self.check_dlogp])
pass
@pytest.mark.parametrize("x,eta,n,lp", LKJ_CASES)
def test_lkj(self, x, eta, n, lp):
    """Check LKJCorr logp (no transform) against precomputed reference values."""
    with Model() as model:
        LKJCorr("lkj", eta=eta, n=n, transform=None)
    pt = {"lkj": x}
    # float32 needs a looser tolerance than float64.
    decimals = select_by_precision(float64=6, float32=4)
    assert_almost_equal(model.fastlogp(pt), lp, decimal=decimals, err_msg=str(pt))
@pytest.mark.parametrize("n", [2, 3])
def test_dirichlet(self, n):
    """Check Dirichlet logp on the n-simplex against the reference dirichlet_logpdf."""
    self.pymc3_matches_scipy(Dirichlet, Simplex(n), {"a": Vector(Rplus, n)}, dirichlet_logpdf)
def test_dirichlet_shape(self):
    """Dirichlet infers its shape from the concentration vector `a`; passing a
    symbolic `a` without an explicit shape is deprecated and must warn."""
    a = tt.as_tensor_variable(np.r_[1, 2])
    with pytest.warns(DeprecationWarning):
        dir_rv = Dirichlet.dist(a)
        assert dir_rv.shape == (2,)
    with pytest.warns(DeprecationWarning), theano.change_flags(compute_test_value="ignore"):
        dir_rv = Dirichlet.dist(tt.vector())
def test_dirichlet_2D(self):
    """Check Dirichlet logp for a batch (2x2) of simplex-valued observations."""
    self.pymc3_matches_scipy(
        Dirichlet,
        MultiSimplex(2, 2),
        {"a": Vector(Vector(Rplus, 2), 2)},
        dirichlet_logpdf,
    )
@pytest.mark.parametrize("n", [2, 3])
def test_multinomial(self, n):
    """Check Multinomial logp against the reference multinomial_logpdf."""
    self.pymc3_matches_scipy(
        Multinomial, Vector(Nat, n), {"p": Simplex(n), "n": Nat}, multinomial_logpdf
    )
@pytest.mark.parametrize(
    "p,n",
    [
        [[0.25, 0.25, 0.25, 0.25], 1],
        [[0.3, 0.6, 0.05, 0.05], 2],
        [[0.3, 0.6, 0.05, 0.05], 10],
    ],
)
def test_multinomial_mode(self, p, n):
    """The Multinomial mode must sum to n over the last axis, for both a single
    probability vector and a stacked (2, k) batch of probability vectors."""
    _p = np.array(p)
    with Model() as model:
        m = Multinomial("m", n, _p, _p.shape)
    assert_allclose(m.distribution.mode.eval().sum(), n)
    _p = np.array([p, p])
    with Model() as model:
        m = Multinomial("m", n, _p, _p.shape)
    assert_allclose(m.distribution.mode.eval().sum(axis=-1), n)
@pytest.mark.parametrize(
    "p, shape, n",
    [
        [[0.25, 0.25, 0.25, 0.25], 4, 2],
        [[0.25, 0.25, 0.25, 0.25], (1, 4), 3],
        # 3: expect to fail
        # [[.25, .25, .25, .25], (10, 4)],
        [[0.25, 0.25, 0.25, 0.25], (10, 1, 4), 5],
        # 5: expect to fail
        # [[[.25, .25, .25, .25]], (2, 4), [7, 11]],
        [[[0.25, 0.25, 0.25, 0.25], [0.25, 0.25, 0.25, 0.25]], (2, 4), 13],
        [[[0.25, 0.25, 0.25, 0.25], [0.25, 0.25, 0.25, 0.25]], (1, 2, 4), [23, 29]],
        [
            [[0.25, 0.25, 0.25, 0.25], [0.25, 0.25, 0.25, 0.25]],
            (10, 2, 4),
            [31, 37],
        ],
        [[[0.25, 0.25, 0.25, 0.25], [0.25, 0.25, 0.25, 0.25]], (2, 4), [17, 19]],
    ],
)
def test_multinomial_random(self, p, shape, n):
    """Smoke test: Multinomial.random() must not raise for various broadcasting
    combinations of p, shape and (scalar or vector) n."""
    p = np.asarray(p)
    with Model() as model:
        m = Multinomial("m", n=n, p=p, shape=shape)
    m.random()
def test_multinomial_mode_with_shape(self):
    """Per-row mode sums must match the per-row trial counts when n is a vector."""
    n = [1, 10]
    p = np.asarray([[0.25, 0.25, 0.25, 0.25], [0.26, 0.26, 0.26, 0.22]])
    with Model() as model:
        m = Multinomial("m", n=n, p=p, shape=(2, 4))
    assert_allclose(m.distribution.mode.eval().sum(axis=-1), n)
def test_multinomial_vec(self):
    """Vectorized Multinomial logp must agree with scipy per-row and in total,
    whether observations are scored one at a time or as a stacked array."""
    vals = np.array([[2, 4, 4], [3, 3, 4]])
    p = np.array([0.2, 0.3, 0.5])
    n = 10
    with Model() as model_single:
        Multinomial("m", n=n, p=p, shape=len(p))
    with Model() as model_many:
        Multinomial("m", n=n, p=p, shape=vals.shape)
    # Per-row logp via the single-observation model.
    assert_almost_equal(
        scipy.stats.multinomial.logpmf(vals, n, p),
        np.asarray([model_single.fastlogp({"m": val}) for val in vals]),
        decimal=4,
    )
    # Per-row elemwise logp via the vectorized model.
    assert_almost_equal(
        scipy.stats.multinomial.logpmf(vals, n, p),
        model_many.free_RVs[0].logp_elemwise({"m": vals}).squeeze(),
        decimal=4,
    )
    # Total logp of the vectorized model equals the sum of the row logps.
    assert_almost_equal(
        sum([model_single.fastlogp({"m": val}) for val in vals]),
        model_many.fastlogp({"m": vals}),
        decimal=4,
    )
def test_multinomial_vec_1d_n(self):
    """Vectorized logp with a per-row trial count (1-D n, shared p)."""
    vals = np.array([[2, 4, 4], [4, 3, 4]])
    p = np.array([0.2, 0.3, 0.5])
    ns = np.array([10, 11])
    with Model() as model:
        Multinomial("m", n=ns, p=p, shape=vals.shape)
    assert_almost_equal(
        sum([multinomial_logpdf(val, n, p) for val, n in zip(vals, ns)]),
        model.fastlogp({"m": vals}),
        decimal=4,
    )
def test_multinomial_vec_1d_n_2d_p(self):
    """Vectorized logp with both per-row trial counts (1-D n) and per-row
    probability vectors (2-D p)."""
    vals = np.array([[2, 4, 4], [4, 3, 4]])
    ps = np.array([[0.2, 0.3, 0.5], [0.9, 0.09, 0.01]])
    ns = np.array([10, 11])
    with Model() as model:
        Multinomial("m", n=ns, p=ps, shape=vals.shape)
    assert_almost_equal(
        sum([multinomial_logpdf(val, n, p) for val, n, p in zip(vals, ns, ps)]),
        model.fastlogp({"m": vals}),
        decimal=4,
    )
def test_multinomial_vec_2d_p(self):
    """Vectorized logp with per-row probability vectors (2-D p, scalar n)."""
    vals = np.array([[2, 4, 4], [3, 3, 4]])
    ps = np.array([[0.2, 0.3, 0.5], [0.3, 0.3, 0.4]])
    n = 10
    with Model() as model:
        Multinomial("m", n=n, p=ps, shape=vals.shape)
    assert_almost_equal(
        sum([multinomial_logpdf(val, n, p) for val, p in zip(vals, ps)]),
        model.fastlogp({"m": vals}),
        decimal=4,
    )
def test_batch_multinomial(self):
    """Batched (4, 5, 3) Multinomial: with degenerate one-hot p, the matching
    one-hot counts have probability 1, and random() reproduces them exactly."""
    n = 10
    vals = np.zeros((4, 5, 3), dtype="int32")
    p = np.zeros_like(vals, dtype=theano.config.floatX)
    # Put all n counts (and all probability mass) on one random category per batch entry.
    inds = np.random.randint(vals.shape[-1], size=vals.shape[:-1])[..., None]
    np.put_along_axis(vals, inds, n, axis=-1)
    np.put_along_axis(p, inds, 1, axis=-1)
    dist = Multinomial.dist(n=n, p=p, shape=vals.shape)
    value = tt.tensor3(dtype="int32")
    value.tag.test_value = np.zeros_like(vals, dtype="int32")
    logp = tt.exp(dist.logp(value))
    f = theano.function(inputs=[value], outputs=logp)
    assert_almost_equal(
        f(vals),
        np.ones(vals.shape[:-1] + (1,)),
        decimal=select_by_precision(float64=6, float32=3),
    )
    # Sampling is deterministic here: all mass is on a single outcome.
    sample = dist.random(size=2)
    assert_allclose(sample, np.stack([vals, vals], axis=0))
def test_categorical_bounds(self):
    """Out-of-support category indices must get -inf logp."""
    with Model():
        x = Categorical("x", p=np.array([0.2, 0.3, 0.5]))
        assert np.isinf(x.logp({"x": -1}))
        assert np.isinf(x.logp({"x": 3}))
def test_categorical_valid_p(self):
    """Invalid probability vectors (any negative entry) must yield -inf logp
    for every category, even when the entries happen to sum to 1."""
    with Model():
        x = Categorical("x", p=np.array([-0.2, 0.3, 0.5]))
        assert np.isinf(x.logp({"x": 0}))
        assert np.isinf(x.logp({"x": 1}))
        assert np.isinf(x.logp({"x": 2}))
    with Model():
        # A model where p sums to 1 but contains negative values
        x = Categorical("x", p=np.array([-0.2, 0.7, 0.5]))
        assert np.isinf(x.logp({"x": 0}))
        assert np.isinf(x.logp({"x": 1}))
        assert np.isinf(x.logp({"x": 2}))
    with Model():
        # Hard edge case from #2082
        # Early automatic normalization of p's sum would hide the negative
        x = Categorical("x", p=np.array([-1, -1, 0, 0]))
        assert np.isinf(x.logp({"x": 0}))
        assert np.isinf(x.logp({"x": 1}))
        assert np.isinf(x.logp({"x": 2}))
        assert np.isinf(x.logp({"x": 3}))
@pytest.mark.parametrize("n", [2, 3, 4])
def test_categorical(self, n):
    """Check Categorical logp against the reference categorical_logpdf."""
    # The reference function already has the (value, p) signature expected by
    # pymc3_matches_scipy, so pass it directly instead of a redundant lambda.
    self.pymc3_matches_scipy(
        Categorical,
        Domain(range(n), "int64"),
        {"p": Simplex(n)},
        categorical_logpdf,
    )
@pytest.mark.parametrize("n", [2, 3, 4])
def test_orderedlogistic(self, n):
    """Check OrderedLogistic logp against the reference orderedlogistic_logpdf."""
    # The reference function already has the (value, eta, cutpoints) signature
    # expected by pymc3_matches_scipy, so pass it directly instead of a lambda.
    self.pymc3_matches_scipy(
        OrderedLogistic,
        Domain(range(n), "int64"),
        {"eta": R, "cutpoints": Vector(R, n - 1)},
        orderedlogistic_logpdf,
    )
def test_densitydist(self):
    """DensityDist with a custom logp (a Laplace(mu=0.5, b=0.5) log-density)
    must pass the generic distribution checks."""
    def logp(x):
        return -log(2 * 0.5) - abs(x - 0.5) / 0.5

    self.checkd(DensityDist, R, {}, extra_args={"logp": logp})
def test_get_tau_sigma(self):
    """get_tau_sigma must return (tau, sigma) with tau = 1 / sigma**2."""
    sigma = np.array([2])
    assert_almost_equal(continuous.get_tau_sigma(sigma=sigma), [1.0 / sigma ** 2, sigma])
@pytest.mark.parametrize(
    "value,mu,sigma,nu,logp",
    [
        (0.5, -50.000, 0.500, 0.500, -99.8068528),
        (1.0, -1.000, 0.001, 0.001, -1992.5922447),
        (2.0, 0.001, 1.000, 1.000, -1.6720416),
        (5.0, 0.500, 2.500, 2.500, -2.4543644),
        (7.5, 2.000, 5.000, 5.000, -2.8259429),
        (15.0, 5.000, 7.500, 7.500, -3.3093854),
        (50.0, 50.000, 10.000, 10.000, -3.6436067),
        (1000.0, 500.000, 10.000, 20.000, -27.8707323),
    ],
)
def test_ex_gaussian(self, value, mu, sigma, nu, logp):
    """Check ExGaussian logp against precomputed reference values."""
    with Model() as model:
        ExGaussian("eg", mu=mu, sigma=sigma, nu=nu)
    pt = {"eg": value}
    assert_almost_equal(
        model.fastlogp(pt),
        logp,
        decimal=select_by_precision(float64=6, float32=2),
        err_msg=str(pt),
    )
@pytest.mark.parametrize(
    "value,mu,sigma,nu,logcdf",
    [
        (0.5, -50.000, 0.500, 0.500, 0.0000000),
        (1.0, -1.000, 0.001, 0.001, 0.0000000),
        (2.0, 0.001, 1.000, 1.000, -0.2365674),
        (5.0, 0.500, 2.500, 2.500, -0.2886489),
        (7.5, 2.000, 5.000, 5.000, -0.5655104),
        (15.0, 5.000, 7.500, 7.500, -0.4545255),
        (50.0, 50.000, 10.000, 10.000, -1.433714),
        (1000.0, 500.000, 10.000, 20.000, -1.573708e-11),
    ],
)
def test_ex_gaussian_cdf(self, value, mu, sigma, nu, logcdf):
    """Check ExGaussian logcdf against precomputed reference values."""
    assert_almost_equal(
        ExGaussian.dist(mu=mu, sigma=sigma, nu=nu).logcdf(value).tag.test_value,
        logcdf,
        decimal=select_by_precision(float64=6, float32=2),
        err_msg=str((value, mu, sigma, nu, logcdf)),
    )
@pytest.mark.xfail(condition=(theano.config.floatX == "float32"), reason="Fails on float32")
def test_vonmises(self):
    """Compare VonMises logp with scipy.stats.vonmises."""
    self.pymc3_matches_scipy(
        VonMises,
        R,
        {"mu": Circ, "kappa": Rplus},
        lambda value, mu, kappa: floatX(sp.vonmises.logpdf(value, kappa, loc=mu)),
    )
def test_gumbel(self):
    """Compare Gumbel logp and logcdf with scipy.stats.gumbel_r."""
    scipy_logpdf = lambda value, mu, beta: floatX(sp.gumbel_r.logpdf(value, loc=mu, scale=beta))
    scipy_logcdf = lambda value, mu, beta: floatX(sp.gumbel_r.logcdf(value, loc=mu, scale=beta))
    self.pymc3_matches_scipy(Gumbel, R, {"mu": R, "beta": Rplusbig}, scipy_logpdf)
    self.check_logcdf(Gumbel, R, {"mu": R, "beta": Rplusbig}, scipy_logcdf)
def test_logistic(self):
    """Compare Logistic logp and logcdf with scipy.stats.logistic."""
    self.pymc3_matches_scipy(
        Logistic,
        R,
        {"mu": R, "s": Rplus},
        lambda value, mu, s: sp.logistic.logpdf(value, mu, s),
        decimal=select_by_precision(float64=6, float32=1),
    )
    self.check_logcdf(
        Logistic,
        R,
        {"mu": R, "s": Rplus},
        lambda value, mu, s: sp.logistic.logcdf(value, mu, s),
        decimal=select_by_precision(float64=6, float32=1),
    )
def test_logitnormal(self):
    """Compare LogitNormal logp with a scipy-based reference: a normal density
    on logit(value) plus the Jacobian term -(log(value) + log1p(-value))."""
    self.pymc3_matches_scipy(
        LogitNormal,
        Unit,
        {"mu": R, "sigma": Rplus},
        lambda value, mu, sigma: (
            sp.norm.logpdf(logit(value), mu, sigma) - (np.log(value) + np.log1p(-value))
        ),
        decimal=select_by_precision(float64=6, float32=1),
    )
def test_multidimensional_beta_construction(self):
    """Smoke test: constructing a (10, 20)-shaped Beta must not raise."""
    with Model():
        Beta("beta", alpha=1.0, beta=1.0, shape=(10, 20))
def test_rice(self):
    """Compare Rice logp with scipy.stats.rice for both the (nu, sigma) and the
    (b, sigma) parametrizations (b = nu / sigma)."""
    self.pymc3_matches_scipy(
        Rice,
        Rplus,
        {"nu": Rplus, "sigma": Rplusbig},
        lambda value, nu, sigma: sp.rice.logpdf(value, b=nu / sigma, loc=0, scale=sigma),
    )
    self.pymc3_matches_scipy(
        Rice,
        Rplus,
        {"b": Rplus, "sigma": Rplusbig},
        lambda value, b, sigma: sp.rice.logpdf(value, b=b, loc=0, scale=sigma),
    )
@pytest.mark.xfail(condition=(theano.config.floatX == "float32"), reason="Fails on float32")
def test_moyal(self):
    """Compare Moyal logp and logcdf with scipy.stats.moyal."""
    self.pymc3_matches_scipy(
        Moyal,
        R,
        {"mu": R, "sigma": Rplusbig},
        lambda value, mu, sigma: floatX(sp.moyal.logpdf(value, mu, sigma)),
    )
    self.check_logcdf(
        Moyal,
        R,
        {"mu": R, "sigma": Rplusbig},
        lambda value, mu, sigma: floatX(sp.moyal.logcdf(value, mu, sigma)),
    )
@pytest.mark.xfail(condition=(theano.config.floatX == "float32"), reason="Fails on float32")
def test_interpolated(self):
    """An Interpolated distribution built from normal pdf samples on
    [mu - 5*sigma, mu + 5*sigma] must match the normal logpdf inside that
    interval and be -inf outside it."""
    for mu in R.vals:
        for sigma in Rplus.vals:
            # xmin/xmax/mu/sigma are captured by the closures below.
            xmin = mu - 5 * sigma
            xmax = mu + 5 * sigma

            class TestedInterpolated(Interpolated):
                def __init__(self, **kwargs):
                    x_points = np.linspace(xmin, xmax, 100000)
                    pdf_points = sp.norm.pdf(x_points, loc=mu, scale=sigma)
                    super().__init__(x_points=x_points, pdf_points=pdf_points, **kwargs)

            def ref_pdf(value):
                return np.where(
                    np.logical_and(value >= xmin, value <= xmax),
                    sp.norm.logpdf(value, mu, sigma),
                    -np.inf * np.ones(value.shape),
                )

            self.pymc3_matches_scipy(TestedInterpolated, R, {}, ref_pdf)
def test_bound():
    """Exercise pm.Bound wrappers: scalar, array-valued and symbolic bounds on
    continuous and discrete distributions — out-of-bound logp, default values,
    transforms and random sampling."""
    np.random.seed(42)
    # No bounds: behaves like the plain distribution, no transform applied.
    UnboundNormal = Bound(Normal)
    dist = UnboundNormal.dist(mu=0, sigma=1)
    assert dist.transform is None
    assert dist.default() == 0.0
    assert isinstance(dist.random(), np.ndarray)
    # Lower bound only: logp below the bound is -inf, samples stay above it.
    LowerNormal = Bound(Normal, lower=1)
    dist = LowerNormal.dist(mu=0, sigma=1)
    assert dist.logp(0).eval() == -np.inf
    assert dist.default() > 1
    assert dist.transform is not None
    assert np.all(dist.random() > 1)
    # Upper bound only.
    UpperNormal = Bound(Normal, upper=-1)
    dist = UpperNormal.dist(mu=0, sigma=1)
    assert dist.logp(-0.5).eval() == -np.inf
    assert dist.default() < -1
    assert dist.transform is not None
    assert np.all(dist.random() < -1)
    # Array-valued bounds: default is the per-element midpoint; random() is unsupported.
    ArrayNormal = Bound(Normal, lower=[1, 2], upper=[2, 3])
    dist = ArrayNormal.dist(mu=0, sigma=1, shape=2)
    assert_equal(dist.logp([0.5, 3.5]).eval(), -np.array([np.inf, np.inf]))
    assert_equal(dist.default(), np.array([1.5, 2.5]))
    assert dist.transform is not None
    with pytest.raises(ValueError) as err:
        dist.random()
    err.match("Drawing samples from distributions with array-valued")
    with Model():
        a = ArrayNormal("c", shape=2)
        assert_equal(a.tag.test_value, np.array([1.5, 2.5]))
    # Symbolic (theano vector) lower bound combined with a scalar upper bound.
    lower = tt.vector("lower")
    lower.tag.test_value = np.array([1, 2]).astype(theano.config.floatX)
    upper = 3
    ArrayNormal = Bound(Normal, lower=lower, upper=upper)
    dist = ArrayNormal.dist(mu=0, sigma=1, shape=2)
    logp = dist.logp([0.5, 3.5]).eval({lower: lower.tag.test_value})
    assert_equal(logp, -np.array([np.inf, np.inf]))
    assert_equal(dist.default(), np.array([2, 2.5]))
    assert dist.transform is not None
    with Model():
        a = ArrayNormal("c", shape=2)
        assert_equal(a.tag.test_value, np.array([2, 2.5]))
    # Bounded discrete distributions keep an integer dtype and respect the bounds.
    rand = Bound(Binomial, lower=10).dist(n=20, p=0.3).random()
    assert rand.dtype in [np.int16, np.int32, np.int64]
    assert rand >= 10
    rand = Bound(Binomial, upper=10).dist(n=20, p=0.8).random()
    assert rand.dtype in [np.int16, np.int32, np.int64]
    assert rand <= 10
    rand = Bound(Binomial, lower=5, upper=8).dist(n=10, p=0.6).random()
    assert rand.dtype in [np.int16, np.int32, np.int64]
    assert rand >= 5 and rand <= 8
    # Bound factories can be invoked with named or positional args inside a model.
    with Model():
        BoundPoisson = Bound(Poisson, upper=6)
        BoundPoisson(name="y", mu=1)
    with Model():
        BoundNormalNamedArgs = Bound(Normal, upper=6)("y", mu=2.0, sd=1.0)
        BoundNormalPositionalArgs = Bound(Normal, upper=6)("x", 2.0, 1.0)
    with Model():
        BoundPoissonNamedArgs = Bound(Poisson, upper=6)("y", mu=2.0)
        BoundPoissonPositionalArgs = Bound(Poisson, upper=6)("x", 2.0)
class TestStrAndLatexRepr:
    """Check the plain-text and LaTeX representations of distributions and of a
    whole model, with and without parameter values, against exact expected
    strings."""

    def setup_class(self):
        """Build a model covering many RV kinds (observed, deterministic, bound,
        multivariate, Kronecker/matrix normal) and record the expected reprs."""
        # simulate data from a known mixture distribution
        alpha, sigma = 1, 1
        beta = [1, 2.5]
        size = 100
        X = np.random.normal(size=(size, 2)).dot(np.array([[1, 0], [0, 0.2]]))
        Y = alpha + X.dot(beta) + np.random.randn(size) * sigma
        with Model() as self.model:
            alpha = Normal("alpha", mu=0, sigma=10)
            b = Normal("beta", mu=0, sigma=10, shape=(2,), observed=beta)
            sigma = HalfNormal("sigma", sigma=1)
            Z = MvNormal("Z", mu=np.zeros(2), chol=np.eye(2), shape=(2,))
            nb1 = pm.NegativeBinomial(
                "nb_with_mu_alpha", mu=pm.Normal("nbmu"), alpha=pm.Gamma("nbalpha", mu=6, sigma=1)
            )
            nb2 = pm.NegativeBinomial("nb_with_p_n", p=pm.Uniform("nbp"), n=10)
            mu = Deterministic("mu", floatX(alpha + tt.dot(X, b)))
            bound_var = Bound(Normal, lower=1.0)("bound_var", mu=0, sigma=10)
            n, m = 3, 4
            covs = [np.eye(n), np.eye(m)]
            kron_normal = KroneckerNormal("kron_normal", mu=np.zeros(n * m), covs=covs, shape=n * m)
            matrix_normal = MatrixNormal(
                "mat_normal",
                mu=np.random.normal(size=n),
                rowcov=np.eye(n),
                colchol=np.linalg.cholesky(np.eye(n)),
                shape=(n, n),
            )
            Y_obs = Normal("Y_obs", mu=mu, sigma=sigma, observed=Y)
        self.distributions = [alpha, sigma, mu, b, Z, nb1, nb2, Y_obs, bound_var]
        # Exact expected strings, keyed by representation flavor.
        self.expected = {
            "latex": (
                r"$\text{alpha} \sim \text{Normal}$",
                r"$\text{sigma} \sim \text{HalfNormal}$",
                r"$\text{mu} \sim \text{Deterministic}$",
                r"$\text{beta} \sim \text{Normal}$",
                r"$\text{Z} \sim \text{MvNormal}$",
                r"$\text{nb_with_mu_alpha} \sim \text{NegativeBinomial}$",
                r"$\text{nb_with_p_n} \sim \text{NegativeBinomial}$",
                r"$\text{Y_obs} \sim \text{Normal}$",
                r"$\text{bound_var} \sim \text{Bound}$ -- \text{Normal}$",
                r"$\text{kron_normal} \sim \text{KroneckerNormal}$",
                r"$\text{mat_normal} \sim \text{MatrixNormal}$",
            ),
            "plain": (
                r"alpha ~ Normal",
                r"sigma ~ HalfNormal",
                r"mu ~ Deterministic",
                r"beta ~ Normal",
                r"Z ~ MvNormal",
                r"nb_with_mu_alpha ~ NegativeBinomial",
                r"nb_with_p_n ~ NegativeBinomial",
                r"Y_obs ~ Normal",
                r"bound_var ~ Bound-Normal",
                r"kron_normal ~ KroneckerNormal",
                r"mat_normal ~ MatrixNormal",
            ),
            "latex_with_params": (
                r"$\text{alpha} \sim \text{Normal}(\mathit{mu}=0.0,~\mathit{sigma}=10.0)$",
                r"$\text{sigma} \sim \text{HalfNormal}(\mathit{sigma}=1.0)$",
                r"$\text{mu} \sim \text{Deterministic}(\text{alpha},~\text{Constant},~\text{beta})$",
                r"$\text{beta} \sim \text{Normal}(\mathit{mu}=0.0,~\mathit{sigma}=10.0)$",
                r"$\text{Z} \sim \text{MvNormal}(\mathit{mu}=array,~\mathit{chol_cov}=array)$",
                r"$\text{nb_with_mu_alpha} \sim \text{NegativeBinomial}(\mathit{mu}=\text{nbmu},~\mathit{alpha}=\text{nbalpha})$",
                r"$\text{nb_with_p_n} \sim \text{NegativeBinomial}(\mathit{p}=\text{nbp},~\mathit{n}=10)$",
                r"$\text{Y_obs} \sim \text{Normal}(\mathit{mu}=\text{mu},~\mathit{sigma}=f(\text{sigma}))$",
                r"$\text{bound_var} \sim \text{Bound}(\mathit{lower}=1.0,~\mathit{upper}=\text{None})$ -- \text{Normal}(\mathit{mu}=0.0,~\mathit{sigma}=10.0)$",
                r"$\text{kron_normal} \sim \text{KroneckerNormal}(\mathit{mu}=array)$",
                r"$\text{mat_normal} \sim \text{MatrixNormal}(\mathit{mu}=array,~\mathit{rowcov}=array,~\mathit{colchol_cov}=array)$",
            ),
            "plain_with_params": (
                r"alpha ~ Normal(mu=0.0, sigma=10.0)",
                r"sigma ~ HalfNormal(sigma=1.0)",
                r"mu ~ Deterministic(alpha, Constant, beta)",
                r"beta ~ Normal(mu=0.0, sigma=10.0)",
                r"Z ~ MvNormal(mu=array, chol_cov=array)",
                r"nb_with_mu_alpha ~ NegativeBinomial(mu=nbmu, alpha=nbalpha)",
                r"nb_with_p_n ~ NegativeBinomial(p=nbp, n=10)",
                r"Y_obs ~ Normal(mu=mu, sigma=f(sigma))",
                r"bound_var ~ Bound(lower=1.0, upper=None)-Normal(mu=0.0, sigma=10.0)",
                r"kron_normal ~ KroneckerNormal(mu=array)",
                r"mat_normal ~ MatrixNormal(mu=array, rowcov=array, colchol_cov=array)",
            ),
        }

    def test__repr_latex_(self):
        """Per-distribution _repr_latex_ matches exactly; the model repr
        contains each name/distribution segment."""
        for distribution, tex in zip(self.distributions, self.expected["latex_with_params"]):
            assert distribution._repr_latex_() == tex
        model_tex = self.model._repr_latex_()
        # make sure each variable is in the model repr (segments on either side of \sim)
        for tex in self.expected["latex"]:
            for segment in tex.strip("$").split(r"\sim"):
                assert segment in model_tex

    def test___latex__(self):
        """__latex__ must be an alias of _repr_latex_ for RVs and the model."""
        for distribution, tex in zip(self.distributions, self.expected["latex_with_params"]):
            assert distribution._repr_latex_() == distribution.__latex__()
        assert self.model._repr_latex_() == self.model.__latex__()

    def test___str__(self):
        """__str__ of each RV matches the expected plain repr; all appear in the model's str."""
        for distribution, str_repr in zip(self.distributions, self.expected["plain"]):
            assert distribution.__str__() == str_repr
        model_str = self.model.__str__()
        for str_repr in self.expected["plain"]:
            assert str_repr in model_str

    def test_str(self):
        """The builtin str() gives the same result as __str__ for RVs and the model."""
        for distribution, str_repr in zip(self.distributions, self.expected["plain"]):
            assert str(distribution) == str_repr
        model_str = str(self.model)
        for str_repr in self.expected["plain"]:
            assert str_repr in model_str
def test_discrete_trafo():
    """Requesting a transform on a discrete distribution must raise ValueError,
    both for unnamed dist() construction and inside a model."""
    with pytest.raises(ValueError) as err:
        Binomial.dist(n=5, p=0.5, transform="log")
    err.match("Transformations for discrete distributions")
    with Model():
        with pytest.raises(ValueError) as err:
            Binomial("a", n=5, p=0.5, transform="log")
        err.match("Transformations for discrete distributions")
@pytest.mark.parametrize("shape", [tuple(), (1,), (3, 1), (3, 2)], ids=str)
def test_orderedlogistic_dimensions(shape):
    """With zero eta and uniform cutpoints, OrderedLogistic must behave like a
    uniform 10-way Categorical for any batch shape (same logp, same p.ndim)."""
    # Begin rolling out if np.insert works
    loge = np.log10(np.exp(1))
    size = 7
    p = np.ones(shape + (10,)) / 10
    cutpoints = np.tile(logit(np.linspace(0, 1, 11)[1:-1]), shape + (1,))
    obs = np.random.randint(0, 1, size=(size,) + shape)
    with Model():
        ol = OrderedLogistic(
            "ol", eta=np.zeros(shape), cutpoints=cutpoints, shape=shape, observed=obs
        )
        c = Categorical("c", p=p, shape=shape, observed=obs)
    ologp = ol.logp({"ol": 1}) * loge
    clogp = c.logp({"c": 1}) * loge
    # Uniform over 10 categories: total log10-prob is -(number of observations).
    expected = -np.prod((size,) + shape)
    assert c.distribution.p.ndim == (len(shape) + 1)
    assert np.allclose(clogp, expected)
    assert ol.distribution.p.ndim == (len(shape) + 1)
    assert np.allclose(ologp, expected)
class TestBugfixes:
    """Regression tests for previously reported bugs."""

    @pytest.mark.parametrize(
        "dist_cls,kwargs", [(MvNormal, dict(mu=0)), (MvStudentT, dict(mu=0, nu=2))]
    )
    @pytest.mark.parametrize("dims", [1, 2, 4])
    def test_issue_3051(self, dims, dist_cls, kwargs):
        """Multivariate logp of a (20, dims) observation array must return one
        value per row (regression for pymc3 issue #3051)."""
        d = dist_cls.dist(**kwargs, cov=np.eye(dims), shape=(dims,))
        X = np.random.normal(size=(20, dims))
        actual_t = d.logp(X)
        assert isinstance(actual_t, tt.TensorVariable)
        actual_a = actual_t.eval()
        assert isinstance(actual_a, np.ndarray)
        assert actual_a.shape == (X.shape[0],)
        # (removed a stray trailing `pass` left after the assertions)
def test_serialize_density_dist():
    """A DensityDist (and thus the model) must be picklable so that
    multiprocess sampling with the "spawn" start method works."""
    import pickle  # hoisted to the top of the function per PEP 8

    def func(x):
        return -2 * (x ** 2).sum()

    with pm.Model():
        pm.Normal("x")
        y = pm.DensityDist("y", func)
        pm.sample(draws=5, tune=1, mp_ctx="spawn")

    # Round-trip the DensityDist through pickle to ensure it serializes.
    pickle.loads(pickle.dumps(y))
| true | true |
f7210284a779e313ff51ffedd4c249f391eb87fd | 121 | py | Python | data_prepare.py | yangsgit/Spam_Filter | 7003101f35d72bcdee50763addef25901bc1fdf4 | [
"MIT"
] | 1 | 2019-02-08T18:26:39.000Z | 2019-02-08T18:26:39.000Z | data_prepare.py | yangsgit/Spam_Filter | 7003101f35d72bcdee50763addef25901bc1fdf4 | [
"MIT"
] | null | null | null | data_prepare.py | yangsgit/Spam_Filter | 7003101f35d72bcdee50763addef25901bc1fdf4 | [
"MIT"
] | null | null | null | # 1 read file
# 2 clean data
# 3 tokenize
# 4 eleminate stop words
# 5 calculate tfidf matrix
def read_file(file_path):
| 15.125 | 26 | 0.735537 |
def read_file(file_path):
| false | true |
f72103e31fd52dd21e230b7d278470e15c333340 | 4,056 | py | Python | volttron/platform/agent/math_utils.py | Entek-Technical-Services/BEMOSS3.5 | 581a205b4129530474a5ceee93cb36ef62992d4c | [
"BSD-3-Clause"
] | 73 | 2017-07-11T21:46:41.000Z | 2022-03-11T03:35:25.000Z | volttron/platform/agent/math_utils.py | Entek-Technical-Services/BEMOSS3.5 | 581a205b4129530474a5ceee93cb36ef62992d4c | [
"BSD-3-Clause"
] | 19 | 2017-10-10T22:06:15.000Z | 2022-03-28T21:03:33.000Z | volttron/platform/agent/math_utils.py | Entek-Technical-Services/BEMOSS3.5 | 581a205b4129530474a5ceee93cb36ef62992d4c | [
"BSD-3-Clause"
] | 36 | 2017-06-24T00:17:03.000Z | 2022-03-31T13:58:36.000Z | # -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
# Copyright (c) 2015, Battelle Memorial Institute
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation
# are those of the authors and should not be interpreted as representing
# official policies, either expressed or implied, of the FreeBSD
# Project.
#
# This material was prepared as an account of work sponsored by an
# agency of the United States Government. Neither the United States
# Government nor the United States Department of Energy, nor Battelle,
# nor any of their employees, nor any jurisdiction or organization that
# has cooperated in the development of these materials, makes any
# warranty, express or implied, or assumes any legal liability or
# responsibility for the accuracy, completeness, or usefulness or any
# information, apparatus, product, software, or process disclosed, or
# represents that its use would not infringe privately owned rights.
#
# Reference herein to any specific commercial product, process, or
# service by trade name, trademark, manufacturer, or otherwise does not
# necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors
# expressed herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY
# operated by BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
#}}}
'''Dumping ground for VOLTTRON platform™ agent math helper functions.
Not meant to replace numpy in all cases. A basic set common math
routines to remove the need for numpy in simple cases.
This module should NEVER import numpy as that would defeat the
purpose.'''
def mean(data):
    """Return the sample arithmetic mean of data.

    Raises ValueError if data is empty. The divisor is coerced to float so the
    result is correct under Python 2's integer division as well (the original
    code's own comment noted this but did not apply it); under Python 3 the
    behavior is unchanged.
    """
    n = len(data)
    if n < 1:
        raise ValueError('mean requires at least one data point')
    return sum(data) / float(n)
def _ss(data):
    """Return the sum of squared deviations of the sequence *data* from its mean."""
    center = mean(data)
    return sum((value - center) ** 2 for value in data)
def pstdev(data):
    """Calculate the population standard deviation of *data*.

    Raises ValueError when fewer than two data points are supplied.
    """
    count = len(data)
    if count < 2:
        raise ValueError('variance requires at least two data points')
    # population variance divides by N; the square root gives the std dev
    return (_ss(data) / count) ** 0.5
def stdev(data):
    """Calculate the sample standard deviation of *data*.

    Raises ValueError when fewer than two data points are supplied.
    """
    count = len(data)
    if count < 2:
        raise ValueError('variance requires at least two data points')
    # sample variance uses Bessel's correction (divide by N - 1)
    return (_ss(data) / (count - 1)) ** 0.5
| 41.387755 | 72 | 0.747288 |
def mean(data):
n = len(data)
if n < 1:
raise ValueError('mean requires at least one data point')
return sum(data)/n
def _ss(data):
c = mean(data)
ss = sum((x-c)**2 for x in data)
return ss
def pstdev(data):
n = len(data)
if n < 2:
raise ValueError('variance requires at least two data points')
ss = _ss(data)
pvar = ss/n
return pvar**0.5
def stdev(data):
n = len(data)
if n < 2:
raise ValueError('variance requires at least two data points')
ss = _ss(data)
pvar = ss/(n-1)
return pvar**0.5
| true | true |
f721048f673e5d2667f9c29872d43caa5b8b8721 | 25,162 | py | Python | blender/.blender/scripts/ac3d_export.py | visnz/sketchfab_download | 976f667d5c2c2864b2bad65aceac0dab5ce51b74 | [
"Apache-2.0"
] | 41 | 2021-02-18T05:56:26.000Z | 2021-12-06T07:58:15.000Z | blender/.blender/scripts/ac3d_export.py | visnz/sketchfab_download | 976f667d5c2c2864b2bad65aceac0dab5ce51b74 | [
"Apache-2.0"
] | 19 | 2021-02-18T05:59:03.000Z | 2022-01-13T01:00:52.000Z | blender/.blender/scripts/ac3d_export.py | visnz/sketchfab_download | 976f667d5c2c2864b2bad65aceac0dab5ce51b74 | [
"Apache-2.0"
] | 18 | 2021-02-22T13:32:56.000Z | 2022-01-22T12:38:29.000Z | #!BPY
""" Registration info for Blender menus:
Name: 'AC3D (.ac)...'
Blender: 243
Group: 'Export'
Tip: 'Export selected meshes to AC3D (.ac) format'
"""
__author__ = "Willian P. Germano"
__url__ = ("blender", "blenderartists.org", "AC3D's homepage, http://www.ac3d.org",
"PLib 3d gaming lib, http://plib.sf.net")
__version__ = "2.44 2007-05-05"
__bpydoc__ = """\
This script exports selected Blender meshes to AC3D's .ac file format.
AC3D is a simple commercial 3d modeller also built with OpenGL.
The .ac file format is an easy to parse text format well supported,
for example, by the PLib 3d gaming library (AC3D 3.x).
Supported:<br>
UV-textured meshes with hierarchy (grouping) information.
Missing:<br>
The 'url' tag, specific to AC3D. It is easy to add by hand to the exported
file, if needed.
Known issues:<br>
The ambient and emit data we can retrieve from Blender are single values,
that this script copies to R, G, B, giving shades of gray.<br>
Loose edges (lines) receive the first material found in the mesh, if any, or a default white material.<br>
In AC3D 4 "compatibility mode":<br>
- shininess of materials is taken from the shader specularity value in Blender, mapped from [0.0, 2.0] to [0, 128];<br>
- crease angle is exported, but in Blender it is limited to [1, 80], since there are other more powerful ways to control surface smoothing. In AC3D 4.0 crease's range is [0.0, 180.0];
Config Options:<br>
toggle:<br>
- AC3D 4 mode: unset it to export without the 'crease' tag that was
introduced with AC3D 4.0 and with the old material handling;<br>
- global coords: transform all vertices of all meshes to global coordinates;<br>
- skip data: set it if you don't want mesh names (ME:, not OB: field)
to be exported as strings for AC's "data" tags (19 chars max);<br>
- rgb mirror color can be exported as ambient and/or emissive if needed,
since Blender handles these differently;<br>
- default mat: a default (white) material is added if some mesh was
left without mats -- it's better to always add your own materials;<br>
- no split: don't split meshes (see above);<br>
- set texture dir: override the actual textures path with a given default
path (or simply export the texture names, without dir info, if the path is
empty);<br>
- per face 1 or 2 sided: override the "Double Sided" button that defines this behavior per whole mesh in favor of the UV Face Select mode "twosided" per face attribute;<br>
- only selected: only consider selected objects when looking for meshes
to export (read notes below about tokens, too);<br>
strings:<br>
- export dir: default dir to export to;<br>
- texture dir: override textures path with this path if 'set texture dir'
toggle is "on".
Notes:<br>
This version updates:<br>
- modified meshes are correctly exported, no need to apply the modifiers in Blender;<br>
- correctly export each used material, be it assigned to the object or to its mesh data;<br>
- exporting lines (edges) is again supported; color comes from first material found in the mesh, if any, or a default white one.<br>
- there's a new option to choose between exporting meshes with transformed (global) coordinates or local ones;<br>
Multiple textures per mesh are supported (mesh gets split);<br>
Parents are exported as a group containing both the parent and its children;<br>
Start mesh object names (OB: field) with "!" or "#" if you don't want them to be exported;<br>
Start mesh object names (OB: field) with "=" or "$" to prevent them from being split (meshes with multiple textures or both textured and non textured faces are split unless this trick is used or the "no split" option is set.
"""
# $Id: ac3d_export.py 14530 2008-04-23 14:04:05Z campbellbarton $
#
# --------------------------------------------------------------------------
# AC3DExport version 2.44
# Program versions: Blender 2.42+ and AC3Db files (means version 0xb)
# new: updated for new Blender version and Mesh module; supports lines (edges) again;
# option to export vertices transformed to global coordinates or not; now the modified
# (by existing mesh modifiers) mesh is exported; materials are properly exported, no
# matter if each of them is linked to the mesh or to the object. New (2.43.1): loose
# edges use color of first material found in the mesh, if any.
# --------------------------------------------------------------------------
# Thanks: Steve Baker for discussions and inspiration; for testing, bug
# reports, suggestions, patches: David Megginson, Filippo di Natale,
# Franz Melchior, Campbell Barton, Josh Babcock, Ralf Gerlich, Stewart Andreason.
# --------------------------------------------------------------------------
# ***** BEGIN GPL LICENSE BLOCK *****
#
# Copyright (C) 2004-2007: Willian P. Germano, wgermano _at_ ig.com.br
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# --------------------------------------------------------------------------
import Blender
from Blender import Object, Mesh, Material, Image, Mathutils, Registry
from Blender import sys as bsys
# Globals
# Per-run report buckets, shown to the user after export.
REPORT_DATA = {
    'main': [],
    'errors': [],
    'warns': [],
    'nosplit': [],
    'noexport': []
}
# Leading characters in an object name that control export behavior.
TOKENS_DONT_EXPORT = ['!', '#']
TOKENS_DONT_SPLIT = ['=', '$']
MATIDX_ERROR = 0

# flags:
LOOSE = Mesh.EdgeFlags['LOOSE']
FACE_TWOSIDED = Mesh.FaceModes['TWOSIDE']
MESH_TWOSIDED = Mesh.Modes['TWOSIDED']

# Blender Registry key under which the exporter's options are persisted.
REG_KEY = 'ac3d_export'

# config options (defaults; may be overridden from the Registry below):
GLOBAL_COORDS = True
SKIP_DATA = False
MIRCOL_AS_AMB = False
MIRCOL_AS_EMIS = False
ADD_DEFAULT_MAT = True
SET_TEX_DIR = True
TEX_DIR = ''
AC3D_4 = True # export crease value, compatible with AC3D 4 loaders
NO_SPLIT = False
ONLY_SELECTED = True
EXPORT_DIR = ''
PER_FACE_1_OR_2_SIDED = True

# Tooltips shown in the exporter's GUI for each config option.
tooltips = {
    'GLOBAL_COORDS': "transform all vertices of all meshes to global coordinates",
    'SKIP_DATA': "don't export mesh names as data fields",
    'MIRCOL_AS_AMB': "export mirror color as ambient color",
    'MIRCOL_AS_EMIS': "export mirror color as emissive color",
    'ADD_DEFAULT_MAT': "always add a default white material",
    'SET_TEX_DIR': "don't export default texture paths (edit also \"tex dir\")",
    'EXPORT_DIR': "default / last folder used to export .ac files to",
    'TEX_DIR': "(see \"set tex dir\") dir to prepend to all exported texture names (leave empty for no dir)",
    'AC3D_4': "compatibility mode, adds 'crease' tag and slightly better material support",
    'NO_SPLIT': "don't split meshes with multiple textures (or both textured and non textured polygons)",
    'ONLY_SELECTED': "export only selected objects",
    'PER_FACE_1_OR_2_SIDED': "override \"Double Sided\" button in favor of per face \"twosided\" attribute (UV Face Select mode)"
}
def update_RegistryInfo():
    """Persist the current exporter options in Blender's Registry.

    Stores every config toggle plus the tooltips dict under REG_KEY, so the
    settings survive across Blender sessions (third arg True = write to disk).
    """
    settings = {
        'SKIP_DATA': SKIP_DATA,
        'MIRCOL_AS_AMB': MIRCOL_AS_AMB,
        'MIRCOL_AS_EMIS': MIRCOL_AS_EMIS,
        'ADD_DEFAULT_MAT': ADD_DEFAULT_MAT,
        'SET_TEX_DIR': SET_TEX_DIR,
        'TEX_DIR': TEX_DIR,
        'AC3D_4': AC3D_4,
        'NO_SPLIT': NO_SPLIT,
        'EXPORT_DIR': EXPORT_DIR,
        'ONLY_SELECTED': ONLY_SELECTED,
        'PER_FACE_1_OR_2_SIDED': PER_FACE_1_OR_2_SIDED,
        'tooltips': tooltips,
        'GLOBAL_COORDS': GLOBAL_COORDS
    }
    Registry.SetKey(REG_KEY, settings, True)
# Looking for a saved key in Blender.Registry dict:
rd = Registry.GetKey(REG_KEY, True)
if rd:
    # Restore saved options; a KeyError means the stored data predates the
    # current option set, so rewrite it with the defaults.
    try:
        AC3D_4 = rd['AC3D_4']
        SKIP_DATA = rd['SKIP_DATA']
        MIRCOL_AS_AMB = rd['MIRCOL_AS_AMB']
        MIRCOL_AS_EMIS = rd['MIRCOL_AS_EMIS']
        ADD_DEFAULT_MAT = rd['ADD_DEFAULT_MAT']
        SET_TEX_DIR = rd['SET_TEX_DIR']
        TEX_DIR = rd['TEX_DIR']
        EXPORT_DIR = rd['EXPORT_DIR']
        ONLY_SELECTED = rd['ONLY_SELECTED']
        NO_SPLIT = rd['NO_SPLIT']
        PER_FACE_1_OR_2_SIDED = rd['PER_FACE_1_OR_2_SIDED']
        GLOBAL_COORDS = rd['GLOBAL_COORDS']
    except KeyError: update_RegistryInfo()
else:
    # First run: create the Registry entry with the defaults.
    update_RegistryInfo()
VERBOSE = True
CONFIRM_OVERWRITE = True
# check General scripts config key for default behaviors
rd = Registry.GetKey('General', True)
if rd:
    try:
        VERBOSE = rd['verbose']
        CONFIRM_OVERWRITE = rd['confirm_overwrite']
    except: pass
# The default material to be used when necessary (see ADD_DEFAULT_MAT);
# note: the backslashes continue the *string literal*, it is one line of output.
DEFAULT_MAT = \
'MATERIAL "DefaultWhite" rgb 1 1 1 amb 1 1 1 emis 0 0 0 \
spec 0.5 0.5 0.5 shi 64 trans 0'
# This transformation aligns Blender and AC3D coordinate systems
# (swaps Y and Z, negating one axis):
BLEND_TO_AC3D_MATRIX = Mathutils.Matrix([1,0,0,0], [0,0,-1,0], [0,1,0,0], [0,0,0,1])
def Round_s(f):
    """Round f to 6 decimal places and return it as a compact string.

    Whole numbers are written without a trailing '.0' (e.g. 2.0 -> '2'),
    keeping the exported .ac file small.
    """
    rounded = round(f, 6)  # precision fixed at 10e-06
    truncated = int(rounded)
    if rounded == truncated:
        return str(truncated)
    return str(rounded)
def transform_verts(verts, m):
    """Return the vertices' coordinates as homogeneous vectors times matrix m.

    Each Blender vertex contributes a 4D Mathutils.Vector (x, y, z, 1)
    multiplied by the 4x4 transformation matrix m.
    """
    return [
        Mathutils.Vector([v.co[0], v.co[1], v.co[2], 1]) * m
        for v in verts
    ]
def get_loose_edges(mesh):
    """Return the mesh edges that have the LOOSE flag set (edges not
    belonging to any face); they are exported as AC3D line surfaces."""
    loose_flag = LOOSE
    result = []
    for edge in mesh.edges:
        if edge.flag & loose_flag:
            result.append(edge)
    return result
# ---
# meshes with more than one texture assigned
# are split and saved as these foomeshes
class FooMesh:
    """Light-weight stand-in for a Blender mesh holding a subset of faces.

    When a mesh mixes textures it is split into one FooMesh per texture
    (see AC3DExport.split_mesh).  Vertices referenced by the subset are
    re-indexed from 0 so each FooMesh can be exported as an independent
    AC3D 'poly' object.
    """

    class FooVert:
        # Wraps a real vertex so it can carry a remapped index without
        # touching the original mesh data.
        def __init__(self, v):
            self.v = v
            self.index = 0

    class FooFace:
        # Wraps a real face, replacing its vertices with FooVerts; all
        # other attribute reads are forwarded to the wrapped face.
        def __init__(self, foomesh, f):
            self.f = f
            foov = foomesh.FooVert
            # faces have 2 (edge), 3 (tri) or 4 (quad) vertices
            self.v = [foov(f.v[0]), foov(f.v[1])]
            len_fv = len(f.v)
            if len_fv > 2 and f.v[2]:
                self.v.append(foov(f.v[2]))
                if len_fv > 3 and f.v[3]: self.v.append(foov(f.v[3]))
        def __getattr__(self, attr):
            # 'v' is ours; everything else (mat, mode, uv, smooth, ...)
            # comes from the wrapped Blender face.
            if attr == 'v': return self.v
            return getattr(self.f, attr)
        def __len__(self):
            return len(self.f)

    def __init__(self, tex, faces, mesh):
        # tex: texture key this sub-mesh was split on (unused here, kept
        # for the caller); faces: the subset of mesh faces; mesh: source.
        self.name = mesh.name
        self.mesh = mesh
        self.looseEdges = []
        self.faceUV = mesh.faceUV
        self.degr = mesh.degr
        # Pass 1: wrap the faces and mark (with 1) every vertex they use.
        vidxs = [0]*len(mesh.verts)
        foofaces = []
        for f in faces:
            foofaces.append(self.FooFace(self, f))
            for v in f.v:
                if v: vidxs[v.index] = 1
        # Pass 2: collect the used vertices and reuse vidxs as a map from
        # old index -> new compact index.
        i = 0
        fooverts = []
        for v in mesh.verts:
            if vidxs[v.index]:
                fooverts.append(v)
                vidxs[v.index] = i
                i += 1
        # Pass 3: rewrite each FooVert's index through the map.
        for f in foofaces:
            for v in f.v:
                if v: v.index = vidxs[v.v.index]
        self.faces = foofaces
        self.verts = fooverts
class AC3DExport: # the ac3d exporter part
    """Translate a list of Blender objects into an AC3D (.ac) file.

    The constructor does all the work: it writes the 'AC3Db' header, the
    material table and the whole object hierarchy to the already open
    'file'.  Only Mesh and Empty objects are considered; parents are
    exported as AC3D groups containing their children.

    Fixes over the previous revision:
    - NameError on the NO_SPLIT / "don't split" path (undefined 'ob').
    - KeyError in split_mesh when untextured line-only faces were merged
      into a textured bucket (key 0 stayed in 'keys' after being popped).
    """

    def __init__(self, scene_objects, file):
        global ARG, SKIP_DATA, ADD_DEFAULT_MAT, DEFAULT_MAT
        header = 'AC3Db'
        self.file = file
        self.buf = ''
        self.mbuf = []   # material lines, filled by MATERIALS()
        self.mlist = []  # exported material names, in file order
        world_kids = 0
        parents_list = self.parents_list = []  # names of objects exported as groups
        kids_dict = self.kids_dict = {}        # object name -> number of exported children
        exp_objs = self.exp_objs = []          # ordered names of objects to export
        tree = {}
        file.write(header+'\n')
        objs = \
        [o for o in scene_objects if o.type in ['Mesh', 'Empty']]
        # create a tree from parents to children objects
        for obj in objs[:]:
            parent = obj.parent
            lineage = [obj]
            while parent:
                parents_list.append(parent.name)
                obj = parent
                parent = parent.getParent()
                lineage.insert(0, obj)
            # Walk/build nested dicts along this lineage.  Keys are prefixed
            # with the first two chars of the type ("Me"/"Em") so
            # traverse_dict can recognize empties.
            d = tree
            for i in xrange(len(lineage)):
                lname = lineage[i].getType()[:2] + lineage[i].name
                if lname not in d.keys():
                    d[lname] = {}
                d = d[lname]
        # traverse the tree to get an ordered list of names of objects to export
        self.traverse_dict(tree)
        world_kids = len(tree.keys())
        # get list of objects to export, start writing the .ac file
        objlist = [Object.Get(name) for name in exp_objs]
        meshlist = [o for o in objlist if o.type == 'Mesh']
        # one temporary mesh is reused to hold each object's actual
        # (modifier-applied) mesh data
        TMP_mesh = Mesh.New('tmp_for_ac_export')
        # write materials
        self.MATERIALS(meshlist, TMP_mesh)
        mbuf = self.mbuf
        if not mbuf or ADD_DEFAULT_MAT:
            mbuf.insert(0, "%s\n" % DEFAULT_MAT)
        mbuf = "".join(mbuf)
        file.write(mbuf)
        file.write('OBJECT world\nkids %s\n' % world_kids)
        # write the objects
        for obj in objlist:
            self.obj = obj
            objtype = obj.type
            objname = obj.name
            kidsnum = kids_dict[objname]
            # A parent plus its children are exported as a group.
            # If the parent is a mesh, its rot and loc are exported as the
            # group rot and loc and the mesh (w/o rot and loc) is added to
            # the group.
            if kidsnum:
                self.OBJECT('group')
                self.name(objname)
                if objtype == 'Mesh':
                    kidsnum += 1
                if not GLOBAL_COORDS:
                    localmatrix = obj.getMatrix('localspace')
                    if not obj.getParent():
                        localmatrix *= BLEND_TO_AC3D_MATRIX
                    self.rot(localmatrix.rotationPart())
                    self.loc(localmatrix.translationPart())
                self.kids(kidsnum)
            if objtype == 'Mesh':
                mesh = TMP_mesh # temporary mesh holds actual (modified) mesh data
                mesh.getFromObject(objname)
                self.mesh = mesh
                if mesh.faceUV:
                    meshes = self.split_mesh(mesh)
                else:
                    meshes = [mesh]
                if len(meshes) > 1:
                    if NO_SPLIT or self.dont_split(objname):
                        # BUG FIX: previously called export_mesh(mesh, ob)
                        # with undefined 'ob', raising NameError here.
                        self.export_mesh(mesh, obj)
                        REPORT_DATA['nosplit'].append(objname)
                    else:
                        # export each per-texture sub-mesh inside a group
                        self.OBJECT('group')
                        self.name(objname)
                        self.kids(len(meshes))
                        counter = 0
                        for me in meshes:
                            self.export_mesh(me, obj,
                                name = '%s_%s' % (obj.name, counter), foomesh = True)
                            self.kids()
                            counter += 1
                else:
                    self.export_mesh(mesh, obj)
                    self.kids()

    def traverse_dict(self, d):
        """Depth-first walk of the parent->children name tree.

        Fills self.exp_objs with the ordered names to export and fixes up
        self.kids_dict for objects that get pruned (marked "don't export"
        or empties without children).
        """
        kids_dict = self.kids_dict
        exp_objs = self.exp_objs
        keys = d.keys()
        keys.sort() # sort for predictable output
        keys.reverse()
        for k in keys:
            objname = k[2:]  # strip the "Me"/"Em" type prefix
            klen = len(d[k])
            kids_dict[objname] = klen
            if self.dont_export(objname):
                d.pop(k)
                parent = Object.Get(objname).getParent()
                if parent: kids_dict[parent.name] -= 1
                REPORT_DATA['noexport'].append(objname)
                continue
            if klen:
                self.traverse_dict(d[k])
                exp_objs.insert(0, objname)
            else:
                if k.find('Em', 0) == 0: # Empty w/o children: prune it
                    d.pop(k)
                    parent = Object.Get(objname).getParent()
                    if parent: kids_dict[parent.name] -= 1
                else:
                    exp_objs.insert(0, objname)

    def dont_export(self, name): # if name starts with '!' or '#'
        """Return truthy if this object name is marked "do not export".

        A doubled marker ('!!', '##') escapes it, meaning: do export.
        Returns None (falsy) for unmarked names.
        """
        length = len(name)
        if length >= 1:
            if name[0] in TOKENS_DONT_EXPORT:
                if length > 1 and name[1] == name[0]:
                    return 0  # doubled (escaped): export
                return 1

    def dont_split(self, name): # if name starts with '=' or '$'
        """Return truthy if this object name is marked "do not split".

        A doubled marker ('==', '$$') escapes it, meaning: do split.
        Returns None (falsy) for unmarked names.
        """
        length = len(name)
        if length >= 1:
            if name[0] in TOKENS_DONT_SPLIT:
                if length > 1 and name[1] == name[0]:
                    return 0  # doubled (escaped): split
                return 1

    def split_mesh(self, mesh):
        """Group the mesh faces by texture image.

        Returns [mesh] when no split is needed, otherwise a list of
        FooMesh sub-meshes, one per texture (key 0 = untextured faces).
        """
        tex_dict = {0:[]}
        for f in mesh.faces:
            if f.image:
                if not f.image.name in tex_dict: tex_dict[f.image.name] = []
                tex_dict[f.image.name].append(f)
            else: tex_dict[0].append(f)
        keys = tex_dict.keys()
        len_keys = len(keys)
        if not tex_dict[0]:
            # no untextured faces at all: drop the empty bucket
            len_keys -= 1
            tex_dict.pop(0)
            keys.remove(0)
        elif len_keys > 1:
            # If every untextured face is a line (< 3 verts), merge the
            # lines into one textured bucket instead of creating an extra
            # untextured sub-mesh just for them.
            lines = []
            anyimgkey = [k for k in keys if k != 0][0]
            for f in tex_dict[0]:
                if len(f.v) < 3:
                    lines.append(f)
            if len(tex_dict[0]) == len(lines):
                for l in lines:
                    tex_dict[anyimgkey].append(l)
                len_keys -= 1
                tex_dict.pop(0)
                # BUG FIX: key 0 was popped from tex_dict but left in
                # 'keys', which made the loop below raise KeyError when
                # the mesh still needed splitting.
                keys.remove(0)
        if len_keys > 1:
            foo_meshes = []
            for k in keys:
                faces = tex_dict[k]
                foo_meshes.append(FooMesh(k, faces, mesh))
            # loose edges are attached to the first sub-mesh only
            foo_meshes[0].edges = get_loose_edges(mesh)
            return foo_meshes
        return [mesh]

    def export_mesh(self, mesh, obj, name = None, foomesh = False):
        """Write one mesh (or FooMesh sub-mesh) as an AC3D 'poly' object."""
        file = self.file
        self.OBJECT('poly')
        if not name: name = obj.name
        self.name(name)
        if not SKIP_DATA:
            meshname = obj.getData(name_only = True)
            self.data(len(meshname), meshname)
        if mesh.faceUV:
            # NOTE: texture() writes directly to the file and returns None,
            # so this guard never fires; kept for compatibility.
            texline = self.texture(mesh.faces)
            if texline: file.write(texline)
        if AC3D_4:
            self.crease(mesh.degr)
        # If exporting using local coordinates, children object coordinates
        # should not be transformed to ac3d's coordinate system, since that
        # will be accounted for in their topmost parents (the parents w/o
        # parents) transformations.
        if not GLOBAL_COORDS:
            # We hold parents in a list, so they also don't get transformed,
            # because for each parent we create an ac3d group to hold both
            # the parent and its children.
            if obj.name not in self.parents_list:
                localmatrix = obj.getMatrix('localspace')
                if not obj.getParent():
                    localmatrix *= BLEND_TO_AC3D_MATRIX
                self.rot(localmatrix.rotationPart())
                self.loc(localmatrix.translationPart())
            matrix = None
        else:
            matrix = obj.getMatrix() * BLEND_TO_AC3D_MATRIX
        self.numvert(mesh.verts, matrix)
        self.numsurf(mesh, foomesh)

    def MATERIALS(self, meshlist, me):
        """Collect and buffer MATERIAL lines for every material used by the
        given objects.  'me' is a scratch mesh reused per object."""
        for meobj in meshlist:
            me.getFromObject(meobj)
            mats = me.materials
            mbuf = []
            mlist = self.mlist
            for m in mats:
                if not m: continue
                name = m.name
                if name not in mlist:
                    mlist.append(name)
                    M = Material.Get(name)
                    material = 'MATERIAL "%s"' % name
                    mirCol = "%s %s %s" % (Round_s(M.mirCol[0]), Round_s(M.mirCol[1]),
                        Round_s(M.mirCol[2]))
                    rgb = "rgb %s %s %s" % (Round_s(M.R), Round_s(M.G), Round_s(M.B))
                    # Blender's ambient is a single value: copy to R, G, B
                    ambval = Round_s(M.amb)
                    amb = "amb %s %s %s" % (ambval, ambval, ambval)
                    spec = "spec %s %s %s" % (Round_s(M.specCol[0]),
                        Round_s(M.specCol[1]), Round_s(M.specCol[2]))
                    if AC3D_4:
                        emit = Round_s(M.emit)
                        emis = "emis %s %s %s" % (emit, emit, emit)
                        # map shader specularity [0.0, 2.0] -> shininess [0, 128]
                        shival = int(M.spec * 64)
                    else:
                        emis = "emis 0 0 0"
                        shival = 72
                    shi = "shi %s" % shival
                    trans = "trans %s" % (Round_s(1 - M.alpha))
                    # optionally reuse the mirror color for amb/emis
                    if MIRCOL_AS_AMB:
                        amb = "amb %s" % mirCol
                    if MIRCOL_AS_EMIS:
                        emis = "emis %s" % mirCol
                    mbuf.append("%s %s %s %s %s %s %s\n" \
                        % (material, rgb, amb, emis, spec, shi, trans))
            self.mlist = mlist
            self.mbuf.append("".join(mbuf))

    def OBJECT(self, type):
        """Write an OBJECT line ('world', 'group' or 'poly')."""
        self.file.write('OBJECT %s\n' % type)

    def name(self, name):
        """Write the name tag, stripping a single leading marker token."""
        if name[0] in TOKENS_DONT_EXPORT or name[0] in TOKENS_DONT_SPLIT:
            if len(name) > 1: name = name[1:]
        self.file.write('name "%s"\n' % name)

    def kids(self, num = 0):
        """Write the kids (children count) tag; 0 closes a leaf object."""
        self.file.write('kids %s\n' % num)

    def data(self, num, str):
        """Write a data tag: byte length on one line, payload on the next."""
        self.file.write('data %s\n%s\n' % (num, str))

    def texture(self, faces):
        """Write texture filename and texrep tags for the first textured
        face found, honoring the SET_TEX_DIR / TEX_DIR options.
        Writes directly to the file; returns None."""
        tex = ""
        for f in faces:
            if f.image:
                tex = f.image.name
                break
        if tex:
            image = Image.Get(tex)
            texfname = image.filename
            if SET_TEX_DIR:
                texfname = bsys.basename(texfname)
                if TEX_DIR:
                    texfname = bsys.join(TEX_DIR, texfname)
            buf = 'texture "%s"\n' % texfname
            xrep = image.xrep
            yrep = image.yrep
            buf += 'texrep %s %s\n' % (xrep, yrep)
            self.file.write(buf)

    def rot(self, matrix):
        """Write the rot tag for a 3x3 rotation matrix, skipping identity."""
        not_I = 0 # stays 0 only for the identity matrix
        matstr = []
        for i in [0, 1, 2]:
            r = map(Round_s, matrix[i])
            not_I += (r[0] != '0')+(r[1] != '0')+(r[2] != '0')
            not_I -= (r[i] == '1')
            for j in [0, 1, 2]:
                matstr.append(' %s' % r[j])
        if not_I: # no need to write identity
            self.file.write('rot%s\n' % "".join(matstr))

    def loc(self, loc):
        """Write the loc (translation) tag, skipping the zero vector."""
        loc = map(Round_s, loc)
        if loc != ['0', '0', '0']: # no need to write default
            self.file.write('loc %s %s %s\n' % (loc[0], loc[1], loc[2]))

    def crease(self, crease):
        """Write the AC3D 4 crease (smoothing angle) tag."""
        self.file.write('crease %f\n' % crease)

    def numvert(self, verts, matrix):
        """Write the numvert section: vertex count plus one coordinate
        triple per line, optionally transformed by 'matrix'."""
        file = self.file
        nvstr = []
        nvstr.append("numvert %s\n" % len(verts))
        if matrix:
            verts = transform_verts(verts, matrix)
            for v in verts:
                v = map(Round_s, v)
                nvstr.append("%s %s %s\n" % (v[0], v[1], v[2]))
        else:
            for v in verts:
                v = map(Round_s, v.co)
                nvstr.append("%s %s %s\n" % (v[0], v[1], v[2]))
        file.write("".join(nvstr))

    def numsurf(self, mesh, foomesh = False):
        """Write the numsurf section: one SURF per face plus one per loose
        edge, with material index, flags and vertex refs (+ UVs)."""
        global MATIDX_ERROR
        # local vars are faster and so better in tight loops
        lc_ADD_DEFAULT_MAT = ADD_DEFAULT_MAT
        lc_MATIDX_ERROR = MATIDX_ERROR
        lc_PER_FACE_1_OR_2_SIDED = PER_FACE_1_OR_2_SIDED
        lc_FACE_TWOSIDED = FACE_TWOSIDED
        lc_MESH_TWOSIDED = MESH_TWOSIDED
        faces = mesh.faces
        hasFaceUV = mesh.faceUV
        if foomesh:
            looseEdges = mesh.looseEdges
        else:
            looseEdges = get_loose_edges(mesh)
        file = self.file
        file.write("numsurf %s\n" % (len(faces) + len(looseEdges)))
        if not foomesh: verts = list(self.mesh.verts)
        materials = self.mesh.materials
        mlist = self.mlist
        matidx_error_reported = False
        objmats = []
        for omat in materials:
            if omat: objmats.append(omat.name)
            else: objmats.append(None)
        for f in faces:
            if not objmats:
                m_idx = 0
            elif objmats[f.mat] in mlist:
                m_idx = mlist.index(objmats[f.mat])
            else:
                # face has a material *index* but no material linked there;
                # warn once in detail, then once per further object
                if not lc_MATIDX_ERROR:
                    rdat = REPORT_DATA['warns']
                    rdat.append("Object %s" % self.obj.name)
                    rdat.append("has at least one material *index* assigned but not")
                    rdat.append("defined (not linked to an existing material).")
                    rdat.append("Result: some faces may be exported with a wrong color.")
                    rdat.append("You can assign materials in the Edit Buttons window (F9).")
                elif not matidx_error_reported:
                    midxmsg = "- Same for object %s." % self.obj.name
                    REPORT_DATA['warns'].append(midxmsg)
                lc_MATIDX_ERROR += 1
                matidx_error_reported = True
                m_idx = 0
                # cancelled by the += 1 below, so the face ends up with the
                # default material (index 0)
                if lc_ADD_DEFAULT_MAT: m_idx -= 1
            refs = len(f)
            flaglow = 0 # polygon
            if lc_PER_FACE_1_OR_2_SIDED and hasFaceUV: # per face attribute
                two_side = f.mode & lc_FACE_TWOSIDED
            else: # global, for the whole mesh
                two_side = self.mesh.mode & lc_MESH_TWOSIDED
            two_side = (two_side > 0) << 1  # bit 1 of the high nibble
            flaghigh = f.smooth | two_side
            # both nibbles are in 0..3, so decimal %d is a valid hex digit
            surfstr = "SURF 0x%d%d\n" % (flaghigh, flaglow)
            # the default material occupies slot 0, shifting real materials
            if lc_ADD_DEFAULT_MAT and objmats: m_idx += 1
            matstr = "mat %s\n" % m_idx
            refstr = "refs %s\n" % refs
            u, v, vi = 0, 0, 0
            fvstr = []
            if foomesh:
                # FooMesh verts already carry compact indices
                for vert in f.v:
                    fvstr.append(str(vert.index))
                    if hasFaceUV:
                        u = f.uv[vi][0]
                        v = f.uv[vi][1]
                        vi += 1
                    fvstr.append(" %s %s\n" % (u, v))
            else:
                for vert in f.v:
                    fvstr.append(str(verts.index(vert)))
                    if hasFaceUV:
                        u = f.uv[vi][0]
                        v = f.uv[vi][1]
                        vi += 1
                    fvstr.append(" %s %s\n" % (u, v))
            fvstr = "".join(fvstr)
            file.write("%s%s%s%s" % (surfstr, matstr, refstr, fvstr))
        # material for loose edges
        edges_mat = 0 # default to first material
        for omat in objmats: # but look for a material from this mesh
            if omat in mlist:
                edges_mat = mlist.index(omat)
                if lc_ADD_DEFAULT_MAT: edges_mat += 1
                break
        for e in looseEdges:
            fvstr = []
            # SURF 0x02: flaghigh 0, flaglow 2 = line (1 would be closed line)
            surfstr = "SURF 0x02\n"
            fvstr.append("%d 0 0\n" % verts.index(e.v1))
            fvstr.append("%d 0 0\n" % verts.index(e.v2))
            fvstr = "".join(fvstr)
            matstr = "mat %d\n" % edges_mat # for now, use first material
            refstr = "refs 2\n" # 2 verts
            file.write("%s%s%s%s" % (surfstr, matstr, refstr, fvstr))
        MATIDX_ERROR = lc_MATIDX_ERROR
# End of Class AC3DExport
from Blender.Window import FileSelector
def report_data():
    """Print the messages collected in REPORT_DATA to the console.

    Sections are printed in a fixed order (the leading digit of each msgs
    key only controls sorting and is stripped to index REPORT_DATA).
    Does nothing when VERBOSE is off.
    """
    global VERBOSE
    if not VERBOSE: return
    d = REPORT_DATA
    msgs = {
        '0main': '%s\nExporting meshes to AC3D format' % str(19*'-'),
        '1warns': 'Warnings',
        '2errors': 'Errors',
        '3nosplit': 'Not split (because name starts with "=" or "$")',
        '4noexport': 'Not exported (because name starts with "!" or "#")'
    }
    if NO_SPLIT:
        # reword the "not split" heading to reflect the global option
        l = msgs['3nosplit']
        l = "%s (because OPTION NO_SPLIT is set)" % l.split('(')[0]
        msgs['3nosplit'] = l
    keys = msgs.keys()
    keys.sort()
    for k in keys:
        msgk = msgs[k]
        msg = '\n'.join(d[k[1:]])  # k[1:] strips the ordering digit
        if msg:
            print '\n-%s:' % msgk
            print msg
# File Selector callback:
def fs_callback(filename):
    """Export the global OBJS list to *filename* (".ac" appended if missing).

    Optionally asks before overwriting an existing file, remembers the
    chosen export directory in the Registry, and reports timing/results
    via REPORT_DATA.  Errors opening the file pop up a menu and abort.
    """
    global EXPORT_DIR, OBJS, CONFIRM_OVERWRITE, VERBOSE
    if not filename.endswith('.ac'): filename = '%s.ac' % filename
    if bsys.exists(filename) and CONFIRM_OVERWRITE:
        if Blender.Draw.PupMenu('OVERWRITE?%t|File exists') != 1:
            return
    Blender.Window.WaitCursor(1)
    starttime = bsys.time()
    export_dir = bsys.dirname(filename)
    if export_dir != EXPORT_DIR:
        # remember the new folder for next time
        EXPORT_DIR = export_dir
        update_RegistryInfo()
    try:
        file = open(filename, 'w')
    except IOError, (errno, strerror):
        error = "IOError #%s: %s" % (errno, strerror)
        REPORT_DATA['errors'].append("Saving failed - %s." % error)
        error_msg = "Couldn't save file!%%t|%s" % error
        Blender.Draw.PupMenu(error_msg)
        return
    # close the file whether the export succeeds or raises
    try:
        test = AC3DExport(OBJS, file)
    except:
        file.close()
        raise
    else:
        file.close()
        endtime = bsys.time() - starttime
        REPORT_DATA['main'].append("Done. Saved to: %s" % filename)
        REPORT_DATA['main'].append("Data exported in %.3f seconds." % endtime)
    if VERBOSE: report_data()
    Blender.Window.WaitCursor(0)
# -- End of definitions
# Script entry point: gather the objects to export (selected only, or the
# whole scene) and open the file selector with a sensible default name.
scn = Blender.Scene.GetCurrent()
if ONLY_SELECTED:
    OBJS = list(scn.objects.context)
else:
    OBJS = list(scn.objects)
if not OBJS:
    Blender.Draw.PupMenu('ERROR: no objects selected')
else:
    # default export name: blend file name with .ac extension
    fname = bsys.makename(ext=".ac")
    if EXPORT_DIR:
        fname = bsys.join(EXPORT_DIR, bsys.basename(fname))
    FileSelector(fs_callback, "Export AC3D", fname)
| 30.352232 | 228 | 0.657499 |
""" Registration info for Blender menus:
Name: 'AC3D (.ac)...'
Blender: 243
Group: 'Export'
Tip: 'Export selected meshes to AC3D (.ac) format'
"""
__author__ = "Willian P. Germano"
__url__ = ("blender", "blenderartists.org", "AC3D's homepage, http://www.ac3d.org",
"PLib 3d gaming lib, http://plib.sf.net")
__version__ = "2.44 2007-05-05"
__bpydoc__ = """\
This script exports selected Blender meshes to AC3D's .ac file format.
AC3D is a simple commercial 3d modeller also built with OpenGL.
The .ac file format is an easy to parse text format well supported,
for example, by the PLib 3d gaming library (AC3D 3.x).
Supported:<br>
UV-textured meshes with hierarchy (grouping) information.
Missing:<br>
The 'url' tag, specific to AC3D. It is easy to add by hand to the exported
file, if needed.
Known issues:<br>
The ambient and emit data we can retrieve from Blender are single values,
that this script copies to R, G, B, giving shades of gray.<br>
Loose edges (lines) receive the first material found in the mesh, if any, or a default white material.<br>
In AC3D 4 "compatibility mode":<br>
- shininess of materials is taken from the shader specularity value in Blender, mapped from [0.0, 2.0] to [0, 128];<br>
- crease angle is exported, but in Blender it is limited to [1, 80], since there are other more powerful ways to control surface smoothing. In AC3D 4.0 crease's range is [0.0, 180.0];
Config Options:<br>
toggle:<br>
- AC3D 4 mode: unset it to export without the 'crease' tag that was
introduced with AC3D 4.0 and with the old material handling;<br>
- global coords: transform all vertices of all meshes to global coordinates;<br>
- skip data: set it if you don't want mesh names (ME:, not OB: field)
to be exported as strings for AC's "data" tags (19 chars max);<br>
- rgb mirror color can be exported as ambient and/or emissive if needed,
since Blender handles these differently;<br>
- default mat: a default (white) material is added if some mesh was
left without mats -- it's better to always add your own materials;<br>
- no split: don't split meshes (see above);<br>
- set texture dir: override the actual textures path with a given default
path (or simply export the texture names, without dir info, if the path is
empty);<br>
- per face 1 or 2 sided: override the "Double Sided" button that defines this behavior per whole mesh in favor of the UV Face Select mode "twosided" per face attribute;<br>
- only selected: only consider selected objects when looking for meshes
to export (read notes below about tokens, too);<br>
strings:<br>
- export dir: default dir to export to;<br>
- texture dir: override textures path with this path if 'set texture dir'
toggle is "on".
Notes:<br>
This version updates:<br>
- modified meshes are correctly exported, no need to apply the modifiers in Blender;<br>
- correctly export each used material, be it assigned to the object or to its mesh data;<br>
- exporting lines (edges) is again supported; color comes from first material found in the mesh, if any, or a default white one.<br>
- there's a new option to choose between exporting meshes with transformed (global) coordinates or local ones;<br>
Multiple textures per mesh are supported (mesh gets split);<br>
Parents are exported as a group containing both the parent and its children;<br>
Start mesh object names (OB: field) with "!" or "#" if you don't want them to be exported;<br>
Start mesh object names (OB: field) with "=" or "$" to prevent them from being split (meshes with multiple textures or both textured and non textured faces are split unless this trick is used or the "no split" option is set).
"""
# $Id: ac3d_export.py 14530 2008-04-23 14:04:05Z campbellbarton $
#
# --------------------------------------------------------------------------
# AC3DExport version 2.44
# Program versions: Blender 2.42+ and AC3Db files (means version 0xb)
# new: updated for new Blender version and Mesh module; supports lines (edges) again;
# option to export vertices transformed to global coordinates or not; now the modified
# (by existing mesh modifiers) mesh is exported; materials are properly exported, no
# matter if each of them is linked to the mesh or to the object. New (2.43.1): loose
# edges use color of first material found in the mesh, if any.
# --------------------------------------------------------------------------
# Thanks: Steve Baker for discussions and inspiration; for testing, bug
# reports, suggestions, patches: David Megginson, Filippo di Natale,
# Franz Melchior, Campbell Barton, Josh Babcock, Ralf Gerlich, Stewart Andreason.
# --------------------------------------------------------------------------
# ***** BEGIN GPL LICENSE BLOCK *****
#
# Copyright (C) 2004-2007: Willian P. Germano, wgermano _at_ ig.com.br
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# --------------------------------------------------------------------------
import Blender
from Blender import Object, Mesh, Material, Image, Mathutils, Registry
from Blender import sys as bsys
# Globals
# Per-category log of messages gathered during an export run; dumped to the
# console by report_data() when the export finishes.
REPORT_DATA = {
    'main': [],
    'errors': [],
    'warns': [],
    'nosplit': [],
    'noexport': []
}
# Leading characters that mark an object "do not export" / "do not split";
# doubling the character escapes it (see dont_export / dont_split).
# FIX: the TOKENS_DONT_EXPORT line was truncated to an unterminated string
# literal ("['!', '") — a syntax error; restored to the intended value.
TOKENS_DONT_EXPORT = ['!', '#']
TOKENS_DONT_SPLIT = ['=', '$']
# Counts faces found with a material *index* assigned but no material linked
# to that slot (updated by AC3DExport.numsurf).
MATIDX_ERROR = 0
# flags:
LOOSE = Mesh.EdgeFlags['LOOSE']
FACE_TWOSIDED = Mesh.FaceModes['TWOSIDE']
MESH_TWOSIDED = Mesh.Modes['TWOSIDED']
# Blender.Registry key under which the exporter's configuration is persisted.
REG_KEY = 'ac3d_export'
# config options (defaults; overridden below from the saved Registry data,
# see the 'tooltips' dict for what each option means):
GLOBAL_COORDS = True
SKIP_DATA = False
MIRCOL_AS_AMB = False
MIRCOL_AS_EMIS = False
ADD_DEFAULT_MAT = True
SET_TEX_DIR = True
TEX_DIR = ''
AC3D_4 = True # export crease value, compatible with AC3D 4 loaders
NO_SPLIT = False
ONLY_SELECTED = True
EXPORT_DIR = ''
PER_FACE_1_OR_2_SIDED = True
# Help strings shown in the scripts config editor, one per option above.
tooltips = {
    'GLOBAL_COORDS': "transform all vertices of all meshes to global coordinates",
    'SKIP_DATA': "don't export mesh names as data fields",
    'MIRCOL_AS_AMB': "export mirror color as ambient color",
    'MIRCOL_AS_EMIS': "export mirror color as emissive color",
    'ADD_DEFAULT_MAT': "always add a default white material",
    'SET_TEX_DIR': "don't export default texture paths (edit also \"tex dir\")",
    'EXPORT_DIR': "default / last folder used to export .ac files to",
    'TEX_DIR': "(see \"set tex dir\") dir to prepend to all exported texture names (leave empty for no dir)",
    'AC3D_4': "compatibility mode, adds 'crease' tag and slightly better material support",
    'NO_SPLIT': "don't split meshes with multiple textures (or both textured and non textured polygons)",
    'ONLY_SELECTED': "export only selected objects",
    'PER_FACE_1_OR_2_SIDED': "override \"Double Sided\" button in favor of per face \"twosided\" attribute (UV Face Select mode)"
}
def update_RegistryInfo():
    """Persist the current exporter options in Blender's Registry.

    Stores every config toggle plus the tooltips dict under REG_KEY, so the
    settings survive across Blender sessions (third arg True = write to disk).
    """
    settings = {
        'SKIP_DATA': SKIP_DATA,
        'MIRCOL_AS_AMB': MIRCOL_AS_AMB,
        'MIRCOL_AS_EMIS': MIRCOL_AS_EMIS,
        'ADD_DEFAULT_MAT': ADD_DEFAULT_MAT,
        'SET_TEX_DIR': SET_TEX_DIR,
        'TEX_DIR': TEX_DIR,
        'AC3D_4': AC3D_4,
        'NO_SPLIT': NO_SPLIT,
        'EXPORT_DIR': EXPORT_DIR,
        'ONLY_SELECTED': ONLY_SELECTED,
        'PER_FACE_1_OR_2_SIDED': PER_FACE_1_OR_2_SIDED,
        'tooltips': tooltips,
        'GLOBAL_COORDS': GLOBAL_COORDS
    }
    Registry.SetKey(REG_KEY, settings, True)
# Look for previously saved exporter settings in the Registry:
rd = Registry.GetKey(REG_KEY, True)
if rd:
    # Restore saved options; a KeyError means the stored data predates the
    # current option set, so rewrite it with the defaults.
    try:
        AC3D_4 = rd['AC3D_4']
        SKIP_DATA = rd['SKIP_DATA']
        MIRCOL_AS_AMB = rd['MIRCOL_AS_AMB']
        MIRCOL_AS_EMIS = rd['MIRCOL_AS_EMIS']
        ADD_DEFAULT_MAT = rd['ADD_DEFAULT_MAT']
        SET_TEX_DIR = rd['SET_TEX_DIR']
        TEX_DIR = rd['TEX_DIR']
        EXPORT_DIR = rd['EXPORT_DIR']
        ONLY_SELECTED = rd['ONLY_SELECTED']
        NO_SPLIT = rd['NO_SPLIT']
        PER_FACE_1_OR_2_SIDED = rd['PER_FACE_1_OR_2_SIDED']
        GLOBAL_COORDS = rd['GLOBAL_COORDS']
    except KeyError: update_RegistryInfo()
else:
    # First run: create the Registry entry with the defaults.
    update_RegistryInfo()
VERBOSE = True
CONFIRM_OVERWRITE = True
# check the General scripts config key for default behaviors
rd = Registry.GetKey('General', True)
if rd:
    try:
        VERBOSE = rd['verbose']
        CONFIRM_OVERWRITE = rd['confirm_overwrite']
    except: pass
# The default material used when a mesh has none (see ADD_DEFAULT_MAT);
# note: the backslashes continue the *string literal*, it is one line of output.
DEFAULT_MAT = \
'MATERIAL "DefaultWhite" rgb 1 1 1 amb 1 1 1 emis 0 0 0 \
spec 0.5 0.5 0.5 shi 64 trans 0'
# This transformation aligns Blender and AC3D coordinate systems
# (swaps Y and Z, negating one axis):
BLEND_TO_AC3D_MATRIX = Mathutils.Matrix([1,0,0,0], [0,0,-1,0], [0,1,0,0], [0,0,0,1])
def Round_s(f):
    """Round f to 6 decimal places and return it as a compact string.

    Whole numbers are written without a trailing '.0' (e.g. 2.0 -> '2'),
    keeping the exported .ac file small.
    """
    value = round(f, 6)  # precision fixed at 10e-06
    return str(int(value)) if value == int(value) else str(value)
def transform_verts(verts, m):
    """Return the vertices' coordinates as homogeneous vectors times matrix m.

    Each Blender vertex contributes a 4D Mathutils.Vector (x, y, z, 1)
    multiplied by the 4x4 transformation matrix m.
    """
    return [
        Mathutils.Vector([v.co[0], v.co[1], v.co[2], 1]) * m
        for v in verts
    ]
def get_loose_edges(mesh):
    """Return the mesh edges that have the LOOSE flag set (edges not
    belonging to any face); they are exported as AC3D line surfaces."""
    loose_flag = LOOSE
    result = []
    for edge in mesh.edges:
        if edge.flag & loose_flag:
            result.append(edge)
    return result
# Meshes with more than one texture assigned are split and saved as these
# foomeshes (see AC3DExport.split_mesh).
class FooMesh:
    """Light-weight stand-in for a Blender mesh holding a subset of faces.

    Vertices referenced by the subset are re-indexed from 0 so each FooMesh
    can be exported as an independent AC3D 'poly' object.
    """

    class FooVert:
        # Wraps a real vertex so it can carry a remapped index without
        # touching the original mesh data.
        def __init__(self, v):
            self.v = v
            self.index = 0

    class FooFace:
        # Wraps a real face, replacing its vertices with FooVerts; all
        # other attribute reads are forwarded to the wrapped face.
        def __init__(self, foomesh, f):
            self.f = f
            foov = foomesh.FooVert
            # faces have 2 (edge), 3 (tri) or 4 (quad) vertices
            self.v = [foov(f.v[0]), foov(f.v[1])]
            len_fv = len(f.v)
            if len_fv > 2 and f.v[2]:
                self.v.append(foov(f.v[2]))
                if len_fv > 3 and f.v[3]: self.v.append(foov(f.v[3]))
        def __getattr__(self, attr):
            # 'v' is ours; everything else (mat, mode, uv, smooth, ...)
            # comes from the wrapped Blender face.
            if attr == 'v': return self.v
            return getattr(self.f, attr)
        def __len__(self):
            return len(self.f)

    def __init__(self, tex, faces, mesh):
        # tex: texture key this sub-mesh was split on (unused here, kept
        # for the caller); faces: the subset of mesh faces; mesh: source.
        self.name = mesh.name
        self.mesh = mesh
        self.looseEdges = []
        self.faceUV = mesh.faceUV
        self.degr = mesh.degr
        # Pass 1: wrap the faces and mark (with 1) every vertex they use.
        vidxs = [0]*len(mesh.verts)
        foofaces = []
        for f in faces:
            foofaces.append(self.FooFace(self, f))
            for v in f.v:
                if v: vidxs[v.index] = 1
        # Pass 2: collect the used vertices and reuse vidxs as a map from
        # old index -> new compact index.
        i = 0
        fooverts = []
        for v in mesh.verts:
            if vidxs[v.index]:
                fooverts.append(v)
                vidxs[v.index] = i
                i += 1
        # Pass 3: rewrite each FooVert's index through the map.
        for f in foofaces:
            for v in f.v:
                if v: v.index = vidxs[v.v.index]
        self.faces = foofaces
        self.verts = fooverts
class AC3DExport:
def __init__(self, scene_objects, file):
global ARG, SKIP_DATA, ADD_DEFAULT_MAT, DEFAULT_MAT
header = 'AC3Db'
self.file = file
self.buf = ''
self.mbuf = []
self.mlist = []
world_kids = 0
parents_list = self.parents_list = []
kids_dict = self.kids_dict = {}
objs = []
exp_objs = self.exp_objs = []
tree = {}
file.write(header+'\n')
objs = \
[o for o in scene_objects if o.type in ['Mesh', 'Empty']]
for obj in objs[:]:
parent = obj.parent
lineage = [obj]
while parent:
parents_list.append(parent.name)
obj = parent
parent = parent.getParent()
lineage.insert(0, obj)
d = tree
for i in xrange(len(lineage)):
lname = lineage[i].getType()[:2] + lineage[i].name
if lname not in d.keys():
d[lname] = {}
d = d[lname]
self.traverse_dict(tree)
world_kids = len(tree.keys())
objlist = [Object.Get(name) for name in exp_objs]
meshlist = [o for o in objlist if o.type == 'Mesh']
TMP_mesh = Mesh.New('tmp_for_ac_export')
self.MATERIALS(meshlist, TMP_mesh)
mbuf = self.mbuf
if not mbuf or ADD_DEFAULT_MAT:
mbuf.insert(0, "%s\n" % DEFAULT_MAT)
mbuf = "".join(mbuf)
file.write(mbuf)
file.write('OBJECT world\nkids %s\n' % world_kids)
for obj in objlist:
self.obj = obj
objtype = obj.type
objname = obj.name
kidsnum = kids_dict[objname]
if kidsnum:
self.OBJECT('group')
self.name(objname)
if objtype == 'Mesh':
kidsnum += 1
if not GLOBAL_COORDS:
localmatrix = obj.getMatrix('localspace')
if not obj.getParent():
localmatrix *= BLEND_TO_AC3D_MATRIX
self.rot(localmatrix.rotationPart())
self.loc(localmatrix.translationPart())
self.kids(kidsnum)
if objtype == 'Mesh':
mesh = TMP_mesh
mesh.getFromObject(objname)
self.mesh = mesh
if mesh.faceUV:
meshes = self.split_mesh(mesh)
else:
meshes = [mesh]
if len(meshes) > 1:
if NO_SPLIT or self.dont_split(objname):
self.export_mesh(mesh, ob)
REPORT_DATA['nosplit'].append(objname)
else:
self.OBJECT('group')
self.name(objname)
self.kids(len(meshes))
counter = 0
for me in meshes:
self.export_mesh(me, obj,
name = '%s_%s' % (obj.name, counter), foomesh = True)
self.kids()
counter += 1
else:
self.export_mesh(mesh, obj)
self.kids()
def traverse_dict(self, d):
kids_dict = self.kids_dict
exp_objs = self.exp_objs
keys = d.keys()
keys.sort()
keys.reverse()
for k in keys:
objname = k[2:]
klen = len(d[k])
kids_dict[objname] = klen
if self.dont_export(objname):
d.pop(k)
parent = Object.Get(objname).getParent()
if parent: kids_dict[parent.name] -= 1
REPORT_DATA['noexport'].append(objname)
continue
if klen:
self.traverse_dict(d[k])
exp_objs.insert(0, objname)
else:
if k.find('Em', 0) == 0:
d.pop(k)
parent = Object.Get(objname).getParent()
if parent: kids_dict[parent.name] -= 1
else:
exp_objs.insert(0, objname)
def dont_export(self, name):
length = len(name)
if length >= 1:
if name[0] in TOKENS_DONT_EXPORT:
if length > 1 and name[1] == name[0]:
return 0
return 1
def dont_split(self, name):
length = len(name)
if length >= 1:
if name[0] in TOKENS_DONT_SPLIT:
if length > 1 and name[1] == name[0]:
return 0
return 1
def split_mesh(self, mesh):
tex_dict = {0:[]}
for f in mesh.faces:
if f.image:
if not f.image.name in tex_dict: tex_dict[f.image.name] = []
tex_dict[f.image.name].append(f)
else: tex_dict[0].append(f)
keys = tex_dict.keys()
len_keys = len(keys)
if not tex_dict[0]:
len_keys -= 1
tex_dict.pop(0)
keys.remove(0)
elif len_keys > 1:
lines = []
anyimgkey = [k for k in keys if k != 0][0]
for f in tex_dict[0]:
if len(f.v) < 3:
lines.append(f)
if len(tex_dict[0]) == len(lines):
for l in lines:
tex_dict[anyimgkey].append(l)
len_keys -= 1
tex_dict.pop(0)
if len_keys > 1:
foo_meshes = []
for k in keys:
faces = tex_dict[k]
foo_meshes.append(FooMesh(k, faces, mesh))
foo_meshes[0].edges = get_loose_edges(mesh)
return foo_meshes
return [mesh]
def export_mesh(self, mesh, obj, name = None, foomesh = False):
file = self.file
self.OBJECT('poly')
if not name: name = obj.name
self.name(name)
if not SKIP_DATA:
meshname = obj.getData(name_only = True)
self.data(len(meshname), meshname)
if mesh.faceUV:
texline = self.texture(mesh.faces)
if texline: file.write(texline)
if AC3D_4:
self.crease(mesh.degr)
# their topmost parents (the parents w/o parents) transformations.
if not GLOBAL_COORDS:
# We hold parents in a list, so they also don't get transformed,
if obj.name not in self.parents_list:
localmatrix = obj.getMatrix('localspace')
if not obj.getParent():
localmatrix *= BLEND_TO_AC3D_MATRIX
self.rot(localmatrix.rotationPart())
self.loc(localmatrix.translationPart())
matrix = None
else:
matrix = obj.getMatrix() * BLEND_TO_AC3D_MATRIX
self.numvert(mesh.verts, matrix)
self.numsurf(mesh, foomesh)
def MATERIALS(self, meshlist, me):
for meobj in meshlist:
me.getFromObject(meobj)
mats = me.materials
mbuf = []
mlist = self.mlist
for m in mats:
if not m: continue
name = m.name
if name not in mlist:
mlist.append(name)
M = Material.Get(name)
material = 'MATERIAL "%s"' % name
mirCol = "%s %s %s" % (Round_s(M.mirCol[0]), Round_s(M.mirCol[1]),
Round_s(M.mirCol[2]))
rgb = "rgb %s %s %s" % (Round_s(M.R), Round_s(M.G), Round_s(M.B))
ambval = Round_s(M.amb)
amb = "amb %s %s %s" % (ambval, ambval, ambval)
spec = "spec %s %s %s" % (Round_s(M.specCol[0]),
Round_s(M.specCol[1]), Round_s(M.specCol[2]))
if AC3D_4:
emit = Round_s(M.emit)
emis = "emis %s %s %s" % (emit, emit, emit)
shival = int(M.spec * 64)
else:
emis = "emis 0 0 0"
shival = 72
shi = "shi %s" % shival
trans = "trans %s" % (Round_s(1 - M.alpha))
if MIRCOL_AS_AMB:
amb = "amb %s" % mirCol
if MIRCOL_AS_EMIS:
emis = "emis %s" % mirCol
mbuf.append("%s %s %s %s %s %s %s\n" \
% (material, rgb, amb, emis, spec, shi, trans))
self.mlist = mlist
self.mbuf.append("".join(mbuf))
def OBJECT(self, type):
self.file.write('OBJECT %s\n' % type)
def name(self, name):
if name[0] in TOKENS_DONT_EXPORT or name[0] in TOKENS_DONT_SPLIT:
if len(name) > 1: name = name[1:]
self.file.write('name "%s"\n' % name)
def kids(self, num = 0):
self.file.write('kids %s\n' % num)
def data(self, num, str):
self.file.write('data %s\n%s\n' % (num, str))
    def texture(self, faces):
        """Write 'texture' and 'texrep' lines for the first textured face.

        Only the image of the first face that has one is used; faces with no
        image leave the object untextured (nothing is written).
        """
        tex = ""
        for f in faces:
            if f.image:
                tex = f.image.name
                break
        if tex:
            image = Image.Get(tex)
            texfname = image.filename
            if SET_TEX_DIR:
                # Replace the image's directory with the configured TEX_DIR
                # (or strip it entirely when TEX_DIR is empty).
                texfname = bsys.basename(texfname)
                if TEX_DIR:
                    texfname = bsys.join(TEX_DIR, texfname)
            buf = 'texture "%s"\n' % texfname
            xrep = image.xrep
            yrep = image.yrep
            buf += 'texrep %s %s\n' % (xrep, yrep)
            self.file.write(buf)
    def rot(self, matrix):
        """Write a 'rot' line for a 3x3 rotation matrix.

        The line is omitted when the rounded matrix is the identity.
        """
        rot = ''
        not_I = 0  # non-zero once a rounded entry differs from the identity
        matstr = []
        for i in [0, 1, 2]:
            # Python 2 semantics: map() returns an indexable list here.
            r = map(Round_s, matrix[i])
            not_I += (r[0] != '0')+(r[1] != '0')+(r[2] != '0')
            not_I -= (r[i] == '1')
            for j in [0, 1, 2]:
                matstr.append(' %s' % r[j])
        if not_I:
            self.file.write('rot%s\n' % "".join(matstr))
    def loc(self, loc):
        """Write a 'loc' (translation) line, omitted for the origin."""
        # Python 2 semantics: map() returns a list, so the list compare works.
        loc = map(Round_s, loc)
        if loc != ['0', '0', '0']:
            self.file.write('loc %s %s %s\n' % (loc[0], loc[1], loc[2]))
    def crease(self, crease):
        """Write the crease angle (only emitted by callers when AC3D_4 is set)."""
        self.file.write('crease %f\n' % crease)
    def numvert(self, verts, matrix):
        """Write the 'numvert' section: the vertex count plus one line per vertex.

        With a matrix the vertices are first run through transform_verts and
        are then plain coordinate triples; without one the raw Blender vertex
        objects are read via their .co attribute.
        """
        file = self.file
        nvstr = []
        nvstr.append("numvert %s\n" % len(verts))
        if matrix:
            verts = transform_verts(verts, matrix)
            for v in verts:
                # Python 2: map() returns an indexable list.
                v = map (Round_s, v)
                nvstr.append("%s %s %s\n" % (v[0], v[1], v[2]))
        else:
            for v in verts:
                v = map(Round_s, v.co)
                nvstr.append("%s %s %s\n" % (v[0], v[1], v[2]))
        file.write("".join(nvstr))
def numsurf(self, mesh, foomesh = False):
global MATIDX_ERROR
lc_ADD_DEFAULT_MAT = ADD_DEFAULT_MAT
lc_MATIDX_ERROR = MATIDX_ERROR
lc_PER_FACE_1_OR_2_SIDED = PER_FACE_1_OR_2_SIDED
lc_FACE_TWOSIDED = FACE_TWOSIDED
lc_MESH_TWOSIDED = MESH_TWOSIDED
faces = mesh.faces
hasFaceUV = mesh.faceUV
if foomesh:
looseEdges = mesh.looseEdges
else:
looseEdges = get_loose_edges(mesh)
file = self.file
file.write("numsurf %s\n" % (len(faces) + len(looseEdges)))
if not foomesh: verts = list(self.mesh.verts)
materials = self.mesh.materials
mlist = self.mlist
matidx_error_reported = False
objmats = []
for omat in materials:
if omat: objmats.append(omat.name)
else: objmats.append(None)
for f in faces:
if not objmats:
m_idx = 0
elif objmats[f.mat] in mlist:
m_idx = mlist.index(objmats[f.mat])
else:
if not lc_MATIDX_ERROR:
rdat = REPORT_DATA['warns']
rdat.append("Object %s" % self.obj.name)
rdat.append("has at least one material *index* assigned but not")
rdat.append("defined (not linked to an existing material).")
rdat.append("Result: some faces may be exported with a wrong color.")
rdat.append("You can assign materials in the Edit Buttons window (F9).")
elif not matidx_error_reported:
midxmsg = "- Same for object %s." % self.obj.name
REPORT_DATA['warns'].append(midxmsg)
lc_MATIDX_ERROR += 1
matidx_error_reported = True
m_idx = 0
if lc_ADD_DEFAULT_MAT: m_idx -= 1
refs = len(f)
flaglow = 0
if lc_PER_FACE_1_OR_2_SIDED and hasFaceUV:
two_side = f.mode & lc_FACE_TWOSIDED
else:
two_side = self.mesh.mode & lc_MESH_TWOSIDED
two_side = (two_side > 0) << 1
flaghigh = f.smooth | two_side
surfstr = "SURF 0x%d%d\n" % (flaghigh, flaglow)
if lc_ADD_DEFAULT_MAT and objmats: m_idx += 1
matstr = "mat %s\n" % m_idx
refstr = "refs %s\n" % refs
u, v, vi = 0, 0, 0
fvstr = []
if foomesh:
for vert in f.v:
fvstr.append(str(vert.index))
if hasFaceUV:
u = f.uv[vi][0]
v = f.uv[vi][1]
vi += 1
fvstr.append(" %s %s\n" % (u, v))
else:
for vert in f.v:
fvstr.append(str(verts.index(vert)))
if hasFaceUV:
u = f.uv[vi][0]
v = f.uv[vi][1]
vi += 1
fvstr.append(" %s %s\n" % (u, v))
fvstr = "".join(fvstr)
file.write("%s%s%s%s" % (surfstr, matstr, refstr, fvstr))
edges_mat = 0
for omat in objmats:
if omat in mlist:
edges_mat = mlist.index(omat)
if lc_ADD_DEFAULT_MAT: edges_mat += 1
break
for e in looseEdges:
fvstr = []
0x02\n"
fvstr.append("%d 0 0\n" % verts.index(e.v1))
fvstr.append("%d 0 0\n" % verts.index(e.v2))
fvstr = "".join(fvstr)
matstr = "mat %d\n" % edges_mat
refstr = "refs 2\n"
file.write("%s%s%s%s" % (surfstr, matstr, refstr, fvstr))
MATIDX_ERROR = lc_MATIDX_ERROR
from Blender.Window import FileSelector
def report_data():
    """Print the collected export report (main/warns/errors/skips) to the
    console, unless VERBOSE is off.  Python 2 code (print statements)."""
    global VERBOSE
    if not VERBOSE: return
    d = REPORT_DATA
    # Keys carry a digit prefix so that sorting yields the display order;
    # the prefix is stripped (k[1:]) when indexing REPORT_DATA below.
    msgs = {
        '0main': '%s\nExporting meshes to AC3D format' % str(19*'-'),
        '1warns': 'Warnings',
        '2errors': 'Errors',
        '3nosplit': 'Not split (because name starts with "=" or "$")',
        '4noexport': 'Not exported (because name starts with "!" or "#")'
    }
    if NO_SPLIT:
        l = msgs['3nosplit']
        l = "%s (because OPTION NO_SPLIT is set)" % l.split('(')[0]
        msgs['3nosplit'] = l
    keys = msgs.keys()
    keys.sort()  # Python 2: dict.keys() returns a sortable list
    for k in keys:
        msgk = msgs[k]
        msg = '\n'.join(d[k[1:]])
        if msg:
            print '\n-%s:' % msgk
            print msg
def fs_callback(filename):
    """FileSelector callback: export OBJS to `filename` in AC3D format.

    Forces an ".ac" extension, optionally confirms overwriting, remembers the
    chosen directory and reports success or failure via REPORT_DATA.
    """
    global EXPORT_DIR, OBJS, CONFIRM_OVERWRITE, VERBOSE
    if not filename.endswith('.ac'): filename = '%s.ac' % filename
    if bsys.exists(filename) and CONFIRM_OVERWRITE:
        if Blender.Draw.PupMenu('OVERWRITE?%t|File exists') != 1:
            return
    Blender.Window.WaitCursor(1)
    starttime = bsys.time()
    export_dir = bsys.dirname(filename)
    if export_dir != EXPORT_DIR:
        # Persist the last-used directory for the next export.
        EXPORT_DIR = export_dir
        update_RegistryInfo()
    try:
        file = open(filename, 'w')
    except IOError, (errno, strerror):  # Python 2 exception syntax
        error = "IOError #%s: %s" % (errno, strerror)
        REPORT_DATA['errors'].append("Saving failed - %s." % error)
        error_msg = "Couldn't save file!%%t|%s" % error
        Blender.Draw.PupMenu(error_msg)
        return
    try:
        test = AC3DExport(OBJS, file)
    except:
        # Close the half-written file, then re-raise the original error.
        file.close()
        raise
    else:
        file.close()
        endtime = bsys.time() - starttime
        REPORT_DATA['main'].append("Done. Saved to: %s" % filename)
        REPORT_DATA['main'].append("Data exported in %.3f seconds." % endtime)
    if VERBOSE: report_data()
    Blender.Window.WaitCursor(0)
# -- End of definitions
# Script entry point: choose the objects to export, then open a file selector
# that calls fs_callback with the chosen path.
scn = Blender.Scene.GetCurrent()
if ONLY_SELECTED:
    OBJS = list(scn.objects.context)
else:
    OBJS = list(scn.objects)
if not OBJS:
    Blender.Draw.PupMenu('ERROR: no objects selected')
else:
    # Default file name: the blend file's name with an ".ac" extension,
    # relocated into the last-used export directory if one is remembered.
    fname = bsys.makename(ext=".ac")
    if EXPORT_DIR:
        fname = bsys.join(EXPORT_DIR, bsys.basename(fname))
    FileSelector(fs_callback, "Export AC3D", fname)
| false | true |
f721053f1c2b0366de64431ea3ca1a8eaac1c75f | 9,874 | py | Python | tests/conftest.py | dobixu/elastalert2 | 2d403918514d7c6e8aa24658c4c1f683dd143d89 | [
"Apache-2.0"
] | 250 | 2021-04-24T18:06:30.000Z | 2022-03-31T04:37:47.000Z | tests/conftest.py | dobixu/elastalert2 | 2d403918514d7c6e8aa24658c4c1f683dd143d89 | [
"Apache-2.0"
] | 129 | 2021-04-24T17:09:50.000Z | 2022-03-29T08:52:14.000Z | tests/conftest.py | dobixu/elastalert2 | 2d403918514d7c6e8aa24658c4c1f683dd143d89 | [
"Apache-2.0"
] | 128 | 2021-04-25T15:20:34.000Z | 2022-03-31T04:37:49.000Z | # -*- coding: utf-8 -*-
import datetime
import logging
import os
from unittest import mock
import pytest
import elastalert.elastalert
import elastalert.util
from elastalert.util import dt_to_ts
from elastalert.util import ts_to_dt
writeback_index = 'wb'
def pytest_addoption(parser):
    """Register the --runelasticsearch command line flag with pytest."""
    parser.addoption(
        "--runelasticsearch",
        action="store_true",
        default=False,
        help="run elasticsearch tests",
    )
def pytest_collection_modifyitems(config, items):
    """Keep unit tests and elasticsearch integration tests mutually exclusive.

    With --runelasticsearch only tests marked "elasticsearch" run; without it
    those integration tests are skipped instead.
    """
    wants_es = bool(config.getoption("--runelasticsearch"))
    if wants_es:
        marker = pytest.mark.skip(reason="not running when --runelasticsearch option is used to run")
    else:
        marker = pytest.mark.skip(reason="need --runelasticsearch option to run")
    for item in items:
        # Skip every item whose "elasticsearch" marking disagrees with the flag.
        if ("elasticsearch" in item.keywords) != wants_es:
            item.add_marker(marker)
@pytest.fixture(scope='function', autouse=True)
def reset_loggers():
    """Prevent logging handlers from capturing temporary file handles.

    For example, a test that uses the `capsys` fixture and calls
    `logging.exception()` will initialize logging with a default handler that
    captures `sys.stderr`. When the test ends, the file handles will be closed
    and `sys.stderr` will be returned to its original handle, but the logging
    will have a dangling reference to the temporary handle used in the `capsys`
    fixture.
    """
    logger = logging.getLogger()
    # BUGFIX: iterate over a copy -- removeHandler() mutates logger.handlers,
    # and removing while iterating the live list skips every other handler.
    for handler in list(logger.handlers):
        logger.removeHandler(handler)
class mock_es_indices_client(object):
    """Stand-in for the Elasticsearch indices client: every index "exists"."""

    def __init__(self):
        self.exists = mock.Mock(return_value=True)
class mock_es_client(object):
    """Mocked Elasticsearch client that reports itself as version 2.0."""

    def __init__(self, host='es', port=14900):
        self.host = host
        self.port = port
        self.return_hits = []
        # Plain mocked API entry points.
        for method in ('search', 'deprecated_search', 'create', 'index', 'delete'):
            setattr(self, method, mock.Mock())
        self.info = mock.Mock(return_value={'status': 200, 'name': 'foo', 'version': {'number': '2.0'}})
        self.ping = mock.Mock(return_value=True)
        self.indices = mock_es_indices_client()
        self.es_version = mock.Mock(return_value='2.0')
        # Version predicates: pretend to be a pre-5.x cluster.
        for predicate in ('is_atleastfive', 'is_atleastsix', 'is_atleastsixtwo',
                          'is_atleastsixsix', 'is_atleastseven'):
            setattr(self, predicate, mock.Mock(return_value=False))
        self.resolve_writeback_index = mock.Mock(return_value=writeback_index)
class mock_es_sixsix_client(object):
    """Mocked Elasticsearch client that reports itself as version 6.6.0."""

    def __init__(self, host='es', port=14900):
        self.host = host
        self.port = port
        self.return_hits = []
        # Plain mocked API entry points.
        for method in ('search', 'deprecated_search', 'create', 'index', 'delete'):
            setattr(self, method, mock.Mock())
        self.info = mock.Mock(return_value={'status': 200, 'name': 'foo', 'version': {'number': '6.6.0'}})
        self.ping = mock.Mock(return_value=True)
        self.indices = mock_es_indices_client()
        self.es_version = mock.Mock(return_value='6.6.0')
        # Version predicates consistent with a 6.6 cluster.
        for predicate, answer in (('is_atleastfive', True),
                                  ('is_atleastsix', True),
                                  ('is_atleastsixtwo', False),
                                  ('is_atleastsixsix', True),
                                  ('is_atleastseven', False)):
            setattr(self, predicate, mock.Mock(return_value=answer))

        # Doc-type specific writeback index suffixes; unknown types map to
        # the bare index (empty suffix).
        suffixes = {
            'silence': '_silence',
            'past_elastalert': '_past',
            'elastalert_status': '_status',
            'elastalert_error': '_error',
        }

        def writeback_index_side_effect(index, doc_type):
            return index + suffixes.get(doc_type, '')

        self.resolve_writeback_index = mock.Mock(side_effect=writeback_index_side_effect)
class mock_rule_loader(object):
    """Mock rules loader exposing the same attributes as the real loader."""

    def __init__(self, conf):
        self.base_config = conf
        for method in ('load', 'get_hashes', 'load_configuration'):
            setattr(self, method, mock.Mock())
class mock_ruletype(object):
    """Mock rule type: records data calls and yields canned match output."""

    def __init__(self):
        for method in ('add_data', 'add_count_data', 'add_terms_data', 'garbage_collect'):
            setattr(self, method, mock.Mock())
        self.matches = []
        self.get_match_data = lambda match: match
        self.get_match_str = lambda match: "some stuff happened"
class mock_alert(object):
    """Mock alerter whose get_info() identifies it as type "mock"."""

    def __init__(self):
        self.alert = mock.Mock()

    def get_info(self):
        return dict(type='mock')
@pytest.fixture
def ea():
    """ElastAlerter instance wired entirely to mocks: one rule, an ES-2.0
    style client, no real config loading and no background scheduler."""
    rules = [{'es_host': '',
              'es_port': 14900,
              'name': 'anytest',
              'index': 'idx',
              'filter': [],
              'include': ['@timestamp'],
              'aggregation': datetime.timedelta(0),
              'realert': datetime.timedelta(0),
              'processed_hits': {},
              'timestamp_field': '@timestamp',
              'match_enhancements': [],
              'rule_file': 'blah.yaml',
              'max_query_size': 10000,
              'ts_to_dt': ts_to_dt,
              'dt_to_ts': dt_to_ts,
              '_source_enabled': True,
              'run_every': datetime.timedelta(seconds=15)}]
    conf = {'rules_folder': 'rules',
            'run_every': datetime.timedelta(minutes=10),
            'buffer_time': datetime.timedelta(minutes=5),
            'alert_time_limit': datetime.timedelta(hours=24),
            'es_host': 'es',
            'es_port': 14900,
            'writeback_index': 'wb',
            'rules': rules,
            'max_query_size': 10000,
            'old_query_limit': datetime.timedelta(weeks=1),
            'disable_rules_on_error': False,
            'scroll_keepalive': '30s',
            'custom_pretty_ts_format': '%Y-%m-%d %H:%M'}
    # Patch both modules so every component receives the mocked ES client.
    elastalert.util.elasticsearch_client = mock_es_client
    conf['rules_loader'] = mock_rule_loader(conf)
    elastalert.elastalert.elasticsearch_client = mock_es_client
    # Stub out config loading and the scheduler so constructing the alerter
    # does no I/O and starts no threads.
    with mock.patch('elastalert.elastalert.load_conf') as load_conf:
        with mock.patch('elastalert.elastalert.BackgroundScheduler'):
            load_conf.return_value = conf
            conf['rules_loader'].load.return_value = rules
            conf['rules_loader'].get_hashes.return_value = {}
            ea = elastalert.elastalert.ElastAlerter(['--pin_rules'])
    ea.rules[0]['type'] = mock_ruletype()
    ea.rules[0]['alert'] = [mock_alert()]
    ea.writeback_es = mock_es_client()
    # Canned responses for the writeback queries used by the code under test.
    ea.writeback_es.search.return_value = {'hits': {'hits': []}, 'total': 0}
    ea.writeback_es.deprecated_search.return_value = {'hits': {'hits': []}}
    ea.writeback_es.index.return_value = {'_id': 'ABCD', 'created': True}
    ea.current_es = mock_es_client('', '')
    ea.thread_data.current_es = ea.current_es
    ea.thread_data.num_hits = 0
    ea.thread_data.num_dupes = 0
    return ea
@pytest.fixture
def ea_sixsix():
    """Like the `ea` fixture but backed by the ES 6.6 mock client."""
    rules = [{'es_host': '',
              'es_port': 14900,
              'name': 'anytest',
              'index': 'idx',
              'filter': [],
              'include': ['@timestamp'],
              'run_every': datetime.timedelta(seconds=1),
              'aggregation': datetime.timedelta(0),
              'realert': datetime.timedelta(0),
              'processed_hits': {},
              'timestamp_field': '@timestamp',
              'match_enhancements': [],
              'rule_file': 'blah.yaml',
              'max_query_size': 10000,
              'ts_to_dt': ts_to_dt,
              'dt_to_ts': dt_to_ts,
              '_source_enabled': True}]
    conf = {'rules_folder': 'rules',
            'run_every': datetime.timedelta(minutes=10),
            'buffer_time': datetime.timedelta(minutes=5),
            'alert_time_limit': datetime.timedelta(hours=24),
            'es_host': 'es',
            'es_port': 14900,
            'writeback_index': writeback_index,
            'rules': rules,
            'max_query_size': 10000,
            'old_query_limit': datetime.timedelta(weeks=1),
            'disable_rules_on_error': False,
            'scroll_keepalive': '30s',
            'custom_pretty_ts_format': '%Y-%m-%d %H:%M'}
    conf['rules_loader'] = mock_rule_loader(conf)
    # Patch both modules so every component receives the 6.6 mock client.
    elastalert.elastalert.elasticsearch_client = mock_es_sixsix_client
    elastalert.util.elasticsearch_client = mock_es_sixsix_client
    # Stub out config loading and the scheduler so constructing the alerter
    # does no I/O and starts no threads.
    with mock.patch('elastalert.elastalert.load_conf') as load_conf:
        with mock.patch('elastalert.elastalert.BackgroundScheduler'):
            load_conf.return_value = conf
            conf['rules_loader'].load.return_value = rules
            conf['rules_loader'].get_hashes.return_value = {}
            ea_sixsix = elastalert.elastalert.ElastAlerter(['--pin_rules'])
    ea_sixsix.rules[0]['type'] = mock_ruletype()
    ea_sixsix.rules[0]['alert'] = [mock_alert()]
    ea_sixsix.writeback_es = mock_es_sixsix_client()
    # Canned responses for the writeback queries used by the code under test.
    ea_sixsix.writeback_es.search.return_value = {'hits': {'hits': []}}
    ea_sixsix.writeback_es.deprecated_search.return_value = {'hits': {'hits': []}}
    ea_sixsix.writeback_es.index.return_value = {'_id': 'ABCD'}
    ea_sixsix.current_es = mock_es_sixsix_client('', -1)
    return ea_sixsix
@pytest.fixture(scope='function')
def environ():
    """py.test fixture to get a fresh mutable environment."""
    original_env = os.environ
    # Work on a plain-dict copy so tests can mutate freely.
    os.environ = dict(original_env.items())
    yield os.environ
    # Restore the real environment mapping afterwards.
    os.environ = original_env
| 38.570313 | 110 | 0.623962 |
import datetime
import logging
import os
from unittest import mock
import pytest
import elastalert.elastalert
import elastalert.util
from elastalert.util import dt_to_ts
from elastalert.util import ts_to_dt
writeback_index = 'wb'
def pytest_addoption(parser):
parser.addoption(
"--runelasticsearch", action="store_true", default=False, help="run elasticsearch tests"
)
def pytest_collection_modifyitems(config, items):
if config.getoption("--runelasticsearch"):
skip_unit_tests = pytest.mark.skip(reason="not running when --runelasticsearch option is used to run")
for item in items:
if "elasticsearch" not in item.keywords:
item.add_marker(skip_unit_tests)
else:
skip_elasticsearch = pytest.mark.skip(reason="need --runelasticsearch option to run")
for item in items:
if "elasticsearch" in item.keywords:
item.add_marker(skip_elasticsearch)
@pytest.fixture(scope='function', autouse=True)
def reset_loggers():
logger = logging.getLogger()
for handler in logger.handlers:
logger.removeHandler(handler)
class mock_es_indices_client(object):
def __init__(self):
self.exists = mock.Mock(return_value=True)
class mock_es_client(object):
def __init__(self, host='es', port=14900):
self.host = host
self.port = port
self.return_hits = []
self.search = mock.Mock()
self.deprecated_search = mock.Mock()
self.create = mock.Mock()
self.index = mock.Mock()
self.delete = mock.Mock()
self.info = mock.Mock(return_value={'status': 200, 'name': 'foo', 'version': {'number': '2.0'}})
self.ping = mock.Mock(return_value=True)
self.indices = mock_es_indices_client()
self.es_version = mock.Mock(return_value='2.0')
self.is_atleastfive = mock.Mock(return_value=False)
self.is_atleastsix = mock.Mock(return_value=False)
self.is_atleastsixtwo = mock.Mock(return_value=False)
self.is_atleastsixsix = mock.Mock(return_value=False)
self.is_atleastseven = mock.Mock(return_value=False)
self.resolve_writeback_index = mock.Mock(return_value=writeback_index)
class mock_es_sixsix_client(object):
def __init__(self, host='es', port=14900):
self.host = host
self.port = port
self.return_hits = []
self.search = mock.Mock()
self.deprecated_search = mock.Mock()
self.create = mock.Mock()
self.index = mock.Mock()
self.delete = mock.Mock()
self.info = mock.Mock(return_value={'status': 200, 'name': 'foo', 'version': {'number': '6.6.0'}})
self.ping = mock.Mock(return_value=True)
self.indices = mock_es_indices_client()
self.es_version = mock.Mock(return_value='6.6.0')
self.is_atleastfive = mock.Mock(return_value=True)
self.is_atleastsix = mock.Mock(return_value=True)
self.is_atleastsixtwo = mock.Mock(return_value=False)
self.is_atleastsixsix = mock.Mock(return_value=True)
self.is_atleastseven = mock.Mock(return_value=False)
def writeback_index_side_effect(index, doc_type):
if doc_type == 'silence':
return index + '_silence'
elif doc_type == 'past_elastalert':
return index + '_past'
elif doc_type == 'elastalert_status':
return index + '_status'
elif doc_type == 'elastalert_error':
return index + '_error'
return index
self.resolve_writeback_index = mock.Mock(side_effect=writeback_index_side_effect)
class mock_rule_loader(object):
def __init__(self, conf):
self.base_config = conf
self.load = mock.Mock()
self.get_hashes = mock.Mock()
self.load_configuration = mock.Mock()
class mock_ruletype(object):
def __init__(self):
self.add_data = mock.Mock()
self.add_count_data = mock.Mock()
self.add_terms_data = mock.Mock()
self.matches = []
self.get_match_data = lambda x: x
self.get_match_str = lambda x: "some stuff happened"
self.garbage_collect = mock.Mock()
class mock_alert(object):
def __init__(self):
self.alert = mock.Mock()
def get_info(self):
return {'type': 'mock'}
@pytest.fixture
def ea():
rules = [{'es_host': '',
'es_port': 14900,
'name': 'anytest',
'index': 'idx',
'filter': [],
'include': ['@timestamp'],
'aggregation': datetime.timedelta(0),
'realert': datetime.timedelta(0),
'processed_hits': {},
'timestamp_field': '@timestamp',
'match_enhancements': [],
'rule_file': 'blah.yaml',
'max_query_size': 10000,
'ts_to_dt': ts_to_dt,
'dt_to_ts': dt_to_ts,
'_source_enabled': True,
'run_every': datetime.timedelta(seconds=15)}]
conf = {'rules_folder': 'rules',
'run_every': datetime.timedelta(minutes=10),
'buffer_time': datetime.timedelta(minutes=5),
'alert_time_limit': datetime.timedelta(hours=24),
'es_host': 'es',
'es_port': 14900,
'writeback_index': 'wb',
'rules': rules,
'max_query_size': 10000,
'old_query_limit': datetime.timedelta(weeks=1),
'disable_rules_on_error': False,
'scroll_keepalive': '30s',
'custom_pretty_ts_format': '%Y-%m-%d %H:%M'}
elastalert.util.elasticsearch_client = mock_es_client
conf['rules_loader'] = mock_rule_loader(conf)
elastalert.elastalert.elasticsearch_client = mock_es_client
with mock.patch('elastalert.elastalert.load_conf') as load_conf:
with mock.patch('elastalert.elastalert.BackgroundScheduler'):
load_conf.return_value = conf
conf['rules_loader'].load.return_value = rules
conf['rules_loader'].get_hashes.return_value = {}
ea = elastalert.elastalert.ElastAlerter(['--pin_rules'])
ea.rules[0]['type'] = mock_ruletype()
ea.rules[0]['alert'] = [mock_alert()]
ea.writeback_es = mock_es_client()
ea.writeback_es.search.return_value = {'hits': {'hits': []}, 'total': 0}
ea.writeback_es.deprecated_search.return_value = {'hits': {'hits': []}}
ea.writeback_es.index.return_value = {'_id': 'ABCD', 'created': True}
ea.current_es = mock_es_client('', '')
ea.thread_data.current_es = ea.current_es
ea.thread_data.num_hits = 0
ea.thread_data.num_dupes = 0
return ea
@pytest.fixture
def ea_sixsix():
rules = [{'es_host': '',
'es_port': 14900,
'name': 'anytest',
'index': 'idx',
'filter': [],
'include': ['@timestamp'],
'run_every': datetime.timedelta(seconds=1),
'aggregation': datetime.timedelta(0),
'realert': datetime.timedelta(0),
'processed_hits': {},
'timestamp_field': '@timestamp',
'match_enhancements': [],
'rule_file': 'blah.yaml',
'max_query_size': 10000,
'ts_to_dt': ts_to_dt,
'dt_to_ts': dt_to_ts,
'_source_enabled': True}]
conf = {'rules_folder': 'rules',
'run_every': datetime.timedelta(minutes=10),
'buffer_time': datetime.timedelta(minutes=5),
'alert_time_limit': datetime.timedelta(hours=24),
'es_host': 'es',
'es_port': 14900,
'writeback_index': writeback_index,
'rules': rules,
'max_query_size': 10000,
'old_query_limit': datetime.timedelta(weeks=1),
'disable_rules_on_error': False,
'scroll_keepalive': '30s',
'custom_pretty_ts_format': '%Y-%m-%d %H:%M'}
conf['rules_loader'] = mock_rule_loader(conf)
elastalert.elastalert.elasticsearch_client = mock_es_sixsix_client
elastalert.util.elasticsearch_client = mock_es_sixsix_client
with mock.patch('elastalert.elastalert.load_conf') as load_conf:
with mock.patch('elastalert.elastalert.BackgroundScheduler'):
load_conf.return_value = conf
conf['rules_loader'].load.return_value = rules
conf['rules_loader'].get_hashes.return_value = {}
ea_sixsix = elastalert.elastalert.ElastAlerter(['--pin_rules'])
ea_sixsix.rules[0]['type'] = mock_ruletype()
ea_sixsix.rules[0]['alert'] = [mock_alert()]
ea_sixsix.writeback_es = mock_es_sixsix_client()
ea_sixsix.writeback_es.search.return_value = {'hits': {'hits': []}}
ea_sixsix.writeback_es.deprecated_search.return_value = {'hits': {'hits': []}}
ea_sixsix.writeback_es.index.return_value = {'_id': 'ABCD'}
ea_sixsix.current_es = mock_es_sixsix_client('', -1)
return ea_sixsix
@pytest.fixture(scope='function')
def environ():
old_env = os.environ
new_env = dict(list(old_env.items()))
os.environ = new_env
yield os.environ
os.environ = old_env
| true | true |
f721054ced7239cd366b9a4117dc04473f5453e9 | 310 | py | Python | allauth/app_settings.py | tobiasgoecke/django-allauth | 5e80865e521a6ec7b4e0bf4aa62ba470a8376e28 | [
"MIT"
] | 2 | 2016-05-24T21:13:32.000Z | 2017-12-27T13:43:26.000Z | allauth/app_settings.py | tobiasgoecke/django-allauth | 5e80865e521a6ec7b4e0bf4aa62ba470a8376e28 | [
"MIT"
] | null | null | null | allauth/app_settings.py | tobiasgoecke/django-allauth | 5e80865e521a6ec7b4e0bf4aa62ba470a8376e28 | [
"MIT"
] | null | null | null | from django.conf import settings
# True when the allauth socialaccount app is installed in this project.
SOCIALACCOUNT_ENABLED = 'allauth.socialaccount' in settings.INSTALLED_APPS
# Where to send users after login when no explicit redirect is given.
LOGIN_REDIRECT_URL = getattr(settings, 'LOGIN_REDIRECT_URL', '/')
USER_MODEL = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
# NOTE(review): the default here is the *string* 'True', not the boolean True.
# Any non-empty string is truthy, so truth-tests still behave, but comparing
# this value with `is True` or `== True` would fail -- confirm before relying
# on its type.
REGISTRATION_OPEN = getattr(settings, 'REGISTRATION_OPEN', 'True')
| 25.833333 | 74 | 0.790323 | from django.conf import settings
SOCIALACCOUNT_ENABLED = 'allauth.socialaccount' in settings.INSTALLED_APPS
LOGIN_REDIRECT_URL = getattr(settings, 'LOGIN_REDIRECT_URL', '/')
USER_MODEL = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
REGISTRATION_OPEN = getattr(settings, 'REGISTRATION_OPEN', 'True')
| true | true |
f721060bb454c8f7e5e8d09071be951a7eff3765 | 13,013 | py | Python | tests/p2p/discv5/test_enr.py | AndreMiras/trinity | 6c20e2b63a698d345c282db8ab0cd426f4329ff5 | [
"MIT"
] | null | null | null | tests/p2p/discv5/test_enr.py | AndreMiras/trinity | 6c20e2b63a698d345c282db8ab0cd426f4329ff5 | [
"MIT"
] | null | null | null | tests/p2p/discv5/test_enr.py | AndreMiras/trinity | 6c20e2b63a698d345c282db8ab0cd426f4329ff5 | [
"MIT"
] | null | null | null | import base64
import pytest
import rlp
from eth_utils import (
decode_hex,
to_bytes,
ValidationError,
)
from eth_utils.toolz import (
assoc,
assoc_in,
)
from p2p.discv5.enr import (
ENR,
ENRSedes,
UnsignedENR,
)
from p2p.discv5.identity_schemes import (
IdentityScheme,
V4IdentityScheme,
IdentitySchemeRegistry,
)
from p2p.forkid import ForkID
# Source: https://github.com/fjl/EIPs/blob/0acb5939555cbd0efcdd04da0d3acb0cc81d049a/EIPS/eip-778.md
OFFICIAL_TEST_DATA = {
"repr": (
"enr:-IS4QHCYrYZbAKWCBRlAy5zzaDZXJBGkcnh4MHcBFZntXNFrdvJjX04jRzjzCBOonrkT"
"fj499SZuOh8R33Ls8RRcy5wBgmlkgnY0gmlwhH8AAAGJc2VjcDI1NmsxoQPKY0yuDUmstAHY"
"pMa2_oxVtw0RW_QAdpzBQA8yWM0xOIN1ZHCCdl8"
),
"private_key": decode_hex("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291"),
"public_key": decode_hex("03ca634cae0d49acb401d8a4c6b6fe8c55b70d115bf400769cc1400f3258cd3138"),
"node_id": decode_hex("a448f24c6d18e575453db13171562b71999873db5b286df957af199ec94617f7"),
"identity_scheme": V4IdentityScheme,
"sequence_number": 1,
"kv_pairs": {
b"id": b"v4",
b"ip": decode_hex("7f000001"),
b"secp256k1": decode_hex(
"03ca634cae0d49acb401d8a4c6b6fe8c55b70d115bf400769cc1400f3258cd3138",
),
b"udp": 0x765f,
}
}
# This is an ENR sent by geth and it includes a fork ID (https://eips.ethereum.org/EIPS/eip-2124)
# kv pair as well.
REAL_LIFE_TEST_DATA = {
"repr": (
"enr:-Jq4QO5zEyIBU5lSa9iaen0A2xUB5_IVrCi1DbyASTTnLV5RJan6aGPr8kU0p0MYKU5YezZgdSUE"
"-GOBEio6Ultyf1Aog2V0aMrJhGN2AZCDGfCggmlkgnY0gmlwhF4_wLuJc2VjcDI1NmsxoQOt7cA_B_Kg"
"nQ5RmwyA6ji8M1Y0jfINItRGbOOwy7XgbIN0Y3CCdl-DdWRwgnZf"
),
"public_key": decode_hex("03adedc03f07f2a09d0e519b0c80ea38bc3356348df20d22d4466ce3b0cbb5e06c"),
"node_id": decode_hex("dc8542768b457753669bebfe215d5f9ef4adb7d7df84beabddbe98350869165f"),
"identity_scheme": V4IdentityScheme,
"sequence_number": 40,
"kv_pairs": {
b"eth": (ForkID(hash=to_bytes(hexstr='0x63760190'), next=1700000), ),
b"id": b"v4",
b"ip": decode_hex("5e3fc0bb"),
b"secp256k1": decode_hex(
"03adedc03f07f2a09d0e519b0c80ea38bc3356348df20d22d4466ce3b0cbb5e06c",
),
b"tcp": 30303,
b"udp": 30303,
}
}
class MockIdentityScheme(IdentityScheme):
    """Trivial identity scheme used only in tests.

    A "signature" is simply the private key followed by the signing message,
    which makes the node id equal to the private key and the public key empty.
    """

    id = b"mock"
    private_key_size = 32

    @classmethod
    def create_enr_signature(cls, enr, private_key: bytes) -> bytes:
        if len(private_key) != cls.private_key_size:
            raise ValidationError("Invalid private key")
        return private_key + enr.get_signing_message()

    @classmethod
    def validate_enr_structure(cls, enr) -> None:
        # Any structure is acceptable for the mock scheme.
        pass

    @classmethod
    def validate_enr_signature(cls, enr) -> None:
        expected_signature = enr.node_id + enr.get_signing_message()
        if enr.signature != expected_signature:
            raise ValidationError("Invalid signature")

    @classmethod
    def extract_public_key(cls, enr) -> bytes:
        return b""

    @classmethod
    def extract_node_id(cls, enr) -> bytes:
        return enr.signature[:cls.private_key_size]
@pytest.fixture
def mock_identity_scheme():
    """The mock identity scheme class itself."""
    return MockIdentityScheme


@pytest.fixture
def identity_scheme_registry(mock_identity_scheme):
    """A registry holding the real v4 scheme plus the mock scheme."""
    registry = IdentitySchemeRegistry()
    for scheme in (V4IdentityScheme, mock_identity_scheme):
        registry.register(scheme)
    return registry
def test_mapping_interface(identity_scheme_registry):
    """An ENR behaves like a read-only mapping over its key/value pairs."""
    kv_pairs = {
        b"id": b"mock",
        b"key1": b"value1",
        b"key2": b"value2",
    }
    enr = ENR(
        signature=b"",
        sequence_number=0,
        kv_pairs=kv_pairs,
        identity_scheme_registry=identity_scheme_registry,
    )
    for key, value in kv_pairs.items():
        assert key in enr
        assert enr[key] == value
        assert enr.get(key) == value
    not_a_key = b"key3"
    assert not_a_key not in kv_pairs
    assert not_a_key not in enr
    # BUGFIX: the original evaluated this expression without asserting it,
    # so a broken `get` default would have gone unnoticed.
    assert enr.get(not_a_key) is None
    assert enr.get(not_a_key, b"default") == b"default"
    assert tuple(enr.keys()) == tuple(kv_pairs.keys())
    assert tuple(enr.values()) == tuple(kv_pairs.values())
    assert tuple(enr.items()) == tuple(kv_pairs.items())
    assert len(enr) == len(kv_pairs)
    assert tuple(iter(enr)) == tuple(iter(kv_pairs))
def test_inititialization(identity_scheme_registry):
    """(Un)signed ENRs require an `id` entry and a non-negative sequence number.

    NOTE(review): the function name carries a historical typo
    ("inititialization"); kept as-is to avoid churn in test selection.
    """
    valid_sequence_number = 0
    valid_kv_pairs = {b"id": b"mock"}
    valid_signature = b""  # signature is not validated during initialization
    assert UnsignedENR(
        sequence_number=valid_sequence_number,
        kv_pairs=valid_kv_pairs,
        identity_scheme_registry=identity_scheme_registry,
    )
    assert ENR(
        sequence_number=valid_sequence_number,
        kv_pairs=valid_kv_pairs,
        signature=valid_signature,
        identity_scheme_registry=identity_scheme_registry,
    )
    # Missing `id` key must be rejected for both variants.
    with pytest.raises(ValidationError):
        UnsignedENR(
            sequence_number=valid_sequence_number,
            kv_pairs={b"no-id": b""},
            identity_scheme_registry=identity_scheme_registry,
        )
    with pytest.raises(ValidationError):
        ENR(
            sequence_number=valid_sequence_number,
            kv_pairs={b"no-id": b""},
            signature=valid_signature,
            identity_scheme_registry=identity_scheme_registry,
        )
    # Negative sequence numbers must be rejected for both variants.
    with pytest.raises(ValidationError):
        UnsignedENR(
            sequence_number=-1,
            kv_pairs=valid_kv_pairs,
            identity_scheme_registry=identity_scheme_registry,
        )
    with pytest.raises(ValidationError):
        ENR(
            sequence_number=-1,
            kv_pairs=valid_kv_pairs,
            signature=valid_signature,
            identity_scheme_registry=identity_scheme_registry,
        )
def test_signing(mock_identity_scheme, identity_scheme_registry):
    """to_signed_enr attaches the signature produced by the identity scheme."""
    unsigned_enr = UnsignedENR(
        sequence_number=0,
        kv_pairs={b"id": b"mock"},
        identity_scheme_registry=identity_scheme_registry
    )
    private_key = b"\x00" * 32
    enr = unsigned_enr.to_signed_enr(private_key)
    assert enr.signature == mock_identity_scheme.create_enr_signature(enr, private_key)
def test_signature_validation(mock_identity_scheme, identity_scheme_registry):
    """validate_signature accepts a genuine signature and rejects a forged one."""
    unsigned_enr = UnsignedENR(0, {b"id": b"mock"}, identity_scheme_registry)
    private_key = b"\x00" * 32
    enr = unsigned_enr.to_signed_enr(private_key)
    enr.validate_signature()
    # Same record contents, but with a bogus signature attached.
    invalid_signature = b"\xff" * 64
    invalid_enr = ENR(
        enr.sequence_number,
        dict(enr),
        invalid_signature,
        identity_scheme_registry=identity_scheme_registry
    )
    with pytest.raises(ValidationError):
        invalid_enr.validate_signature()
    # An ENR referencing an unregistered identity scheme cannot even be built.
    with pytest.raises(ValidationError):
        ENR(
            0,
            {b"id": b"unknown"},
            b"",
            identity_scheme_registry=identity_scheme_registry,
        )
def test_public_key(mock_identity_scheme, identity_scheme_registry):
    """ENR.public_key is whatever the identity scheme extracts."""
    unsigned_enr = UnsignedENR(0, {b"id": b"mock"}, identity_scheme_registry)
    private_key = b"\x00" * 32
    enr = unsigned_enr.to_signed_enr(private_key)
    assert enr.public_key == mock_identity_scheme.extract_public_key(enr)
def test_node_id(mock_identity_scheme, identity_scheme_registry):
    """For the mock scheme the node id equals the signing private key."""
    unsigned_enr = UnsignedENR(0, {b"id": b"mock"}, identity_scheme_registry)
    private_key = b"\x00" * 32
    enr = unsigned_enr.to_signed_enr(private_key)
    assert enr.node_id == private_key
def test_signature_scheme_selection(mock_identity_scheme, identity_scheme_registry):
    """The `id` entry selects the identity scheme; unknown ids are rejected."""
    mock_enr = ENR(0, {b"id": b"mock"}, b"", identity_scheme_registry)
    assert mock_enr.identity_scheme is mock_identity_scheme
    v4_enr = ENR(0, {b"id": b"v4", b"secp256k1": b"\x02" * 33}, b"", identity_scheme_registry)
    assert v4_enr.identity_scheme is V4IdentityScheme
    with pytest.raises(ValidationError):
        ENR(0, {b"id": b"other"}, b"", identity_scheme_registry)
def test_repr(mock_identity_scheme, identity_scheme_registry):
    """repr() is "enr:" plus the unpadded URL-safe base64 RLP encoding, and
    the textual form round-trips through ENR.from_repr."""
    unsigned_enr = UnsignedENR(0, {b"id": b"mock"}, identity_scheme_registry)
    enr = unsigned_enr.to_signed_enr(b"\x00" * 32)
    base64_encoded_enr = base64.urlsafe_b64encode(rlp.encode(enr))
    represented_enr = repr(enr)
    assert represented_enr.startswith("enr:")
    assert base64_encoded_enr.rstrip(b"=").decode() == represented_enr[4:]
    assert ENR.from_repr(represented_enr, identity_scheme_registry) == enr
def test_deserialization_key_order_validation(identity_scheme_registry):
    """Decoding must reject kv pairs whose keys are not sorted."""
    serialized_enr = rlp.encode([
        b"signature",
        0,
        b"key1",
        b"value1",
        b"id",  # out of order: b"id" sorts before b"key1"
        b"",
        b"key2",
        b"value2",
    ])
    with pytest.raises(rlp.DeserializationError):
        rlp.decode(
            serialized_enr,
            ENRSedes,
            identity_scheme_registry=identity_scheme_registry,
        )
def test_deserialization_key_uniqueness_validation(identity_scheme_registry):
    """Decoding must reject records that repeat a key."""
    serialized_enr = rlp.encode([
        b"signature",
        0,
        b"key1",
        b"value1",
        b"id",
        b"",
        b"key1",  # duplicate of the earlier b"key1"
        b"value2",
    ])
    with pytest.raises(rlp.DeserializationError):
        rlp.decode(
            serialized_enr,
            ENRSedes,
            identity_scheme_registry=identity_scheme_registry,
        )
@pytest.mark.parametrize("incomplete_enr", (
    (),
    (b"signature",),
    (b"signature", 0, b"key1"),
    (b"signature", 0, b"key1", b"value1", b"id"),
))
def test_deserialization_completeness_validation(incomplete_enr, identity_scheme_registry):
    """Decoding must reject records with missing elements or dangling keys."""
    incomplete_enr_rlp = rlp.encode(incomplete_enr)
    with pytest.raises(rlp.DeserializationError):
        rlp.decode(
            incomplete_enr_rlp,
            ENRSedes,
            identity_scheme_registry=identity_scheme_registry,
        )
def test_equality(identity_scheme_registry):
    """ENRs compare equal iff sequence number, kv pairs, and signature all match."""
    reference_kwargs = {
        "sequence_number": 0,
        "kv_pairs": {
            b"id": b"mock",
            b"key1": b"value1",
            b"key2": b"value2",
        },
        "signature": b"signature",
        "identity_scheme_registry": identity_scheme_registry,
    }

    reference = ENR(**reference_kwargs)
    twin = ENR(**reference_kwargs)

    # Each variant differs from the reference in exactly one component.
    bumped_sequence = ENR(**assoc(reference_kwargs, "sequence_number", 1))
    changed_value = ENR(**assoc_in(reference_kwargs, ("kv_pairs", b"key1"), b"value2"))
    changed_signature = ENR(**assoc(reference_kwargs, "signature", b"different-signature"))

    assert reference == reference
    assert twin == reference
    assert bumped_sequence != reference
    assert changed_value != reference
    assert changed_signature != reference
def test_serialization_roundtrip(identity_scheme_registry):
    """Encoding and then decoding an ENR must yield an equal record."""
    source_record = ENR(
        sequence_number=0,
        kv_pairs={
            b"id": b"mock",
            b"key2": b"value2",  # wrong order so that serialization is forced to fix this
            b"key1": b"value1",
        },
        signature=b"",
        identity_scheme_registry=identity_scheme_registry,
    )

    round_tripped = rlp.decode(
        rlp.encode(source_record),
        ENR,
        identity_scheme_registry=identity_scheme_registry,
    )
    assert round_tripped == source_record
@pytest.mark.parametrize("invalid_kv_pairs", (
    {b"id": b"v4"},  # missing public key
    {b"id": b"v4", b"secp256k1": b"\x00"},  # invalid public key
))
def test_v4_structure_validation(invalid_kv_pairs, identity_scheme_registry):
    """The v4 scheme must reject records lacking a valid secp256k1 public key."""
    with pytest.raises(ValidationError):
        UnsignedENR(
            sequence_number=0,
            kv_pairs=invalid_kv_pairs,
            identity_scheme_registry=identity_scheme_registry,
        )
def test_official_test_vector():
    """Check every field of the official ENR test vector end to end."""
    vector = OFFICIAL_TEST_DATA
    record = ENR.from_repr(vector["repr"])  # use default identity scheme registry

    assert record.sequence_number == vector["sequence_number"]
    assert dict(record) == vector["kv_pairs"]
    assert record.public_key == vector["public_key"]
    assert record.node_id == vector["node_id"]
    assert record.identity_scheme is vector["identity_scheme"]
    assert repr(record) == vector["repr"]

    # Re-signing the same content with the vector's key must reproduce the record.
    resigned = UnsignedENR(record.sequence_number, dict(record)).to_signed_enr(
        vector["private_key"]
    )
    assert resigned == record
def test_real_life_test_vector():
    """Check a record captured from a live node against its known fields."""
    vector = REAL_LIFE_TEST_DATA
    record = ENR.from_repr(vector["repr"])

    assert record.sequence_number == vector["sequence_number"]
    assert record.public_key == vector["public_key"]
    assert record.node_id == vector["node_id"]
    assert record.identity_scheme is vector["identity_scheme"]
    assert dict(record) == vector["kv_pairs"]
    assert repr(record) == vector["repr"]
| 31.508475 | 99 | 0.683394 | import base64
import pytest
import rlp
from eth_utils import (
decode_hex,
to_bytes,
ValidationError,
)
from eth_utils.toolz import (
assoc,
assoc_in,
)
from p2p.discv5.enr import (
ENR,
ENRSedes,
UnsignedENR,
)
from p2p.discv5.identity_schemes import (
IdentityScheme,
V4IdentityScheme,
IdentitySchemeRegistry,
)
from p2p.forkid import ForkID
OFFICIAL_TEST_DATA = {
"repr": (
"enr:-IS4QHCYrYZbAKWCBRlAy5zzaDZXJBGkcnh4MHcBFZntXNFrdvJjX04jRzjzCBOonrkT"
"fj499SZuOh8R33Ls8RRcy5wBgmlkgnY0gmlwhH8AAAGJc2VjcDI1NmsxoQPKY0yuDUmstAHY"
"pMa2_oxVtw0RW_QAdpzBQA8yWM0xOIN1ZHCCdl8"
),
"private_key": decode_hex("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291"),
"public_key": decode_hex("03ca634cae0d49acb401d8a4c6b6fe8c55b70d115bf400769cc1400f3258cd3138"),
"node_id": decode_hex("a448f24c6d18e575453db13171562b71999873db5b286df957af199ec94617f7"),
"identity_scheme": V4IdentityScheme,
"sequence_number": 1,
"kv_pairs": {
b"id": b"v4",
b"ip": decode_hex("7f000001"),
b"secp256k1": decode_hex(
"03ca634cae0d49acb401d8a4c6b6fe8c55b70d115bf400769cc1400f3258cd3138",
),
b"udp": 0x765f,
}
}
REAL_LIFE_TEST_DATA = {
"repr": (
"enr:-Jq4QO5zEyIBU5lSa9iaen0A2xUB5_IVrCi1DbyASTTnLV5RJan6aGPr8kU0p0MYKU5YezZgdSUE"
"-GOBEio6Ultyf1Aog2V0aMrJhGN2AZCDGfCggmlkgnY0gmlwhF4_wLuJc2VjcDI1NmsxoQOt7cA_B_Kg"
"nQ5RmwyA6ji8M1Y0jfINItRGbOOwy7XgbIN0Y3CCdl-DdWRwgnZf"
),
"public_key": decode_hex("03adedc03f07f2a09d0e519b0c80ea38bc3356348df20d22d4466ce3b0cbb5e06c"),
"node_id": decode_hex("dc8542768b457753669bebfe215d5f9ef4adb7d7df84beabddbe98350869165f"),
"identity_scheme": V4IdentityScheme,
"sequence_number": 40,
"kv_pairs": {
b"eth": (ForkID(hash=to_bytes(hexstr='0x63760190'), next=1700000), ),
b"id": b"v4",
b"ip": decode_hex("5e3fc0bb"),
b"secp256k1": decode_hex(
"03adedc03f07f2a09d0e519b0c80ea38bc3356348df20d22d4466ce3b0cbb5e06c",
),
b"tcp": 30303,
b"udp": 30303,
}
}
class MockIdentityScheme(IdentityScheme):
id = b"mock"
private_key_size = 32
@classmethod
def create_enr_signature(cls, enr, private_key: bytes) -> bytes:
if len(private_key) != cls.private_key_size:
raise ValidationError("Invalid private key")
return private_key + enr.get_signing_message()
@classmethod
def validate_enr_structure(cls, enr) -> None:
pass
@classmethod
def validate_enr_signature(cls, enr) -> None:
if not enr.signature == enr.node_id + enr.get_signing_message():
raise ValidationError("Invalid signature")
@classmethod
def extract_public_key(cls, enr) -> bytes:
return b""
@classmethod
def extract_node_id(cls, enr) -> bytes:
return enr.signature[:cls.private_key_size]
@pytest.fixture
def mock_identity_scheme():
return MockIdentityScheme
@pytest.fixture
def identity_scheme_registry(mock_identity_scheme):
registry = IdentitySchemeRegistry()
registry.register(V4IdentityScheme)
registry.register(mock_identity_scheme)
return registry
def test_mapping_interface(identity_scheme_registry):
kv_pairs = {
b"id": b"mock",
b"key1": b"value1",
b"key2": b"value2",
}
enr = ENR(
signature=b"",
sequence_number=0,
kv_pairs=kv_pairs,
identity_scheme_registry=identity_scheme_registry,
)
for key, value in kv_pairs.items():
assert key in enr
assert enr[key] == value
assert enr.get(key) == value
not_a_key = b"key3"
assert not_a_key not in kv_pairs
assert not_a_key not in enr
enr.get(not_a_key) is None
assert enr.get(not_a_key, b"default") == b"default"
assert tuple(enr.keys()) == tuple(kv_pairs.keys())
assert tuple(enr.values()) == tuple(kv_pairs.values())
assert tuple(enr.items()) == tuple(kv_pairs.items())
assert len(enr) == len(kv_pairs)
assert tuple(iter(enr)) == tuple(iter(kv_pairs))
def test_inititialization(identity_scheme_registry):
valid_sequence_number = 0
valid_kv_pairs = {b"id": b"mock"}
valid_signature = b""
assert UnsignedENR(
sequence_number=valid_sequence_number,
kv_pairs=valid_kv_pairs,
identity_scheme_registry=identity_scheme_registry,
)
assert ENR(
sequence_number=valid_sequence_number,
kv_pairs=valid_kv_pairs,
signature=valid_signature,
identity_scheme_registry=identity_scheme_registry,
)
with pytest.raises(ValidationError):
UnsignedENR(
sequence_number=valid_sequence_number,
kv_pairs={b"no-id": b""},
identity_scheme_registry=identity_scheme_registry,
)
with pytest.raises(ValidationError):
ENR(
sequence_number=valid_sequence_number,
kv_pairs={b"no-id": b""},
signature=valid_signature,
identity_scheme_registry=identity_scheme_registry,
)
with pytest.raises(ValidationError):
UnsignedENR(
sequence_number=-1,
kv_pairs=valid_kv_pairs,
identity_scheme_registry=identity_scheme_registry,
)
with pytest.raises(ValidationError):
ENR(
sequence_number=-1,
kv_pairs=valid_kv_pairs,
signature=valid_signature,
identity_scheme_registry=identity_scheme_registry,
)
def test_signing(mock_identity_scheme, identity_scheme_registry):
unsigned_enr = UnsignedENR(
sequence_number=0,
kv_pairs={b"id": b"mock"},
identity_scheme_registry=identity_scheme_registry
)
private_key = b"\x00" * 32
enr = unsigned_enr.to_signed_enr(private_key)
assert enr.signature == mock_identity_scheme.create_enr_signature(enr, private_key)
def test_signature_validation(mock_identity_scheme, identity_scheme_registry):
unsigned_enr = UnsignedENR(0, {b"id": b"mock"}, identity_scheme_registry)
private_key = b"\x00" * 32
enr = unsigned_enr.to_signed_enr(private_key)
enr.validate_signature()
invalid_signature = b"\xff" * 64
invalid_enr = ENR(
enr.sequence_number,
dict(enr),
invalid_signature,
identity_scheme_registry=identity_scheme_registry
)
with pytest.raises(ValidationError):
invalid_enr.validate_signature()
with pytest.raises(ValidationError):
ENR(
0,
{b"id": b"unknown"},
b"",
identity_scheme_registry=identity_scheme_registry,
)
def test_public_key(mock_identity_scheme, identity_scheme_registry):
unsigned_enr = UnsignedENR(0, {b"id": b"mock"}, identity_scheme_registry)
private_key = b"\x00" * 32
enr = unsigned_enr.to_signed_enr(private_key)
assert enr.public_key == mock_identity_scheme.extract_public_key(enr)
def test_node_id(mock_identity_scheme, identity_scheme_registry):
unsigned_enr = UnsignedENR(0, {b"id": b"mock"}, identity_scheme_registry)
private_key = b"\x00" * 32
enr = unsigned_enr.to_signed_enr(private_key)
assert enr.node_id == private_key
def test_signature_scheme_selection(mock_identity_scheme, identity_scheme_registry):
mock_enr = ENR(0, {b"id": b"mock"}, b"", identity_scheme_registry)
assert mock_enr.identity_scheme is mock_identity_scheme
v4_enr = ENR(0, {b"id": b"v4", b"secp256k1": b"\x02" * 33}, b"", identity_scheme_registry)
assert v4_enr.identity_scheme is V4IdentityScheme
with pytest.raises(ValidationError):
ENR(0, {b"id": b"other"}, b"", identity_scheme_registry)
def test_repr(mock_identity_scheme, identity_scheme_registry):
unsigned_enr = UnsignedENR(0, {b"id": b"mock"}, identity_scheme_registry)
enr = unsigned_enr.to_signed_enr(b"\x00" * 32)
base64_encoded_enr = base64.urlsafe_b64encode(rlp.encode(enr))
represented_enr = repr(enr)
assert represented_enr.startswith("enr:")
assert base64_encoded_enr.rstrip(b"=").decode() == represented_enr[4:]
assert ENR.from_repr(represented_enr, identity_scheme_registry) == enr
def test_deserialization_key_order_validation(identity_scheme_registry):
serialized_enr = rlp.encode([
b"signature",
0,
b"key1",
b"value1",
b"id",
b"",
b"key2",
b"value2",
])
with pytest.raises(rlp.DeserializationError):
rlp.decode(
serialized_enr,
ENRSedes,
identity_scheme_registry=identity_scheme_registry,
)
def test_deserialization_key_uniqueness_validation(identity_scheme_registry):
serialized_enr = rlp.encode([
b"signature",
0,
b"key1",
b"value1",
b"id",
b"",
b"key1",
b"value2",
])
with pytest.raises(rlp.DeserializationError):
rlp.decode(
serialized_enr,
ENRSedes,
identity_scheme_registry=identity_scheme_registry,
)
@pytest.mark.parametrize("incomplete_enr", (
(),
(b"signature",),
(b"signature", 0, b"key1"),
(b"signature", 0, b"key1", b"value1", b"id"),
))
def test_deserialization_completeness_validation(incomplete_enr, identity_scheme_registry):
incomplete_enr_rlp = rlp.encode(incomplete_enr)
with pytest.raises(rlp.DeserializationError):
rlp.decode(
incomplete_enr_rlp,
ENRSedes,
identity_scheme_registry=identity_scheme_registry,
)
def test_equality(identity_scheme_registry):
base_kwargs = {
"sequence_number": 0,
"kv_pairs": {
b"id": b"mock",
b"key1": b"value1",
b"key2": b"value2",
},
"signature": b"signature",
"identity_scheme_registry": identity_scheme_registry,
}
base_enr = ENR(**base_kwargs)
equal_enr = ENR(**base_kwargs)
enr_different_sequence_number = ENR(
**assoc(base_kwargs, "sequence_number", 1)
)
enr_different_kv_pairs = ENR(
**assoc_in(base_kwargs, ("kv_pairs", b"key1"), b"value2"),
)
enr_different_signature = ENR(
**assoc(base_kwargs, "signature", b"different-signature")
)
assert base_enr == base_enr
assert equal_enr == base_enr
assert enr_different_sequence_number != base_enr
assert enr_different_kv_pairs != base_enr
assert enr_different_signature != base_enr
def test_serialization_roundtrip(identity_scheme_registry):
original_enr = ENR(
sequence_number=0,
kv_pairs={
b"id": b"mock",
b"key2": b"value2",
b"key1": b"value1",
},
signature=b"",
identity_scheme_registry=identity_scheme_registry,
)
encoded = rlp.encode(original_enr)
recovered_enr = rlp.decode(
encoded,
ENR,
identity_scheme_registry=identity_scheme_registry,
)
assert recovered_enr == original_enr
@pytest.mark.parametrize("invalid_kv_pairs", (
{b"id": b"v4"},
{b"id": b"v4", b"secp256k1": b"\x00"},
))
def test_v4_structure_validation(invalid_kv_pairs, identity_scheme_registry):
with pytest.raises(ValidationError):
UnsignedENR(
sequence_number=0,
kv_pairs=invalid_kv_pairs,
identity_scheme_registry=identity_scheme_registry,
)
def test_official_test_vector():
enr = ENR.from_repr(OFFICIAL_TEST_DATA["repr"])
assert enr.sequence_number == OFFICIAL_TEST_DATA["sequence_number"]
assert dict(enr) == OFFICIAL_TEST_DATA["kv_pairs"]
assert enr.public_key == OFFICIAL_TEST_DATA["public_key"]
assert enr.node_id == OFFICIAL_TEST_DATA["node_id"]
assert enr.identity_scheme is OFFICIAL_TEST_DATA["identity_scheme"]
assert repr(enr) == OFFICIAL_TEST_DATA["repr"]
unsigned_enr = UnsignedENR(enr.sequence_number, dict(enr))
reconstructed_enr = unsigned_enr.to_signed_enr(OFFICIAL_TEST_DATA["private_key"])
assert reconstructed_enr == enr
def test_real_life_test_vector():
enr = ENR.from_repr(REAL_LIFE_TEST_DATA["repr"])
assert enr.sequence_number == REAL_LIFE_TEST_DATA["sequence_number"]
assert enr.public_key == REAL_LIFE_TEST_DATA["public_key"]
assert enr.node_id == REAL_LIFE_TEST_DATA["node_id"]
assert enr.identity_scheme is REAL_LIFE_TEST_DATA["identity_scheme"]
assert dict(enr) == REAL_LIFE_TEST_DATA["kv_pairs"]
assert repr(enr) == REAL_LIFE_TEST_DATA["repr"]
| true | true |
f72107e0ab86bdefce931d0993f38f0d3db29c26 | 12,483 | py | Python | mypy/test/testpep561.py | chubbymaggie/mypy | 50c3dfcdca94726130e8cfdb6bde02b3eeca4e09 | [
"PSF-2.0"
] | 1 | 2019-06-15T08:26:28.000Z | 2019-06-15T08:26:28.000Z | mypy/test/testpep561.py | chubbymaggie/mypy | 50c3dfcdca94726130e8cfdb6bde02b3eeca4e09 | [
"PSF-2.0"
] | 1 | 2021-03-31T20:22:11.000Z | 2021-03-31T20:22:11.000Z | mypy/test/testpep561.py | chubbymaggie/mypy | 50c3dfcdca94726130e8cfdb6bde02b3eeca4e09 | [
"PSF-2.0"
] | null | null | null | from contextlib import contextmanager
from enum import Enum
import os
import sys
import tempfile
from typing import Tuple, List, Generator, Optional
from unittest import TestCase, main
import mypy.api
from mypy.modulefinder import get_site_packages_dirs
from mypy.test.config import package_path
from mypy.test.helpers import run_command
from mypy.util import try_find_python2_interpreter
# NOTE: options.use_builtins_fixtures should not be set in these
# tests, otherwise mypy will ignore installed third-party packages.
# Program type-checked against the `typedpkg` fixture package: `dne` is
# missing from the package (see SimpleMsg.msg_dne) and reveal_type reports
# the return type of ex().
SIMPLE_PROGRAM = """
from typedpkg.sample import ex
from typedpkg import dne
a = ex([''])
reveal_type(a)
"""

# Template for the namespace-package programs; {import_style} is replaced
# with one of the NSImportStyle values (see create_ns_program_src).
_NAMESPACE_PROGRAM = """
{import_style}
from typedpkg_ns.ns.dne import dne
af("abc")
bf(False)
dne(123)
af(False)
bf(2)
dne("abc")
"""
class NSImportStyle(Enum):
    """Import styles for binding ``af``/``bf`` from a regular and a namespace package.

    Each member's value is spliced into _NAMESPACE_PROGRAM via
    create_ns_program_src.
    """
    # These should all be on exactly two lines because NamespaceMsg
    # uses line numbers which expect the imports to be exactly two lines
    from_import = """\
from typedpkg.pkg.aaa import af
from typedpkg_ns.ns.bbb import bf"""
    import_as = """\
import typedpkg.pkg.aaa as nm; af = nm.af
import typedpkg_ns.ns.bbb as am; bf = am.bf"""
    reg_import = """\
import typedpkg.pkg.aaa; af = typedpkg.pkg.aaa.af
import typedpkg_ns.ns.bbb; bf = typedpkg_ns.ns.bbb.bf"""
class SimpleMsg(Enum):
    """Expected mypy output lines for SIMPLE_PROGRAM.

    ``{tempfile}`` is substituted with the program's path by
    ExampleProg.build_msg.
    """
    msg_dne = "{tempfile}:3: error: Module 'typedpkg' has no attribute 'dne'"
    msg_list = "{tempfile}:5: error: Revealed type is 'builtins.list[builtins.str]'"
    msg_tuple = "{tempfile}:5: error: Revealed type is 'builtins.tuple[builtins.str]'"
class NamespaceMsg(Enum):
    """Expected mypy output lines for the namespace-package programs.

    ``{tempfile}`` is substituted with the program's path by
    ExampleProg.build_msg.  The ``to_*`` variants include the called
    function's name in the message.
    """
    cfm_beta = ("{tempfile}:4: error: Cannot find module named "
                "'typedpkg_ns.ns.dne'")
    help_note = ('{tempfile}:4: note: (Perhaps setting MYPYPATH or using the '
                 '"--ignore-missing-imports" flag would help)')
    bool_str = ('{tempfile}:10: error: Argument 1 has incompatible type '
                '"bool"; expected "str"')
    int_bool = ('{tempfile}:11: error: Argument 1 has incompatible type '
                '"int"; expected "bool"')
    to_bool_str = ('{tempfile}:10: error: Argument 1 to "af" has incompatible type '
                   '"bool"; expected "str"')
    to_int_bool = ('{tempfile}:11: error: Argument 1 to "bf" has incompatible type '
                   '"int"; expected "bool"')
def create_ns_program_src(import_style: NSImportStyle) -> str:
    """Return the namespace test program rendered with *import_style*'s imports."""
    import_lines = import_style.value
    return _NAMESPACE_PROGRAM.format(import_style=import_lines)
class ExampleProg(object):
    """A small program written to a temporary directory and checked with mypy."""

    _fname = 'test_program.py'

    def __init__(self, source_code: str) -> None:
        self._source_code = source_code
        self._temp_dir = None  # type: Optional[tempfile.TemporaryDirectory[str]]
        self._full_fname = ''

    def create(self) -> None:
        """Write the program source out to a fresh temporary directory."""
        self._temp_dir = tempfile.TemporaryDirectory()
        self._full_fname = os.path.join(self._temp_dir.name, self._fname)
        with open(self._full_fname, 'w+') as f:
            f.write(self._source_code)

    def cleanup(self) -> None:
        """Remove the temporary directory, if one was created."""
        if self._temp_dir:
            self._temp_dir.cleanup()

    def build_msg(self, *msgs: Enum) -> str:
        """Render the expected messages with this program's file name filled in."""
        rendered = [msg.value.format(tempfile=self._full_fname) for msg in msgs]
        return '\n'.join(rendered) + '\n'

    def check_mypy_run(self,
                       python_executable: str,
                       expected_out: List[Enum],
                       expected_err: str = '',
                       expected_returncode: int = 1,
                       venv_dir: Optional[str] = None) -> None:
        """Helper to run mypy and check the output."""
        cmd_line = [self._full_fname]
        old_dir = None
        if venv_dir is not None:
            # Run from inside the venv directory so mypy picks it up.
            old_dir = os.getcwd()
            os.chdir(venv_dir)
        try:
            if python_executable != sys.executable:
                cmd_line.append('--python-executable={}'.format(python_executable))
            out, err, returncode = mypy.api.run(cmd_line)
            assert out == self.build_msg(*expected_out), err
            assert err == expected_err, out
            assert returncode == expected_returncode, returncode
        finally:
            if old_dir is not None:
                os.chdir(old_dir)
class TestPEP561(TestCase):
    """Integration tests for PEP 561 package handling.

    Each test builds a throwaway virtualenv, installs fixture packages from
    test-data/packages/, and runs mypy against a small example program,
    comparing the output against the expected message enums.
    """

    @contextmanager
    def virtualenv(self,
                   python_executable: str = sys.executable
                   ) -> Generator[Tuple[str, str], None, None]:
        """Context manager that creates a virtualenv in a temporary directory
        returns the path to the created Python executable"""
        # Sadly, we need virtualenv, as the Python 3 venv module does not support creating a venv
        # for Python 2, and Python 2 does not have its own venv.
        with tempfile.TemporaryDirectory() as venv_dir:
            returncode, lines = run_command([sys.executable,
                                             '-m',
                                             'virtualenv',
                                             '-p{}'.format(python_executable),
                                             venv_dir], cwd=os.getcwd())
            if returncode != 0:
                err = '\n'.join(lines)
                self.fail("Failed to create venv. Do you have virtualenv installed?\n" + err)
            # The interpreter lives under Scripts/ on Windows and bin/ elsewhere.
            if sys.platform == 'win32':
                yield venv_dir, os.path.abspath(os.path.join(venv_dir, 'Scripts', 'python'))
            else:
                yield venv_dir, os.path.abspath(os.path.join(venv_dir, 'bin', 'python'))

    def install_package(self, pkg: str,
                        python_executable: str = sys.executable,
                        use_pip: bool = True,
                        editable: bool = False) -> None:
        """Context manager to temporarily install a package from test-data/packages/pkg/"""
        working_dir = os.path.join(package_path, pkg)
        # Install either via pip or via a direct setup.py invocation ("egg"
        # style), optionally in editable/develop mode.
        if use_pip:
            install_cmd = [python_executable, '-m', 'pip', 'install']
            if editable:
                install_cmd.append('-e')
            install_cmd.append('.')
        else:
            install_cmd = [python_executable, 'setup.py']
            if editable:
                install_cmd.append('develop')
            else:
                install_cmd.append('install')
        returncode, lines = run_command(install_cmd, cwd=working_dir)
        if returncode != 0:
            self.fail('\n'.join(lines))

    def setUp(self) -> None:
        # One example program per namespace-import style, plus the simple one.
        self.simple_prog = ExampleProg(SIMPLE_PROGRAM)
        self.from_ns_prog = ExampleProg(create_ns_program_src(NSImportStyle.from_import))
        self.import_as_ns_prog = ExampleProg(create_ns_program_src(NSImportStyle.import_as))
        self.regular_import_ns_prog = ExampleProg(create_ns_program_src(NSImportStyle.reg_import))

    def tearDown(self) -> None:
        self.simple_prog.cleanup()
        self.from_ns_prog.cleanup()
        self.import_as_ns_prog.cleanup()
        self.regular_import_ns_prog.cleanup()

    def test_get_pkg_dirs(self) -> None:
        """Check that get_package_dirs works."""
        dirs = get_site_packages_dirs(sys.executable)
        assert dirs

    def test_typedpkg_stub_package(self) -> None:
        """A stub-only package: the stub lacks `dne` and types ex() as list."""
        self.simple_prog.create()
        with self.virtualenv() as venv:
            venv_dir, python_executable = venv
            self.install_package('typedpkg-stubs', python_executable)
            self.simple_prog.check_mypy_run(
                python_executable,
                [SimpleMsg.msg_dne, SimpleMsg.msg_list],
                venv_dir=venv_dir,
            )

    def test_typedpkg(self) -> None:
        """The inline-typed package types ex() as tuple."""
        self.simple_prog.create()
        with self.virtualenv() as venv:
            venv_dir, python_executable = venv
            self.install_package('typedpkg', python_executable)
            self.simple_prog.check_mypy_run(
                python_executable,
                [SimpleMsg.msg_tuple],
                venv_dir=venv_dir,
            )

    def test_stub_and_typed_pkg(self) -> None:
        """With both installed, the stub package takes precedence."""
        self.simple_prog.create()
        with self.virtualenv() as venv:
            venv_dir, python_executable = venv
            self.install_package('typedpkg', python_executable)
            self.install_package('typedpkg-stubs', python_executable)
            self.simple_prog.check_mypy_run(
                python_executable,
                [SimpleMsg.msg_list],
                venv_dir=venv_dir,
            )

    def test_typedpkg_stubs_python2(self) -> None:
        """Same as the stub test but in a Python 2 venv, if one is available."""
        self.simple_prog.create()
        python2 = try_find_python2_interpreter()
        if python2:
            with self.virtualenv(python2) as venv:
                venv_dir, py2 = venv
                self.install_package('typedpkg-stubs', py2)
                self.simple_prog.check_mypy_run(
                    py2,
                    [SimpleMsg.msg_dne, SimpleMsg.msg_list],
                    venv_dir=venv_dir,
                )

    def test_typedpkg_python2(self) -> None:
        """Same as the typedpkg test but in a Python 2 venv, if one is available."""
        self.simple_prog.create()
        python2 = try_find_python2_interpreter()
        if python2:
            with self.virtualenv(python2) as venv:
                venv_dir, py2 = venv
                self.install_package('typedpkg', py2)
                self.simple_prog.check_mypy_run(
                    py2,
                    [SimpleMsg.msg_tuple],
                    venv_dir=venv_dir,
                )

    def test_typedpkg_egg(self) -> None:
        """Install via setup.py (egg) rather than pip."""
        self.simple_prog.create()
        with self.virtualenv() as venv:
            venv_dir, python_executable = venv
            self.install_package('typedpkg', python_executable, use_pip=False)
            self.simple_prog.check_mypy_run(
                python_executable,
                [SimpleMsg.msg_tuple],
                venv_dir=venv_dir,
            )

    def test_typedpkg_editable(self) -> None:
        """Install with `pip install -e`."""
        self.simple_prog.create()
        with self.virtualenv() as venv:
            venv_dir, python_executable = venv
            self.install_package('typedpkg', python_executable, editable=True)
            self.simple_prog.check_mypy_run(
                python_executable,
                [SimpleMsg.msg_tuple],
                venv_dir=venv_dir,
            )

    def test_typedpkg_egg_editable(self) -> None:
        """Install with `setup.py develop`."""
        self.simple_prog.create()
        with self.virtualenv() as venv:
            venv_dir, python_executable = venv
            self.install_package('typedpkg', python_executable, use_pip=False, editable=True)
            self.simple_prog.check_mypy_run(
                python_executable,
                [SimpleMsg.msg_tuple],
                venv_dir=venv_dir,
            )

    def test_nested_and_namespace_from_import(self) -> None:
        """Namespace packages resolved via `from ... import` bindings."""
        self.from_ns_prog.create()
        with self.virtualenv() as venv:
            venv_dir, python_executable = venv
            self.install_package('typedpkg', python_executable)
            self.install_package('typedpkg_ns', python_executable)
            self.from_ns_prog.check_mypy_run(
                python_executable,
                [NamespaceMsg.cfm_beta,
                 NamespaceMsg.help_note,
                 NamespaceMsg.to_bool_str,
                 NamespaceMsg.to_int_bool],
                venv_dir=venv_dir,
            )

    def test_nested_and_namespace_import_as(self) -> None:
        """Namespace packages resolved via `import ... as` bindings."""
        self.import_as_ns_prog.create()
        with self.virtualenv() as venv:
            venv_dir, python_executable = venv
            self.install_package('typedpkg', python_executable)
            self.install_package('typedpkg_ns', python_executable)
            self.import_as_ns_prog.check_mypy_run(
                python_executable,
                [NamespaceMsg.cfm_beta,
                 NamespaceMsg.help_note,
                 NamespaceMsg.bool_str,
                 NamespaceMsg.int_bool],
                venv_dir=venv_dir,
            )

    def test_nested_and_namespace_regular_import(self) -> None:
        """Namespace packages resolved via plain `import` statements."""
        self.regular_import_ns_prog.create()
        with self.virtualenv() as venv:
            venv_dir, python_executable = venv
            self.install_package('typedpkg', python_executable)
            self.install_package('typedpkg_ns', python_executable)
            self.regular_import_ns_prog.check_mypy_run(
                python_executable,
                [NamespaceMsg.cfm_beta,
                 NamespaceMsg.help_note,
                 NamespaceMsg.bool_str,
                 NamespaceMsg.int_bool],
                venv_dir=venv_dir,
            )
# Allow running this test module directly, outside the test runner.
if __name__ == '__main__':
    main()
| 37.827273 | 98 | 0.599295 | from contextlib import contextmanager
from enum import Enum
import os
import sys
import tempfile
from typing import Tuple, List, Generator, Optional
from unittest import TestCase, main
import mypy.api
from mypy.modulefinder import get_site_packages_dirs
from mypy.test.config import package_path
from mypy.test.helpers import run_command
from mypy.util import try_find_python2_interpreter
SIMPLE_PROGRAM = """
from typedpkg.sample import ex
from typedpkg import dne
a = ex([''])
reveal_type(a)
"""
_NAMESPACE_PROGRAM = """
{import_style}
from typedpkg_ns.ns.dne import dne
af("abc")
bf(False)
dne(123)
af(False)
bf(2)
dne("abc")
"""
class NSImportStyle(Enum):
from_import = """\
from typedpkg.pkg.aaa import af
from typedpkg_ns.ns.bbb import bf"""
import_as = """\
import typedpkg.pkg.aaa as nm; af = nm.af
import typedpkg_ns.ns.bbb as am; bf = am.bf"""
reg_import = """\
import typedpkg.pkg.aaa; af = typedpkg.pkg.aaa.af
import typedpkg_ns.ns.bbb; bf = typedpkg_ns.ns.bbb.bf"""
class SimpleMsg(Enum):
msg_dne = "{tempfile}:3: error: Module 'typedpkg' has no attribute 'dne'"
msg_list = "{tempfile}:5: error: Revealed type is 'builtins.list[builtins.str]'"
msg_tuple = "{tempfile}:5: error: Revealed type is 'builtins.tuple[builtins.str]'"
class NamespaceMsg(Enum):
cfm_beta = ("{tempfile}:4: error: Cannot find module named "
"'typedpkg_ns.ns.dne'")
help_note = ('{tempfile}:4: note: (Perhaps setting MYPYPATH or using the '
'"--ignore-missing-imports" flag would help)')
bool_str = ('{tempfile}:10: error: Argument 1 has incompatible type '
'"bool"; expected "str"')
int_bool = ('{tempfile}:11: error: Argument 1 has incompatible type '
'"int"; expected "bool"')
to_bool_str = ('{tempfile}:10: error: Argument 1 to "af" has incompatible type '
'"bool"; expected "str"')
to_int_bool = ('{tempfile}:11: error: Argument 1 to "bf" has incompatible type '
'"int"; expected "bool"')
def create_ns_program_src(import_style: NSImportStyle) -> str:
return _NAMESPACE_PROGRAM.format(import_style=import_style.value)
class ExampleProg(object):
_fname = 'test_program.py'
def __init__(self, source_code: str) -> None:
self._source_code = source_code
self._temp_dir = None
self._full_fname = ''
def create(self) -> None:
self._temp_dir = tempfile.TemporaryDirectory()
self._full_fname = os.path.join(self._temp_dir.name, self._fname)
with open(self._full_fname, 'w+') as f:
f.write(self._source_code)
def cleanup(self) -> None:
if self._temp_dir:
self._temp_dir.cleanup()
def build_msg(self, *msgs: Enum) -> str:
return '\n'.join(
msg.value.format(tempfile=self._full_fname)
for msg in msgs
) + '\n'
def check_mypy_run(self,
python_executable: str,
expected_out: List[Enum],
expected_err: str = '',
expected_returncode: int = 1,
venv_dir: Optional[str] = None) -> None:
cmd_line = [self._full_fname]
if venv_dir is not None:
old_dir = os.getcwd()
os.chdir(venv_dir)
try:
if python_executable != sys.executable:
cmd_line.append('--python-executable={}'.format(python_executable))
out, err, returncode = mypy.api.run(cmd_line)
assert out == self.build_msg(*expected_out), err
assert err == expected_err, out
assert returncode == expected_returncode, returncode
finally:
if venv_dir is not None:
os.chdir(old_dir)
class TestPEP561(TestCase):
@contextmanager
def virtualenv(self,
python_executable: str = sys.executable
) -> Generator[Tuple[str, str], None, None]:
with tempfile.TemporaryDirectory() as venv_dir:
returncode, lines = run_command([sys.executable,
'-m',
'virtualenv',
'-p{}'.format(python_executable),
venv_dir], cwd=os.getcwd())
if returncode != 0:
err = '\n'.join(lines)
self.fail("Failed to create venv. Do you have virtualenv installed?\n" + err)
if sys.platform == 'win32':
yield venv_dir, os.path.abspath(os.path.join(venv_dir, 'Scripts', 'python'))
else:
yield venv_dir, os.path.abspath(os.path.join(venv_dir, 'bin', 'python'))
def install_package(self, pkg: str,
python_executable: str = sys.executable,
use_pip: bool = True,
editable: bool = False) -> None:
working_dir = os.path.join(package_path, pkg)
if use_pip:
install_cmd = [python_executable, '-m', 'pip', 'install']
if editable:
install_cmd.append('-e')
install_cmd.append('.')
else:
install_cmd = [python_executable, 'setup.py']
if editable:
install_cmd.append('develop')
else:
install_cmd.append('install')
returncode, lines = run_command(install_cmd, cwd=working_dir)
if returncode != 0:
self.fail('\n'.join(lines))
def setUp(self) -> None:
self.simple_prog = ExampleProg(SIMPLE_PROGRAM)
self.from_ns_prog = ExampleProg(create_ns_program_src(NSImportStyle.from_import))
self.import_as_ns_prog = ExampleProg(create_ns_program_src(NSImportStyle.import_as))
self.regular_import_ns_prog = ExampleProg(create_ns_program_src(NSImportStyle.reg_import))
def tearDown(self) -> None:
self.simple_prog.cleanup()
self.from_ns_prog.cleanup()
self.import_as_ns_prog.cleanup()
self.regular_import_ns_prog.cleanup()
def test_get_pkg_dirs(self) -> None:
dirs = get_site_packages_dirs(sys.executable)
assert dirs
def test_typedpkg_stub_package(self) -> None:
self.simple_prog.create()
with self.virtualenv() as venv:
venv_dir, python_executable = venv
self.install_package('typedpkg-stubs', python_executable)
self.simple_prog.check_mypy_run(
python_executable,
[SimpleMsg.msg_dne, SimpleMsg.msg_list],
venv_dir=venv_dir,
)
def test_typedpkg(self) -> None:
self.simple_prog.create()
with self.virtualenv() as venv:
venv_dir, python_executable = venv
self.install_package('typedpkg', python_executable)
self.simple_prog.check_mypy_run(
python_executable,
[SimpleMsg.msg_tuple],
venv_dir=venv_dir,
)
def test_stub_and_typed_pkg(self) -> None:
self.simple_prog.create()
with self.virtualenv() as venv:
venv_dir, python_executable = venv
self.install_package('typedpkg', python_executable)
self.install_package('typedpkg-stubs', python_executable)
self.simple_prog.check_mypy_run(
python_executable,
[SimpleMsg.msg_list],
venv_dir=venv_dir,
)
def test_typedpkg_stubs_python2(self) -> None:
self.simple_prog.create()
python2 = try_find_python2_interpreter()
if python2:
with self.virtualenv(python2) as venv:
venv_dir, py2 = venv
self.install_package('typedpkg-stubs', py2)
self.simple_prog.check_mypy_run(
py2,
[SimpleMsg.msg_dne, SimpleMsg.msg_list],
venv_dir=venv_dir,
)
def test_typedpkg_python2(self) -> None:
self.simple_prog.create()
python2 = try_find_python2_interpreter()
if python2:
with self.virtualenv(python2) as venv:
venv_dir, py2 = venv
self.install_package('typedpkg', py2)
self.simple_prog.check_mypy_run(
py2,
[SimpleMsg.msg_tuple],
venv_dir=venv_dir,
)
def test_typedpkg_egg(self) -> None:
self.simple_prog.create()
with self.virtualenv() as venv:
venv_dir, python_executable = venv
self.install_package('typedpkg', python_executable, use_pip=False)
self.simple_prog.check_mypy_run(
python_executable,
[SimpleMsg.msg_tuple],
venv_dir=venv_dir,
)
def test_typedpkg_editable(self) -> None:
self.simple_prog.create()
with self.virtualenv() as venv:
venv_dir, python_executable = venv
self.install_package('typedpkg', python_executable, editable=True)
self.simple_prog.check_mypy_run(
python_executable,
[SimpleMsg.msg_tuple],
venv_dir=venv_dir,
)
def test_typedpkg_egg_editable(self) -> None:
self.simple_prog.create()
with self.virtualenv() as venv:
venv_dir, python_executable = venv
self.install_package('typedpkg', python_executable, use_pip=False, editable=True)
self.simple_prog.check_mypy_run(
python_executable,
[SimpleMsg.msg_tuple],
venv_dir=venv_dir,
)
def test_nested_and_namespace_from_import(self) -> None:
self.from_ns_prog.create()
with self.virtualenv() as venv:
venv_dir, python_executable = venv
self.install_package('typedpkg', python_executable)
self.install_package('typedpkg_ns', python_executable)
self.from_ns_prog.check_mypy_run(
python_executable,
[NamespaceMsg.cfm_beta,
NamespaceMsg.help_note,
NamespaceMsg.to_bool_str,
NamespaceMsg.to_int_bool],
venv_dir=venv_dir,
)
def test_nested_and_namespace_import_as(self) -> None:
self.import_as_ns_prog.create()
with self.virtualenv() as venv:
venv_dir, python_executable = venv
self.install_package('typedpkg', python_executable)
self.install_package('typedpkg_ns', python_executable)
self.import_as_ns_prog.check_mypy_run(
python_executable,
[NamespaceMsg.cfm_beta,
NamespaceMsg.help_note,
NamespaceMsg.bool_str,
NamespaceMsg.int_bool],
venv_dir=venv_dir,
)
def test_nested_and_namespace_regular_import(self) -> None:
self.regular_import_ns_prog.create()
with self.virtualenv() as venv:
venv_dir, python_executable = venv
self.install_package('typedpkg', python_executable)
self.install_package('typedpkg_ns', python_executable)
self.regular_import_ns_prog.check_mypy_run(
python_executable,
[NamespaceMsg.cfm_beta,
NamespaceMsg.help_note,
NamespaceMsg.bool_str,
NamespaceMsg.int_bool],
venv_dir=venv_dir,
)
# Allow running this test module directly, outside the full test runner.
if __name__ == '__main__':
    main()
| true | true |
f72108b9bfb35d1a7e2ad22f95c5ce9bc663f987 | 14,680 | py | Python | scripts/cluster/agent.py | nobusugi246/microk8s | 797720e2d1e74030fc3d8df5d291469c6082aaac | [
"Apache-2.0"
] | null | null | null | scripts/cluster/agent.py | nobusugi246/microk8s | 797720e2d1e74030fc3d8df5d291469c6082aaac | [
"Apache-2.0"
] | null | null | null | scripts/cluster/agent.py | nobusugi246/microk8s | 797720e2d1e74030fc3d8df5d291469c6082aaac | [
"Apache-2.0"
] | null | null | null | #!flask/bin/python
import getopt
import json
import os
import shutil
import socket
import string
import random
import subprocess
import sys
from .common.utils import try_set_file_permissions
from flask import Flask, jsonify, request, abort, Response
app = Flask(__name__)
CLUSTER_API="cluster/api/v1.0"
# Writable snap data directory; all credentials/args/certs live here.
snapdata_path = os.environ.get('SNAP_DATA')
# Bug fix: the microk8s-enable/disable wrappers invoked by configure() are
# shipped in the read-only snap directory ($SNAP), not in $SNAP_DATA.
snap_path = os.environ.get('SNAP')
# Bootstrap tokens a joining node must present.
cluster_tokens_file = "{}/credentials/cluster-tokens.txt".format(snapdata_path)
# Per-node callback tokens issued to joined nodes.
callback_tokens_file = "{}/credentials/callback-tokens.txt".format(snapdata_path)
# This node's own callback token, checked by /configure.
callback_token_file = "{}/credentials/callback-token.txt".format(snapdata_path)
# Tokens of nodes we expect a certificate signing request from.
certs_request_tokens_file = "{}/credentials/certs-request-tokens.txt".format(snapdata_path)
default_port = 25000
default_listen_interface = "0.0.0.0"
def get_service_name(service):
    """
    Returns the service name from its configuration file name.

    The kube-* daemons are configured in files named e.g. "kube-proxy" while
    their systemd units are named after the bare service ("proxy"), so the
    "kube-" prefix must be stripped for those services.

    :param service: the name of the service configuration file
    :returns: the service name
    """
    if service in ["kube-proxy", "kube-apiserver", "kube-scheduler", "kube-controller-manager"]:
        # Bug fix: the original `service[len("kube-"), :]` indexed the string
        # with a tuple and raised TypeError; a slice was intended.
        return service[len("kube-"):]
    else:
        return service
def update_service_argument(service, key, val):
    """
    Adds, updates or removes an argument in the arguments file of the service.

    The file is rewritten through a temporary copy which is then moved into
    place. Passing ``val=None`` removes the argument instead of setting it.

    :param service: the service
    :param key: the argument to add or update
    :param val: the value for the argument, or None to remove the argument
    """
    args_file = "{}/args/{}".format(snapdata_path, service)
    args_file_tmp = "{}/args/{}.tmp".format(snapdata_path, service)
    found = False
    with open(args_file_tmp, "w+") as bfp:
        with open(args_file, "r+") as fp:
            for _, line in enumerate(fp):
                if line.startswith(key):
                    # Replace the existing value (or drop the line on removal).
                    if val is not None:
                        bfp.write("{}={}\n".format(key, val))
                    found = True
                else:
                    bfp.write("{}\n".format(line.rstrip()))
            if not found and val is not None:
                # The argument was not present; append it at the end.
                bfp.write("{}={}\n".format(key, val))
    try_set_file_permissions(args_file_tmp)
    shutil.move(args_file_tmp, args_file)
def store_callback_token(node, callback_token):
    """
    Store a callback token for a node, replacing any previous entry.

    The tokens file holds one "<node_endpoint> <token>" pair per line and is
    rewritten via a temporary file that is then moved into place.

    :param node: the node endpoint ("host:port")
    :param callback_token: the token
    """
    tmp_file = "{}.tmp".format(callback_tokens_file)
    if not os.path.isfile(callback_tokens_file):
        # Create the tokens file with restrictive permissions on first use.
        open(callback_tokens_file, 'a+')
        os.chmod(callback_tokens_file, 0o600)
    with open(tmp_file, "w") as backup_fp:
        os.chmod(tmp_file, 0o600)
        found = False
        with open(callback_tokens_file, 'r+') as callback_fp:
            for _, line in enumerate(callback_fp):
                if line.startswith(node):
                    # Overwrite the existing token for this node.
                    backup_fp.write("{} {}\n".format(node, callback_token))
                    found = True
                else:
                    backup_fp.write(line)
        if not found:
            backup_fp.write("{} {}\n".format(node, callback_token))
    try_set_file_permissions(tmp_file)
    shutil.move(tmp_file, callback_tokens_file)
def sign_client_cert(cert_request, token):
    """
    Sign a certificate request with the cluster CA.

    :param cert_request: the request (PEM-encoded CSR text)
    :param token: a token acting as a request uuid
    :returns: the signed certificate (PEM text)
    """
    req_file = "{}/certs/request.{}.csr".format(snapdata_path, token)
    # NOTE(review): the token is interpolated into file names handed to
    # openssl; this assumes tokens are locally issued random strings that
    # cannot contain path separators or whitespace -- confirm at call sites.
    sign_cmd = "openssl x509 -req -in {csr} -CA {SNAP_DATA}/certs/ca.crt -CAkey" \
               " {SNAP_DATA}/certs/ca.key -CAcreateserial -out {SNAP_DATA}/certs/server.{token}.crt" \
               " -days 100000".format(csr=req_file, SNAP_DATA=snapdata_path, token=token)
    with open(req_file, 'w') as fp:
        fp.write(cert_request)
    # Shell out to openssl to sign; raises CalledProcessError on failure.
    subprocess.check_call(sign_cmd.split())
    with open("{SNAP_DATA}/certs/server.{token}.crt".format(SNAP_DATA=snapdata_path, token=token)) as fp:
        cert = fp.read()
    return cert
def add_token_to_certs_request(token):
    """
    Record a token for a node we expect a certificate signing request from.

    :param token: the token to append to the pending-requests file
    """
    with open(certs_request_tokens_file, "a+") as token_fp:
        token_fp.write("%s\n" % token)
def remove_token_from_file(token, file):
    """
    Remove a token from the valid tokens set.

    The file is copied line by line into a ".backup" file, skipping the
    matching token, and then copied back over the original.

    :param token: the token to be removed
    :param file: the file it should be removed from
    """
    backup_file = "{}.backup".format(file)
    # That is a critical section. We need to protect it.
    # We are safe for now because flask serves one request at a time.
    with open(backup_file, 'w') as back_fp:
        with open(file, 'r') as fp:
            for _, line in enumerate(fp):
                if line.startswith(token):
                    # Drop the matching token line; keep everything else.
                    continue
                back_fp.write("{}".format(line))
    shutil.copyfile(backup_file, file)
def get_token(name):
    """
    Get token from known_tokens file.

    :param name: the name of the entry (user or node) to look up
    :returns: the token or None (if name doesn't exist)
    """
    file = "{}/credentials/known_tokens.csv".format(snapdata_path)
    with open(file) as fp:
        # Bug fix: scan every line of the file. The original called
        # readline() once, so only the first entry could ever be found.
        for line in fp:
            if name in line:
                # Each line is "<token>,<user>,<id>[,...]"; the token is first.
                parts = line.split(',')
                return parts[0].rstrip()
    return None
def add_kubelet_token(hostname):
    """
    Add a token for a node in the known tokens.

    If the node already has a token (a re-joining node), that token is
    reused instead of issuing a new one.

    :param hostname: the name of the node
    :returns: the token added
    """
    file = "{}/credentials/known_tokens.csv".format(snapdata_path)
    old_token = get_token("system:node:{}".format(hostname))
    if old_token:
        # Re-joining node: hand back the previously issued token.
        return old_token.rstrip()
    alpha = string.ascii_letters + string.digits
    # SystemRandom is a CSPRNG; the token authenticates the kubelet.
    token = ''.join(random.SystemRandom().choice(alpha) for _ in range(32))
    uid = ''.join(random.SystemRandom().choice(string.digits) for _ in range(8))
    with open(file, 'a') as fp:
        # TODO double check this format. Why is userid unique?
        line = "{},system:node:{},kubelet,kubelet-{},\"system:nodes\"".format(token, hostname, uid)
        fp.write(line + os.linesep)
    return token.rstrip()
def getCA():
    """Return the contents of the cluster CA certificate file."""
    with open("{}/certs/ca.crt".format(snapdata_path)) as ca_fp:
        return ca_fp.read()
def get_arg(key, file):
    """
    Get an argument from an arguments file.

    :param key: the argument we look for
    :param file: the arguments file to search in
    :returns: the value of the argument or None (if the key doesn't exist)
    """
    filename = "{}/args/{}".format(snapdata_path, file)
    with open(filename) as fp:
        for _, line in enumerate(fp):
            if line.startswith(key):
                # Accept both "key value" and "key=value" forms: take the
                # last space-separated token, then whatever follows "=".
                args = line.split(' ')
                args = args[-1].split('=')
                return args[-1].rstrip()
    return None
def is_valid(token, token_type=cluster_tokens_file):
    """
    Check whether a token appears in the given tokens file.

    :param token: token to be checked
    :param token_type: the tokens file to search (defaults to the bootstrap tokens)
    :returns: True for a valid token, False otherwise
    """
    with open(token_type) as token_fp:
        return any(line.startswith(token) for line in token_fp)
def read_kubelet_args_file(node=None):
    """
    Return the contents of the kubelet arguments file.

    :param node: node name to append as a --hostname-override (defaults to None)
    :returns: the kubelet args file contents
    """
    filename = "{}/args/kubelet".format(snapdata_path)
    with open(filename) as fp:
        args = fp.read()
        if node:
            # NOTE(review): assumes the args file ends with a newline;
            # otherwise the override would merge onto the last argument's
            # line -- confirm against the packaged kubelet args file.
            args = "{}--hostname-override {}".format(args, node)
        return args
def get_node_ep(hostname, remote_addr):
    """
    Return the endpoint to be used for the node by trying to resolve the
    hostname provided.

    :param hostname: the provided hostname
    :param remote_addr: the address the request came from
    :returns: the hostname if it resolves, otherwise the remote address
    """
    try:
        socket.gethostbyname(hostname)
        return hostname
    except socket.gaierror:
        # The hostname does not resolve from this host; fall back to the
        # address the join request came from.
        return remote_addr
    # (The original had an unreachable trailing `return remote_addr` here;
    # both branches above already return.)
@app.route('/{}/join'.format(CLUSTER_API), methods=['POST'])
def join_node():
    """
    Web call to join a node to the cluster.

    Validates the bootstrap token, registers the node's callback token,
    issues a kubelet token, and returns everything the joining node needs
    (CA, etcd endpoint, API port, proxy/kubelet tokens, kubelet args).
    """
    # Accept either a JSON payload or form-encoded fields.
    if request.headers['Content-Type'] == 'application/json':
        token = request.json['token']
        hostname = request.json['hostname']
        port = request.json['port']
        callback_token = request.json['callback']
    else:
        token = request.form['token']
        hostname = request.form['hostname']
        port = request.form['port']
        callback_token = request.form['callback']
    if not is_valid(token):
        error_msg={"error": "Invalid token"}
        return Response(json.dumps(error_msg), mimetype='application/json', status=500)
    # The bootstrap token is single-use: move it from the cluster tokens to
    # the set of tokens we expect a certificate request from.
    add_token_to_certs_request(token)
    remove_token_from_file(token, cluster_tokens_file)
    node_addr = get_node_ep(hostname, request.remote_addr)
    node_ep = "{}:{}".format(node_addr, port)
    store_callback_token(node_ep, callback_token)
    ca = getCA()
    etcd_ep = get_arg('--listen-client-urls', 'etcd')
    api_port = get_arg('--secure-port', 'kube-apiserver')
    proxy_token = get_token('kube-proxy')
    kubelet_token = add_kubelet_token(hostname)
    # NOTE(review): presumably the API server only reads known_tokens.csv at
    # startup, hence the restart to pick up the new kubelet token -- confirm.
    subprocess.check_call("systemctl restart snap.microk8s.daemon-apiserver.service".split())
    if node_addr != hostname:
        # Hostname did not resolve: force a hostname override in the args.
        kubelet_args = read_kubelet_args_file(node_addr)
    else:
        kubelet_args = read_kubelet_args_file()
    return jsonify(ca=ca,
                   etcd=etcd_ep,
                   kubeproxy=proxy_token,
                   apiport=api_port,
                   kubelet=kubelet_token,
                   kubelet_args=kubelet_args,
                   hostname_override=node_addr)
@app.route('/{}/sign-cert'.format(CLUSTER_API), methods=['POST'])
def sign_cert():
    """
    Web call to sign a certificate signing request from a joining node.

    The token presented must have been registered by a prior /join call;
    it is consumed (removed) once the certificate is signed.
    """
    # Accept either a JSON payload or form-encoded fields.
    if request.headers['Content-Type'] == 'application/json':
        token = request.json['token']
        cert_request = request.json['request']
    else:
        token = request.form['token']
        cert_request = request.form['request']
    if not is_valid(token, certs_request_tokens_file):
        error_msg={"error": "Invalid token"}
        return Response(json.dumps(error_msg), mimetype='application/json', status=500)
    # Single-use token: consume it before signing.
    remove_token_from_file(token, certs_request_tokens_file)
    signed_cert = sign_client_cert(cert_request, token)
    return jsonify(certificate=signed_cert)
@app.route('/{}/configure'.format(CLUSTER_API), methods=['POST'])
def configure():
    """
    Web call to configure the node: update/remove service arguments,
    restart services, and enable/disable addons, as described by the
    JSON payload (see the example document below).
    """
    if request.headers['Content-Type'] == 'application/json':
        callback_token = request.json['callback']
        configuration = request.json
    else:
        callback_token = request.form['callback']
        configuration = json.loads(request.form['configuration'])
    # Only the holder of this node's callback token may reconfigure it.
    if not is_valid(callback_token, callback_token_file):
        error_msg={"error": "Invalid token"}
        return Response(json.dumps(error_msg), mimetype='application/json', status=500)
    # We expect something like this:
    '''
    {
        "callback": "xyztoken"
        "service":
        [
            {
                "name": "kubelet",
                "arguments_remove":
                [
                    "myoldarg"
                ],
                "arguments_update":
                [
                    {"myarg": "myvalue"},
                    {"myarg2": "myvalue2"},
                    {"myarg3": "myvalue3"}
                ],
                "restart": False
            },
            {
                "name": "kube-proxy",
                "restart": True
            }
        ],
        "addon":
        [
            {
                "name": "gpu",
                "enable": True
            },
            {
                "name": "gpu",
                "disable": True
            }
        ]
    }
    '''
    if "service" in configuration:
        for service in configuration["service"]:
            print("{}".format(service["name"]))
            if "arguments_update" in service:
                print("Updating arguments")
                for argument in service["arguments_update"]:
                    for key, val in argument.items():
                        print("{} is {}".format(key, val))
                        update_service_argument(service["name"], key, val)
            if "arguments_remove" in service:
                print("Removing arguments")
                for argument in service["arguments_remove"]:
                    print("{}".format(argument))
                    update_service_argument(service["name"], argument, None)
            if "restart" in service and service["restart"]:
                # Map the config file name to the systemd unit suffix.
                service_name = get_service_name(service["name"])
                print("restarting {}".format(service["name"]))
                subprocess.check_call("systemctl restart snap.microk8s.daemon-{}.service".format(service_name).split())
    if "addon" in configuration:
        for addon in configuration["addon"]:
            print("{}".format(addon["name"]))
            if "enable" in addon and addon["enable"]:
                print("Enabling {}".format(addon["name"]))
                subprocess.check_call("{}/microk8s-enable.wrapper {}".format(snap_path, addon["name"]).split())
            if "disable" in addon and addon["disable"]:
                print("Disabling {}".format(addon["name"]))
                subprocess.check_call("{}/microk8s-disable.wrapper {}".format(snap_path, addon["name"]).split())
    resp_date = {"result": "ok"}
    resp = Response(json.dumps(resp_date), status=200, mimetype='application/json')
    return resp
def usage():
    """Print command-line help for the cluster agent."""
    help_lines = (
        "Agent responsible for setting up a cluster. Arguments:",
        "-l, --listen: interfaces to listen to (defaults to {})".format(default_listen_interface),
        "-p, --port: port to listen to (default {})".format(default_port),
    )
    for help_line in help_lines:
        print(help_line)
if __name__ == '__main__':
    server_cert = "{SNAP_DATA}/certs/server.crt".format(SNAP_DATA=snapdata_path)
    server_key = "{SNAP_DATA}/certs/server.key".format(SNAP_DATA=snapdata_path)
    try:
        opts, args = getopt.gnu_getopt(sys.argv[1:], "hl:p:", ["help", "listen=", "port="])
    except getopt.GetoptError as err:
        print(err)  # will print something like "option -a not recognized"
        usage()
        sys.exit(2)
    port = default_port
    listen = default_listen_interface
    for o, a in opts:
        # Bug fix: options must form a single if/elif chain. The original
        # handled "-l" in a separate `if`, so a valid --listen option then
        # fell through to the final `else` and hit `assert False`.
        if o in ("-l", "--listen"):
            listen = a
        elif o in ("-p", "--port"):
            port = a
        elif o in ("-h", "--help"):
            usage()
            sys.exit(1)
        else:
            assert False, "unhandled option"
    # Serve the cluster agent API over TLS using the snap's certificates.
    app.run(host=listen, port=port, ssl_context=(server_cert, server_key))
| 32.767857 | 119 | 0.611512 |
import getopt
import json
import os
import shutil
import socket
import string
import random
import subprocess
import sys
from .common.utils import try_set_file_permissions
from flask import Flask, jsonify, request, abort, Response
app = Flask(__name__)
CLUSTER_API="cluster/api/v1.0"
snapdata_path = os.environ.get('SNAP_DATA')
# Fixed: the enable/disable wrappers invoked by configure() live in the
# read-only snap directory ($SNAP), not in $SNAP_DATA.
snap_path = os.environ.get('SNAP')
cluster_tokens_file = "{}/credentials/cluster-tokens.txt".format(snapdata_path)
callback_tokens_file = "{}/credentials/callback-tokens.txt".format(snapdata_path)
callback_token_file = "{}/credentials/callback-token.txt".format(snapdata_path)
certs_request_tokens_file = "{}/credentials/certs-request-tokens.txt".format(snapdata_path)
default_port = 25000
default_listen_interface = "0.0.0.0"
def get_service_name(service):
    """Strip the "kube-" prefix so the systemd unit suffix can be derived."""
    if service in ["kube-proxy", "kube-apiserver", "kube-scheduler", "kube-controller-manager"]:
        # Fixed: the original tuple index `[len("kube-"), :]` raised TypeError.
        return service[len("kube-"):]
    else:
        return service
def update_service_argument(service, key, val):
args_file = "{}/args/{}".format(snapdata_path, service)
args_file_tmp = "{}/args/{}.tmp".format(snapdata_path, service)
found = False
with open(args_file_tmp, "w+") as bfp:
with open(args_file, "r+") as fp:
for _, line in enumerate(fp):
if line.startswith(key):
if val is not None:
bfp.write("{}={}\n".format(key, val))
found = True
else:
bfp.write("{}\n".format(line.rstrip()))
if not found and val is not None:
bfp.write("{}={}\n".format(key, val))
try_set_file_permissions(args_file_tmp)
shutil.move(args_file_tmp, args_file)
def store_callback_token(node, callback_token):
tmp_file = "{}.tmp".format(callback_tokens_file)
if not os.path.isfile(callback_tokens_file):
open(callback_tokens_file, 'a+')
os.chmod(callback_tokens_file, 0o600)
with open(tmp_file, "w") as backup_fp:
os.chmod(tmp_file, 0o600)
found = False
with open(callback_tokens_file, 'r+') as callback_fp:
for _, line in enumerate(callback_fp):
if line.startswith(node):
backup_fp.write("{} {}\n".format(node, callback_token))
found = True
else:
backup_fp.write(line)
if not found:
backup_fp.write("{} {}\n".format(node, callback_token))
try_set_file_permissions(tmp_file)
shutil.move(tmp_file, callback_tokens_file)
def sign_client_cert(cert_request, token):
req_file = "{}/certs/request.{}.csr".format(snapdata_path, token)
sign_cmd = "openssl x509 -req -in {csr} -CA {SNAP_DATA}/certs/ca.crt -CAkey" \
" {SNAP_DATA}/certs/ca.key -CAcreateserial -out {SNAP_DATA}/certs/server.{token}.crt" \
" -days 100000".format(csr=req_file, SNAP_DATA=snapdata_path, token=token)
with open(req_file, 'w') as fp:
fp.write(cert_request)
subprocess.check_call(sign_cmd.split())
with open("{SNAP_DATA}/certs/server.{token}.crt".format(SNAP_DATA=snapdata_path, token=token)) as fp:
cert = fp.read()
return cert
def add_token_to_certs_request(token):
with open(certs_request_tokens_file, "a+") as fp:
fp.write("{}\n".format(token))
def remove_token_from_file(token, file):
backup_file = "{}.backup".format(file)
with open(backup_file, 'w') as back_fp:
with open(file, 'r') as fp:
for _, line in enumerate(fp):
if line.startswith(token):
continue
back_fp.write("{}".format(line))
shutil.copyfile(backup_file, file)
def get_token(name):
    """Return the token for *name* from known_tokens.csv, or None."""
    file = "{}/credentials/known_tokens.csv".format(snapdata_path)
    with open(file) as fp:
        # Fixed: scan the whole file; the original read only the first line.
        for line in fp:
            if name in line:
                parts = line.split(',')
                return parts[0].rstrip()
    return None
def add_kubelet_token(hostname):
file = "{}/credentials/known_tokens.csv".format(snapdata_path)
old_token = get_token("system:node:{}".format(hostname))
if old_token:
return old_token.rstrip()
alpha = string.ascii_letters + string.digits
token = ''.join(random.SystemRandom().choice(alpha) for _ in range(32))
uid = ''.join(random.SystemRandom().choice(string.digits) for _ in range(8))
with open(file, 'a') as fp:
line = "{},system:node:{},kubelet,kubelet-{},\"system:nodes\"".format(token, hostname, uid)
fp.write(line + os.linesep)
return token.rstrip()
def getCA():
ca_file = "{}/certs/ca.crt".format(snapdata_path)
with open(ca_file) as fp:
ca = fp.read()
return ca
def get_arg(key, file):
filename = "{}/args/{}".format(snapdata_path, file)
with open(filename) as fp:
for _, line in enumerate(fp):
if line.startswith(key):
args = line.split(' ')
args = args[-1].split('=')
return args[-1].rstrip()
return None
def is_valid(token, token_type=cluster_tokens_file):
with open(token_type) as fp:
for _, line in enumerate(fp):
if line.startswith(token):
return True
return False
def read_kubelet_args_file(node=None):
filename = "{}/args/kubelet".format(snapdata_path)
with open(filename) as fp:
args = fp.read()
if node:
args = "{}--hostname-override {}".format(args, node)
return args
def get_node_ep(hostname, remote_addr):
    """Prefer the hostname when it resolves; otherwise use the caller's address."""
    try:
        socket.gethostbyname(hostname)
        return hostname
    except socket.gaierror:
        return remote_addr
    # (Removed the unreachable trailing `return remote_addr`.)
@app.route('/{}/join'.format(CLUSTER_API), methods=['POST'])
def join_node():
if request.headers['Content-Type'] == 'application/json':
token = request.json['token']
hostname = request.json['hostname']
port = request.json['port']
callback_token = request.json['callback']
else:
token = request.form['token']
hostname = request.form['hostname']
port = request.form['port']
callback_token = request.form['callback']
if not is_valid(token):
error_msg={"error": "Invalid token"}
return Response(json.dumps(error_msg), mimetype='application/json', status=500)
add_token_to_certs_request(token)
remove_token_from_file(token, cluster_tokens_file)
node_addr = get_node_ep(hostname, request.remote_addr)
node_ep = "{}:{}".format(node_addr, port)
store_callback_token(node_ep, callback_token)
ca = getCA()
etcd_ep = get_arg('--listen-client-urls', 'etcd')
api_port = get_arg('--secure-port', 'kube-apiserver')
proxy_token = get_token('kube-proxy')
kubelet_token = add_kubelet_token(hostname)
subprocess.check_call("systemctl restart snap.microk8s.daemon-apiserver.service".split())
if node_addr != hostname:
kubelet_args = read_kubelet_args_file(node_addr)
else:
kubelet_args = read_kubelet_args_file()
return jsonify(ca=ca,
etcd=etcd_ep,
kubeproxy=proxy_token,
apiport=api_port,
kubelet=kubelet_token,
kubelet_args=kubelet_args,
hostname_override=node_addr)
@app.route('/{}/sign-cert'.format(CLUSTER_API), methods=['POST'])
def sign_cert():
if request.headers['Content-Type'] == 'application/json':
token = request.json['token']
cert_request = request.json['request']
else:
token = request.form['token']
cert_request = request.form['request']
if not is_valid(token, certs_request_tokens_file):
error_msg={"error": "Invalid token"}
return Response(json.dumps(error_msg), mimetype='application/json', status=500)
remove_token_from_file(token, certs_request_tokens_file)
signed_cert = sign_client_cert(cert_request, token)
return jsonify(certificate=signed_cert)
@app.route('/{}/configure'.format(CLUSTER_API), methods=['POST'])
def configure():
if request.headers['Content-Type'] == 'application/json':
callback_token = request.json['callback']
configuration = request.json
else:
callback_token = request.form['callback']
configuration = json.loads(request.form['configuration'])
if not is_valid(callback_token, callback_token_file):
error_msg={"error": "Invalid token"}
return Response(json.dumps(error_msg), mimetype='application/json', status=500)
if "service" in configuration:
for service in configuration["service"]:
print("{}".format(service["name"]))
if "arguments_update" in service:
print("Updating arguments")
for argument in service["arguments_update"]:
for key, val in argument.items():
print("{} is {}".format(key, val))
update_service_argument(service["name"], key, val)
if "arguments_remove" in service:
print("Removing arguments")
for argument in service["arguments_remove"]:
print("{}".format(argument))
update_service_argument(service["name"], argument, None)
if "restart" in service and service["restart"]:
service_name = get_service_name(service["name"])
print("restarting {}".format(service["name"]))
subprocess.check_call("systemctl restart snap.microk8s.daemon-{}.service".format(service_name).split())
if "addon" in configuration:
for addon in configuration["addon"]:
print("{}".format(addon["name"]))
if "enable" in addon and addon["enable"]:
print("Enabling {}".format(addon["name"]))
subprocess.check_call("{}/microk8s-enable.wrapper {}".format(snap_path, addon["name"]).split())
if "disable" in addon and addon["disable"]:
print("Disabling {}".format(addon["name"]))
subprocess.check_call("{}/microk8s-disable.wrapper {}".format(snap_path, addon["name"]).split())
resp_date = {"result": "ok"}
resp = Response(json.dumps(resp_date), status=200, mimetype='application/json')
return resp
def usage():
print("Agent responsible for setting up a cluster. Arguments:")
print("-l, --listen: interfaces to listen to (defaults to {})".format(default_listen_interface))
print("-p, --port: port to listen to (default {})".format(default_port))
if __name__ == '__main__':
    server_cert = "{SNAP_DATA}/certs/server.crt".format(SNAP_DATA=snapdata_path)
    server_key = "{SNAP_DATA}/certs/server.key".format(SNAP_DATA=snapdata_path)
    try:
        opts, args = getopt.gnu_getopt(sys.argv[1:], "hl:p:", ["help", "listen=", "port="])
    except getopt.GetoptError as err:
        print(err)
        usage()
        sys.exit(2)
    port = default_port
    listen = default_listen_interface
    for o, a in opts:
        # Fixed: use one if/elif chain; a separate `if` for "-l" made the
        # trailing `else: assert False` fire for valid --listen options.
        if o in ("-l", "--listen"):
            listen = a
        elif o in ("-p", "--port"):
            port = a
        elif o in ("-h", "--help"):
            usage()
            sys.exit(1)
        else:
            assert False, "unhandled option"
    app.run(host=listen, port=port, ssl_context=(server_cert, server_key))
| true | true |
f721099fd7f552499a35dce11281e52eec0ef465 | 887 | py | Python | OpenCV/Glyph/fontReplacePixel.py | GaryMK/Machine-Learning | 0eb89ed4c6ea712f518741fdcc63f1b2109b4212 | [
"MIT"
] | 1 | 2021-03-12T07:46:00.000Z | 2021-03-12T07:46:00.000Z | OpenCV/Glyph/fontReplacePixel.py | GaryMK/Kaggle | 0eb89ed4c6ea712f518741fdcc63f1b2109b4212 | [
"MIT"
] | null | null | null | OpenCV/Glyph/fontReplacePixel.py | GaryMK/Kaggle | 0eb89ed4c6ea712f518741fdcc63f1b2109b4212 | [
"MIT"
] | null | null | null | # @author: GaryMK
# @EMAIL: chenxingmk@gmail.com
# @Date: 2021/2/14 0:28
# @Version: 1.0
# @Description:
from PIL import Image, ImageDraw, ImageFont
import cv2
import os
def draw(pic):
    """Re-render an image as a grid of coloured glyphs.

    Reads source/<pic>, samples the colour of every n-th pixel and draws a
    character from `text` in that colour at the matching position on a white
    canvas, then saves the result as replaced/replaced_<pic>.
    """
    img = cv2.imread('source/' + pic)
    # OpenCV loads BGR; reorder the channels to RGB.
    img = img[:, :, (2, 1, 0)]
    blank = Image.new("RGB", [len(img[0]), len(img)], "white")
    drawObj = ImageDraw.Draw(blank)
    # Glyph cell size in pixels; the font is one point smaller than the cell.
    n = 10
    # NOTE(review): hard-coded Windows font path; this breaks on other OSes.
    font = ImageFont.truetype('C:/Windows/Fonts/Microsoft YaHei UI/msyhbd.ttc', size=n - 1)
    for i in range(0, len(img), n):
        for j in range(0, len(img[i]), n):
            text = '晨星'
            # Pack the sampled pixel into a single integer ink value
            # (channel0 + channel1*256 + channel2*65536 after the swap above)
            # -- NOTE(review): confirm this matches PIL's expected packing.
            drawObj.ink = img[i][j][0] + img[i][j][1] * 256 + img[i][j][2] * 256 * 256
            # Alternate through the characters of `text` across columns.
            drawObj.text([j, i], text[int(j / n) % len(text)], font=font)
            print('完成处理——', i, j)
    blank.save('replaced/replaced_' + pic, 'jpeg')
# Process every image found in the source directory.
for source_pic in os.listdir('source'):
    draw(source_pic)
| 25.342857 | 91 | 0.563698 |
from PIL import Image, ImageDraw, ImageFont
import cv2
import os
def draw(pic):
img = cv2.imread('source/' + pic)
img = img[:, :, (2, 1, 0)]
blank = Image.new("RGB", [len(img[0]), len(img)], "white")
drawObj = ImageDraw.Draw(blank)
n = 10
font = ImageFont.truetype('C:/Windows/Fonts/Microsoft YaHei UI/msyhbd.ttc', size=n - 1)
for i in range(0, len(img), n):
for j in range(0, len(img[i]), n):
text = '晨星'
drawObj.ink = img[i][j][0] + img[i][j][1] * 256 + img[i][j][2] * 256 * 256
drawObj.text([j, i], text[int(j / n) % len(text)], font=font)
print('完成处理——', i, j)
blank.save('replaced/replaced_' + pic, 'jpeg')
filelist = os.listdir('source')
for file in filelist:
draw(file)
| true | true |
f7210a163a4280e095d1c9a4bc619202c8d534a1 | 29 | py | Python | nlpblock/model/__init__.py | graykode/nlpblock | d7cd9e6d7a0ee401b8fecdbbf3a0ac60bdb3c0d7 | [
"MIT"
] | 3 | 2019-02-27T13:41:26.000Z | 2021-05-13T07:02:39.000Z | nlpblock/model/__init__.py | graykode/nlpblock | d7cd9e6d7a0ee401b8fecdbbf3a0ac60bdb3c0d7 | [
"MIT"
] | null | null | null | nlpblock/model/__init__.py | graykode/nlpblock | d7cd9e6d7a0ee401b8fecdbbf3a0ac60bdb3c0d7 | [
"MIT"
] | 3 | 2019-03-02T02:19:46.000Z | 2021-10-03T18:46:52.000Z | from nlpblock.model import *
| 14.5 | 28 | 0.793103 | from nlpblock.model import *
| true | true |
f7210a7be7a7a9686e849af8805af4b5236ca87c | 1,558 | py | Python | Code/finance.py | Naghipourfar/TraderBot | 2604c9df7af7394dfab6a54ea9a65a1b0df6a0ce | [
"MIT"
] | 3 | 2019-02-06T09:45:39.000Z | 2022-01-15T04:48:07.000Z | Code/finance.py | Naghipourfar/TraderBot | 2604c9df7af7394dfab6a54ea9a65a1b0df6a0ce | [
"MIT"
] | null | null | null | Code/finance.py | Naghipourfar/TraderBot | 2604c9df7af7394dfab6a54ea9a65a1b0df6a0ce | [
"MIT"
] | 1 | 2020-01-07T05:20:24.000Z | 2020-01-07T05:20:24.000Z | import numpy as np
import pandas as pd
from pandas_datareader import data
import tensorflow as tf
import matplotlib.pyplot as plt
import keras
from keras.layers import Input, Dense, Dropout, BatchNormalization
from keras.models import Model
from keras.callbacks import History, CSVLogger
"""
Created by Mohsen Naghipourfar on 7/23/18.
Email : mn7697np@gmail.com or naghipourfar@ce.sharif.edu
Website: http://ce.sharif.edu/~naghipourfar
Github: https://github.com/naghipourfar
Skype: mn7697np
"""
tickers = ['AAPL', 'MSFT', '^GSPC'] # Apple, Microsoft and S&P500 index
# We would like all available data from 01/01/2000 until 12/31/2016.
start_date = '2010-01-01'
end_date = '2016-12-31'
panel_data = data.DataReader('INPX', 'google', start_date, end_date)
''' returns a panel object (3D Object)
1st dim: various fields of finance -> open, close, high, low, ...
2nd dim: date
3rd dim: instrument identifiers
'''
# df_data = panel_data.to_frame()
all_weekdays = pd.date_range(start_date, end_date, freq='B')
close = panel_data['close']
close = close.reindex(all_weekdays)
close = close.fillna(method='ffill')
short_rolling = close.rolling(window=20).mean()
long_rolling = close.rolling(window=100).mean()
fig, ax = plt.subplots(figsize=(16,9))
ax.plot(close.index, close, label='close')
ax.plot(short_rolling.index, short_rolling, label='20 days rolling')
ax.plot(long_rolling.index, long_rolling, label='100 days rolling')
ax.set_xlabel('Date')
ax.set_ylabel('Adjusted closing price ($)')
ax.legend()
plt.show()
| 28.327273 | 72 | 0.734275 | import numpy as np
import pandas as pd
from pandas_datareader import data
import tensorflow as tf
import matplotlib.pyplot as plt
import keras
from keras.layers import Input, Dense, Dropout, BatchNormalization
from keras.models import Model
from keras.callbacks import History, CSVLogger
tickers = ['AAPL', 'MSFT', '^GSPC']
start_date = '2010-01-01'
end_date = '2016-12-31'
panel_data = data.DataReader('INPX', 'google', start_date, end_date)
all_weekdays = pd.date_range(start_date, end_date, freq='B')
close = panel_data['close']
close = close.reindex(all_weekdays)
close = close.fillna(method='ffill')
short_rolling = close.rolling(window=20).mean()
long_rolling = close.rolling(window=100).mean()
fig, ax = plt.subplots(figsize=(16,9))
ax.plot(close.index, close, label='close')
ax.plot(short_rolling.index, short_rolling, label='20 days rolling')
ax.plot(long_rolling.index, long_rolling, label='100 days rolling')
ax.set_xlabel('Date')
ax.set_ylabel('Adjusted closing price ($)')
ax.legend()
plt.show()
| true | true |
f7210a7f9de0f160b00a0a52aaf0e082c37d647d | 1,685 | py | Python | lib/lib_apscheduler.py | ZhaoUncle/skstack | 9e00305f50fdd60125ec37884247b94b70a9020c | [
"Apache-2.0"
] | null | null | null | lib/lib_apscheduler.py | ZhaoUncle/skstack | 9e00305f50fdd60125ec37884247b94b70a9020c | [
"Apache-2.0"
] | null | null | null | lib/lib_apscheduler.py | ZhaoUncle/skstack | 9e00305f50fdd60125ec37884247b94b70a9020c | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2018年6月19日 @author: encodingl
'''
import time
import datetime
from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.schedulers.background import BackgroundScheduler
def job1(f):
    """Print the current local time followed by the given label."""
    timestamp = time.strftime('%Y-%m-%d %H:%M:%S')
    print(timestamp, f)
def job2(args1, args2, f):
    """Print the label followed by both positional arguments."""
    values = (f, args1, args2)
    print(*values)
def job3(**kwargs):
    """Print all keyword arguments as a dict."""
    print(kwargs)
'''
APScheduler支持以下三种定时任务:
cron: crontab类型任务
interval: 固定时间间隔任务
date: 基于日期时间的一次性任务
'''
if __name__ == "__main__":
scheduler = BlockingScheduler()
#循环任务示例
scheduler.add_job(job1, 'interval', seconds=3, args=('循环',), id='test_job1')
#定时任务示例
scheduler.add_job(job1, 'cron', second='*/4', args=('定时',), id='test_job2')
#一次性任务示例
scheduler.add_job(job1, next_run_time=(datetime.datetime.now() + datetime.timedelta(seconds=5)), args=('一次',), id='test_job3')
'''
传递参数的方式有元组(tuple)、列表(list)、字典(dict)
注意:不过需要注意采用元组传递参数时后边需要多加一个逗号
'''
# #基于list
# scheduler.add_job(job2, 'interval', seconds=5, args=['a','b','list'], id='test_job4')
# #基于tuple
# scheduler.add_job(job2, 'interval', seconds=5, args=('a','b','tuple',), id='test_job5')
# #基于dict
# scheduler.add_job(job3, 'interval', seconds=5, kwargs={'f':'dict', 'a':1,'b':2}, id='test_job6')
#带有参数的示例
# scheduler.add_job(job2, 'interval', seconds=5, args=['a','b'], id='test_job7')
# scheduler.add_job(job2, 'interval', seconds=5, args=('a','b',), id='test_job8')
# scheduler.add_job(job3, 'interval', seconds=5, kwargs={'a':1,'b':2}, id='test_job9')
print(scheduler.get_jobs())
scheduler.start()
| 29.051724 | 130 | 0.645697 |
import time
import datetime
from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.schedulers.background import BackgroundScheduler
def job1(f):
print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())), f)
def job2(args1, args2, f):
print(f, args1, args2)
def job3(**args):
print(args)
if __name__ == "__main__":
scheduler = BlockingScheduler()
scheduler.add_job(job1, 'interval', seconds=3, args=('循环',), id='test_job1')
scheduler.add_job(job1, 'cron', second='*/4', args=('定时',), id='test_job2')
scheduler.add_job(job1, next_run_time=(datetime.datetime.now() + datetime.timedelta(seconds=5)), args=('一次',), id='test_job3')
print(scheduler.get_jobs())
scheduler.start()
| true | true |
f7210b036da2023fc30a4f620fdbe6743b369a69 | 4,058 | py | Python | movienightbot/db/models.py | squirrelo/MovieNightBot | 53fad77d533f13587d47d64fe7583db55529184a | [
"WTFPL"
] | 3 | 2020-02-22T14:22:21.000Z | 2021-02-04T19:44:38.000Z | movienightbot/db/models.py | squirrelo/MovieNightBot | 53fad77d533f13587d47d64fe7583db55529184a | [
"WTFPL"
] | 42 | 2020-02-10T03:42:29.000Z | 2022-02-12T23:43:43.000Z | movienightbot/db/models.py | squirrelo/MovieNightBot | 53fad77d533f13587d47d64fe7583db55529184a | [
"WTFPL"
] | 3 | 2020-02-14T23:22:24.000Z | 2020-06-06T21:00:14.000Z | import datetime
import peewee as pw
from . import BaseModel
class Server(BaseModel):
    """Per-guild bot configuration (one row per Discord server)."""

    # Discord guild id, used directly as the primary key.
    id = pw.IntegerField(primary_key=True)
    # Channel id the bot operates in.
    channel = pw.IntegerField(null=False)
    # Time of day for movie night, stored as "HH:MM".
    # NOTE(review): peewee's TimeField normally takes `formats` as a *list*
    # of format strings; confirm that a bare string works as intended here.
    movie_time = pw.TimeField(null=False, formats="%H:%M", default="12:00")
    # Name of the role allowed to run admin commands.
    admin_role = pw.TextField(null=False, default="Movie Master")
    # Strategy used to resolve tied votes.
    tie_option = pw.TextField(null=False, default="breaker")
    num_movies_per_vote = pw.SmallIntegerField(null=False, default=8)
    num_votes_per_user = pw.SmallIntegerField(null=False, default=4)
    # When True, new movie suggestions are rejected.
    block_suggestions = pw.BooleanField(null=False, default=False)
    # When True, suggested titles are validated before being accepted.
    check_movie_names = pw.BooleanField(null=False, default=False)
    # Timeout (presumably seconds) for transient bot messages -- confirm units.
    message_timeout = pw.SmallIntegerField(null=False, default=10)
    allow_tv_shows = pw.BooleanField(null=False, default=False)
    class Meta:
        table_name = "servers"
class IMDBInfo(BaseModel):
    """Cached IMDB metadata for a title, keyed by its IMDB ID."""

    imdb_id = pw.TextField(primary_key=True)
    title = pw.TextField(null=False)
    # Alternate/canonical form of the title — presumably IMDB's canonical
    # ordering (articles moved to the end); TODO confirm.
    canonical_title = pw.TextField()
    year = pw.IntegerField()
    thumbnail_poster_url = pw.TextField()
    full_size_poster_url = pw.TextField()

    class Meta:
        table_name = "imdb_info"
class Movie(BaseModel):
    """A movie suggested on a server, with cumulative voting statistics."""

    id = pw.AutoField(primary_key=True)
    server = pw.ForeignKeyField(Server, backref="movies")
    movie_name = pw.TextField(null=False)
    suggested_by = pw.TextField(null=False)
    # Score from the most recent vote — presumably; NULL until it has been voted on.
    last_score = pw.FloatField(null=True)
    # How many votes this movie has been entered into.
    num_votes_entered = pw.IntegerField(null=False, default=0)
    # Running totals across all votes the movie appeared in.
    total_score = pw.FloatField(null=False, default=0.0)
    total_votes = pw.IntegerField(null=False, default=0)
    # Stored as UTC; default is evaluated per-row (callable, not a fixed timestamp).
    suggested_on = pw.TimestampField(
        utc=True, null=False, default=datetime.datetime.utcnow
    )
    # NULL until the movie has actually been watched.
    watched_on = pw.TimestampField(utc=True, null=True, default=None)
    # Optional link to cached IMDB metadata — nullable so servers that don't
    # use IMDB lookups can still store suggestions.
    imdb_id = pw.ForeignKeyField(IMDBInfo, backref="movie_suggestions", null=True)

    class Meta:
        table_name = "movies"
        indexes = (
            # create a unique index on server and movie name
            (("server", "movie_name"), True),
        )
# Genre linked to Movie and not IMDBInfo because this allows non-IMDB servers to still manually add genres to movies
# and do votes by genre
class MovieGenre(BaseModel):
    """A genre label attached to a single movie (one row per movie/genre pair)."""

    movie_id = pw.ForeignKeyField(Movie, backref="movie_genres")
    genre = pw.TextField(null=False, index=True)

    class Meta:
        table_name = "movie_genre"
        indexes = (
            # create a unique index on movie and genre
            (("movie_id", "genre"), True),
        )
class Vote(BaseModel):
    """Tracks the actual vote going on in a server"""

    # The server FK doubles as the primary key, so at most one active vote
    # can exist per server.
    server_id = pw.ForeignKeyField(Server, backref="vote", primary_key=True)
    message_id = pw.IntegerField(
        null=True, help_text="The message ID holding the vote message on the server"
    )
    channel_id = pw.IntegerField(
        null=True, help_text="The channel ID holding the vote channel on the server"
    )

    class Meta:
        table_name = "votes"
class MovieVote(BaseModel):
    """Tracks the movies selected for voting on"""

    id = pw.AutoField(primary_key=True)
    vote = pw.ForeignKeyField(Vote, backref="movie_votes")
    # backref "+" disables the reverse accessor on Movie.
    movie = pw.ForeignKeyField(Movie, backref="+")
    # Running score of this movie within the vote.
    score = pw.FloatField(null=False, default=0)
    # Emoji identifying this movie in the vote — presumably a reaction emoji; confirm.
    emoji = pw.TextField(null=False)

    class Meta:
        # FIX: was `tablename`, which peewee ignores (the Meta attribute is
        # `table_name`), so the table was silently created under the default
        # snake-cased class name instead. Every other model here already uses
        # `table_name`. NOTE: existing deployments need a table rename
        # migration ("movie_vote" -> "movie_votes") before upgrading.
        table_name = "movie_votes"
        indexes = (
            # create a unique index on vote and movie
            (("vote", "movie"), True),
        )
class UserVote(BaseModel):
    """Tracks the ranked votes of a user"""

    id = pw.AutoField(primary_key=True)
    movie_vote = pw.ForeignKeyField(MovieVote, backref="user_votes")
    user_id = pw.IntegerField(null=False)
    user_name = pw.TextField(null=False)
    vote_rank = pw.SmallIntegerField(
        null=False,
        help_text="The numbered vote for the user, 1 is highest rank. Useful for ranked-choice voting",
    )

    class Meta:
        # FIX: was `tablename`, which peewee ignores (the Meta attribute is
        # `table_name`), so the table was silently created under the default
        # snake-cased class name instead. Every other model here already uses
        # `table_name`. NOTE: existing deployments need a table rename
        # migration ("user_vote" -> "user_votes") before upgrading.
        table_name = "user_votes"
        indexes = (
            # create a unique index on movie, user, and rank
            (("movie_vote", "user_id", "vote_rank"), True),
        )
| 32.99187 | 116 | 0.673238 | import datetime
import peewee as pw
from . import BaseModel
class Server(BaseModel):
id = pw.IntegerField(primary_key=True)
channel = pw.IntegerField(null=False)
movie_time = pw.TimeField(null=False, formats="%H:%M", default="12:00")
admin_role = pw.TextField(null=False, default="Movie Master")
tie_option = pw.TextField(null=False, default="breaker")
num_movies_per_vote = pw.SmallIntegerField(null=False, default=8)
num_votes_per_user = pw.SmallIntegerField(null=False, default=4)
block_suggestions = pw.BooleanField(null=False, default=False)
check_movie_names = pw.BooleanField(null=False, default=False)
message_timeout = pw.SmallIntegerField(null=False, default=10)
allow_tv_shows = pw.BooleanField(null=False, default=False)
class Meta:
table_name = "servers"
class IMDBInfo(BaseModel):
imdb_id = pw.TextField(primary_key=True)
title = pw.TextField(null=False)
canonical_title = pw.TextField()
year = pw.IntegerField()
thumbnail_poster_url = pw.TextField()
full_size_poster_url = pw.TextField()
class Meta:
table_name = "imdb_info"
class Movie(BaseModel):
id = pw.AutoField(primary_key=True)
server = pw.ForeignKeyField(Server, backref="movies")
movie_name = pw.TextField(null=False)
suggested_by = pw.TextField(null=False)
last_score = pw.FloatField(null=True)
num_votes_entered = pw.IntegerField(null=False, default=0)
total_score = pw.FloatField(null=False, default=0.0)
total_votes = pw.IntegerField(null=False, default=0)
suggested_on = pw.TimestampField(
utc=True, null=False, default=datetime.datetime.utcnow
)
watched_on = pw.TimestampField(utc=True, null=True, default=None)
imdb_id = pw.ForeignKeyField(IMDBInfo, backref="movie_suggestions", null=True)
class Meta:
table_name = "movies"
indexes = (
(("server", "movie_name"), True),
)
class MovieGenre(BaseModel):
movie_id = pw.ForeignKeyField(Movie, backref="movie_genres")
genre = pw.TextField(null=False, index=True)
class Meta:
table_name = "movie_genre"
indexes = (
(("movie_id", "genre"), True),
)
class Vote(BaseModel):
server_id = pw.ForeignKeyField(Server, backref="vote", primary_key=True)
message_id = pw.IntegerField(
null=True, help_text="The message ID holding the vote message on the server"
)
channel_id = pw.IntegerField(
null=True, help_text="The channel ID holding the vote channel on the server"
)
class Meta:
table_name = "votes"
class MovieVote(BaseModel):
id = pw.AutoField(primary_key=True)
vote = pw.ForeignKeyField(Vote, backref="movie_votes")
movie = pw.ForeignKeyField(Movie, backref="+")
score = pw.FloatField(null=False, default=0)
emoji = pw.TextField(null=False)
class Meta:
tablename = "movie_votes"
indexes = (
(("vote", "movie"), True),
)
class UserVote(BaseModel):
id = pw.AutoField(primary_key=True)
movie_vote = pw.ForeignKeyField(MovieVote, backref="user_votes")
user_id = pw.IntegerField(null=False)
user_name = pw.TextField(null=False)
vote_rank = pw.SmallIntegerField(
null=False,
help_text="The numbered vote for the user, 1 is highest rank. Useful for ranked-choice voting",
)
class Meta:
tablename = "user_votes"
indexes = (
(("movie_vote", "user_id", "vote_rank"), True),
)
| true | true |
f7210b6d933a1774a42b9590a91353ac70a354f7 | 5,252 | py | Python | euler/large_sum.py | lsbardel/mathfun | 98e7c210409c2b5777e91059c3651cef4f3045dd | [
"BSD-3-Clause"
] | null | null | null | euler/large_sum.py | lsbardel/mathfun | 98e7c210409c2b5777e91059c3651cef4f3045dd | [
"BSD-3-Clause"
] | null | null | null | euler/large_sum.py | lsbardel/mathfun | 98e7c210409c2b5777e91059c3651cef4f3045dd | [
"BSD-3-Clause"
] | null | null | null | example = '''
37107287533902102798797998220837590246510135740250
46376937677490009712648124896970078050417018260538
74324986199524741059474233309513058123726617309629
91942213363574161572522430563301811072406154908250
23067588207539346171171980310421047513778063246676
89261670696623633820136378418383684178734361726757
28112879812849979408065481931592621691275889832738
44274228917432520321923589422876796487670272189318
47451445736001306439091167216856844588711603153276
70386486105843025439939619828917593665686757934951
62176457141856560629502157223196586755079324193331
64906352462741904929101432445813822663347944758178
92575867718337217661963751590579239728245598838407
58203565325359399008402633568948830189458628227828
80181199384826282014278194139940567587151170094390
35398664372827112653829987240784473053190104293586
86515506006295864861532075273371959191420517255829
71693888707715466499115593487603532921714970056938
54370070576826684624621495650076471787294438377604
53282654108756828443191190634694037855217779295145
36123272525000296071075082563815656710885258350721
45876576172410976447339110607218265236877223636045
17423706905851860660448207621209813287860733969412
81142660418086830619328460811191061556940512689692
51934325451728388641918047049293215058642563049483
62467221648435076201727918039944693004732956340691
15732444386908125794514089057706229429197107928209
55037687525678773091862540744969844508330393682126
18336384825330154686196124348767681297534375946515
80386287592878490201521685554828717201219257766954
78182833757993103614740356856449095527097864797581
16726320100436897842553539920931837441497806860984
48403098129077791799088218795327364475675590848030
87086987551392711854517078544161852424320693150332
59959406895756536782107074926966537676326235447210
69793950679652694742597709739166693763042633987085
41052684708299085211399427365734116182760315001271
65378607361501080857009149939512557028198746004375
35829035317434717326932123578154982629742552737307
94953759765105305946966067683156574377167401875275
88902802571733229619176668713819931811048770190271
25267680276078003013678680992525463401061632866526
36270218540497705585629946580636237993140746255962
24074486908231174977792365466257246923322810917141
91430288197103288597806669760892938638285025333403
34413065578016127815921815005561868836468420090470
23053081172816430487623791969842487255036638784583
11487696932154902810424020138335124462181441773470
63783299490636259666498587618221225225512486764533
67720186971698544312419572409913959008952310058822
95548255300263520781532296796249481641953868218774
76085327132285723110424803456124867697064507995236
37774242535411291684276865538926205024910326572967
23701913275725675285653248258265463092207058596522
29798860272258331913126375147341994889534765745501
18495701454879288984856827726077713721403798879715
38298203783031473527721580348144513491373226651381
34829543829199918180278916522431027392251122869539
40957953066405232632538044100059654939159879593635
29746152185502371307642255121183693803580388584903
41698116222072977186158236678424689157993532961922
62467957194401269043877107275048102390895523597457
23189706772547915061505504953922979530901129967519
86188088225875314529584099251203829009407770775672
11306739708304724483816533873502340845647058077308
82959174767140363198008187129011875491310547126581
97623331044818386269515456334926366572897563400500
42846280183517070527831839425882145521227251250327
55121603546981200581762165212827652751691296897789
32238195734329339946437501907836945765883352399886
75506164965184775180738168837861091527357929701337
62177842752192623401942399639168044983993173312731
32924185707147349566916674687634660915035914677504
99518671430235219628894890102423325116913619626622
73267460800591547471830798392868535206946944540724
76841822524674417161514036427982273348055556214818
97142617910342598647204516893989422179826088076852
87783646182799346313767754307809363333018982642090
10848802521674670883215120185883543223812876952786
71329612474782464538636993009049310363619763878039
62184073572399794223406235393808339651327408011116
66627891981488087797941876876144230030984490851411
60661826293682836764744779239180335110989069790714
85786944089552990653640447425576083659976645795096
66024396409905389607120198219976047599490197230297
64913982680032973156037120041377903785566085089252
16730939319872750275468906903707539413042652315011
94809377245048795150954100921645863754710598436791
78639167021187492431995700641917969777599028300699
15368713711936614952811305876380278410754449733078
40789923115535562561142322423255033685442488917353
44889911501440648020369068063960672322193204149535
41503128880339536053299340368006977710650566631954
81234880673210146739058568557934581403627822703280
82616570773948327592232845941706525094512325230608
22918802058777319719839450180888072429661980811197
77158542502016545090413245809786882778948721859617
72107838435069186155435662884062257473692284509516
20849603980134001723930671666823555245252804609722
53503534226472524250874054075591789781264330331690'''
if __name__ == '__main__':
numbers = example.split('\n')
v = sum((int(n) for n in numbers if n))
print(int(str(v)[:10]))
| 48.62963 | 53 | 0.967822 | example = '''
37107287533902102798797998220837590246510135740250
46376937677490009712648124896970078050417018260538
74324986199524741059474233309513058123726617309629
91942213363574161572522430563301811072406154908250
23067588207539346171171980310421047513778063246676
89261670696623633820136378418383684178734361726757
28112879812849979408065481931592621691275889832738
44274228917432520321923589422876796487670272189318
47451445736001306439091167216856844588711603153276
70386486105843025439939619828917593665686757934951
62176457141856560629502157223196586755079324193331
64906352462741904929101432445813822663347944758178
92575867718337217661963751590579239728245598838407
58203565325359399008402633568948830189458628227828
80181199384826282014278194139940567587151170094390
35398664372827112653829987240784473053190104293586
86515506006295864861532075273371959191420517255829
71693888707715466499115593487603532921714970056938
54370070576826684624621495650076471787294438377604
53282654108756828443191190634694037855217779295145
36123272525000296071075082563815656710885258350721
45876576172410976447339110607218265236877223636045
17423706905851860660448207621209813287860733969412
81142660418086830619328460811191061556940512689692
51934325451728388641918047049293215058642563049483
62467221648435076201727918039944693004732956340691
15732444386908125794514089057706229429197107928209
55037687525678773091862540744969844508330393682126
18336384825330154686196124348767681297534375946515
80386287592878490201521685554828717201219257766954
78182833757993103614740356856449095527097864797581
16726320100436897842553539920931837441497806860984
48403098129077791799088218795327364475675590848030
87086987551392711854517078544161852424320693150332
59959406895756536782107074926966537676326235447210
69793950679652694742597709739166693763042633987085
41052684708299085211399427365734116182760315001271
65378607361501080857009149939512557028198746004375
35829035317434717326932123578154982629742552737307
94953759765105305946966067683156574377167401875275
88902802571733229619176668713819931811048770190271
25267680276078003013678680992525463401061632866526
36270218540497705585629946580636237993140746255962
24074486908231174977792365466257246923322810917141
91430288197103288597806669760892938638285025333403
34413065578016127815921815005561868836468420090470
23053081172816430487623791969842487255036638784583
11487696932154902810424020138335124462181441773470
63783299490636259666498587618221225225512486764533
67720186971698544312419572409913959008952310058822
95548255300263520781532296796249481641953868218774
76085327132285723110424803456124867697064507995236
37774242535411291684276865538926205024910326572967
23701913275725675285653248258265463092207058596522
29798860272258331913126375147341994889534765745501
18495701454879288984856827726077713721403798879715
38298203783031473527721580348144513491373226651381
34829543829199918180278916522431027392251122869539
40957953066405232632538044100059654939159879593635
29746152185502371307642255121183693803580388584903
41698116222072977186158236678424689157993532961922
62467957194401269043877107275048102390895523597457
23189706772547915061505504953922979530901129967519
86188088225875314529584099251203829009407770775672
11306739708304724483816533873502340845647058077308
82959174767140363198008187129011875491310547126581
97623331044818386269515456334926366572897563400500
42846280183517070527831839425882145521227251250327
55121603546981200581762165212827652751691296897789
32238195734329339946437501907836945765883352399886
75506164965184775180738168837861091527357929701337
62177842752192623401942399639168044983993173312731
32924185707147349566916674687634660915035914677504
99518671430235219628894890102423325116913619626622
73267460800591547471830798392868535206946944540724
76841822524674417161514036427982273348055556214818
97142617910342598647204516893989422179826088076852
87783646182799346313767754307809363333018982642090
10848802521674670883215120185883543223812876952786
71329612474782464538636993009049310363619763878039
62184073572399794223406235393808339651327408011116
66627891981488087797941876876144230030984490851411
60661826293682836764744779239180335110989069790714
85786944089552990653640447425576083659976645795096
66024396409905389607120198219976047599490197230297
64913982680032973156037120041377903785566085089252
16730939319872750275468906903707539413042652315011
94809377245048795150954100921645863754710598436791
78639167021187492431995700641917969777599028300699
15368713711936614952811305876380278410754449733078
40789923115535562561142322423255033685442488917353
44889911501440648020369068063960672322193204149535
41503128880339536053299340368006977710650566631954
81234880673210146739058568557934581403627822703280
82616570773948327592232845941706525094512325230608
22918802058777319719839450180888072429661980811197
77158542502016545090413245809786882778948721859617
72107838435069186155435662884062257473692284509516
20849603980134001723930671666823555245252804609722
53503534226472524250874054075591789781264330331690'''
if __name__ == '__main__':
numbers = example.split('\n')
v = sum((int(n) for n in numbers if n))
print(int(str(v)[:10]))
| true | true |
f7210bd42ee9a00a5539402b91e5c99cc41cade9 | 2,341 | py | Python | examples/dfp/v201505/user_team_association_service/get_user_team_associations_for_user.py | wbrp/googleads-python-lib | c0f8ce6c4acfe88ce8f913a4f0e0e92b548e1022 | [
"Apache-2.0"
] | 1 | 2020-05-23T11:32:32.000Z | 2020-05-23T11:32:32.000Z | examples/dfp/v201505/user_team_association_service/get_user_team_associations_for_user.py | cmm08/googleads-python-lib | 97743df32eff92cf00cb8beaddcda42dfa0a37f4 | [
"Apache-2.0"
] | null | null | null | examples/dfp/v201505/user_team_association_service/get_user_team_associations_for_user.py | cmm08/googleads-python-lib | 97743df32eff92cf00cb8beaddcda42dfa0a37f4 | [
"Apache-2.0"
] | 2 | 2018-04-20T02:16:33.000Z | 2020-11-12T20:58:54.000Z | #!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all user team associations for a single user.
To determine which users exist, run get_all_users.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
# Import appropriate modules from the client library.
from googleads import dfp
USER_ID = 'INSERT_USER_ID_HERE'
def main(client, user_id):
  """Print every team association for the given user.

  Args:
    client: an initialized dfp.DfpClient.
    user_id: ID of the user whose team associations are listed.
  """
  # Initialize appropriate service.
  user_team_association_service = client.GetService(
      'UserTeamAssociationService', version='v201505')

  # Bind the user ID into a PQL filter statement.
  values = [{
      'key': 'userId',
      'value': {
          'xsi_type': 'NumberValue',
          'value': user_id
      }
  }]
  query = 'WHERE userId = :userId'

  # Create a filter statement.
  statement = dfp.FilterStatement(query, values)

  # Page through results until a page comes back without a 'results' key.
  while True:
    response = user_team_association_service.getUserTeamAssociationsByStatement(
        statement.ToStatement())

    if 'results' in response:
      # Display results.
      for user_team_association in response['results']:
        print ('User team association between user with ID \'%s\' and team with'
               ' ID \'%s\' was found.' % (user_team_association['userId'],
                                          user_team_association['teamId']))
      statement.offset += dfp.SUGGESTED_PAGE_LIMIT
    else:
      break

  # FIX: this was a Python-2-only `print` *statement*; with a single
  # parenthesized argument it now behaves identically under Python 2 and 3
  # and matches the parenthesized print call used above.
  print('\nNumber of results found: %s' % response['totalResultSetSize'])
if __name__ == '__main__':
  # Initialize client object.
  # Credentials/properties come from the "googleads.yaml" file described in
  # the module docstring.
  dfp_client = dfp.DfpClient.LoadFromStorage()
  main(dfp_client, USER_ID)
| 32.068493 | 80 | 0.706963 |
"""This example gets all user team associations for a single user.
To determine which users exist, run get_all_users.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
from googleads import dfp
USER_ID = 'INSERT_USER_ID_HERE'
def main(client, user_id):
user_team_association_service = client.GetService(
'UserTeamAssociationService', version='v201505')
values = [{
'key': 'userId',
'value': {
'xsi_type': 'NumberValue',
'value': user_id
}
}]
query = 'WHERE userId = :userId'
statement = dfp.FilterStatement(query, values)
while True:
response = user_team_association_service.getUserTeamAssociationsByStatement(
statement.ToStatement())
if 'results' in response:
for user_team_association in response['results']:
print ('User team association between user with ID \'%s\' and team with'
' ID \'%s\' was found.' % (user_team_association['userId'],
user_team_association['teamId']))
statement.offset += dfp.SUGGESTED_PAGE_LIMIT
else:
break
print '\nNumber of results found: %s' % response['totalResultSetSize']
if __name__ == '__main__':
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client, USER_ID)
| false | true |
f7210c49de22ec515aedef5c7f5415db79dc84ea | 21,828 | py | Python | recipes/openscenegraph/all/conanfile.py | rockandsalt/conan-center-index | d739adcec3e4dd4c250eff559ceb738e420673dd | [
"MIT"
] | 2 | 2021-08-12T06:17:58.000Z | 2021-09-07T23:12:25.000Z | recipes/openscenegraph/all/conanfile.py | rockandsalt/conan-center-index | d739adcec3e4dd4c250eff559ceb738e420673dd | [
"MIT"
] | 9 | 2020-01-21T08:27:51.000Z | 2021-01-23T19:21:46.000Z | recipes/openscenegraph/all/conanfile.py | rockandsalt/conan-center-index | d739adcec3e4dd4c250eff559ceb738e420673dd | [
"MIT"
] | 2 | 2021-05-12T10:37:57.000Z | 2021-12-15T13:38:16.000Z | from conans import CMake, ConanFile, tools
from conans.errors import ConanInvalidConfiguration
import os
required_conan_version = ">=1.29.1"
class OpenSceneGraphConanFile(ConanFile):
    """Conan recipe for the OpenSceneGraph 3D graphics toolkit."""

    name = "openscenegraph"
    description = "OpenSceneGraph is an open source high performance 3D graphics toolkit"
    topics = ("openscenegraph", "graphics")
    url = "https://github.com/conan-io/conan-center-index"
    homepage = "http://www.openscenegraph.org"
    license = "LGPL-2.1-only", "WxWindows-exception-3.1"
    settings = "os", "arch", "compiler", "build_type"
    # "enable_*" options toggle OSG build features; "with_*" options gate
    # optional third-party plugin dependencies (see requirements()).
    options = {
        "shared": [True, False],
        "fPIC": [True, False],
        "build_applications": [True, False],
        "enable_notify": [True, False],
        "enable_deprecated_api": [True, False],
        "enable_readfile": [True, False],
        "enable_ref_ptr_implicit_output_conversion": [True, False],
        "enable_ref_ptr_safe_dereference": [True, False],
        "enable_envvar_support": [True, False],
        "enable_windowing_system": [True, False],
        "enable_deprecated_serializers": [True, False],
        "use_fontconfig": [True, False],
        "with_asio": [True, False],
        "with_curl": [True, False],
        "with_dcmtk": [True, False],
        "with_freetype": [True, False],
        "with_gdal": [True, False],
        "with_gif": [True, False],
        "with_gta": [True, False],
        "with_jasper": [True, False],
        "with_jpeg": [True, False],
        "with_openexr": [True, False],
        "with_png": [True, False],
        "with_tiff": [True, False],
        "with_zlib": [True, False],
    }
    default_options = {
        "shared": False,
        "fPIC": True,
        "build_applications": False,
        "enable_notify": True,
        "enable_deprecated_api": False,
        "enable_readfile": True,
        "enable_ref_ptr_implicit_output_conversion": True,
        "enable_ref_ptr_safe_dereference": True,
        "enable_envvar_support": True,
        "enable_windowing_system": True,
        "enable_deprecated_serializers": False,
        "use_fontconfig": True,
        "with_asio": False,
        "with_curl": False,
        "with_dcmtk": False,
        "with_freetype": True,
        "with_gdal": False,
        "with_gif": True,
        "with_gta": False,
        "with_jasper": False,
        "with_jpeg": True,
        "with_openexr": False,
        "with_png": True,
        "with_tiff": True,
        "with_zlib": True,
    }
    # Conan's short_paths workaround for long paths on Windows — presumably
    # needed because of OSG's deeply nested source tree.
    short_paths = True
    exports_sources = "CMakeLists.txt", "patches/*.patch"
    generators = "cmake", "cmake_find_package"
    @property
    def _source_subfolder(self):
        # Folder the upstream sources are extracted into (see source()).
        return "source_subfolder"
    def config_options(self):
        """Remove or re-default options that do not apply to the target OS."""
        if self.settings.os == "Windows":
            # fPIC is meaningless on Windows.
            del self.options.fPIC
            # ASIO option is not offered on Windows builds — reason not
            # recorded here; confirm before changing.
            del self.options.with_asio
            # Default to false with fontconfig until it is supported on Windows
            self.options.use_fontconfig = False
        if tools.is_apple_os(self.settings.os):
            # osg uses imageio on Apple platforms
            del self.options.with_gif
            del self.options.with_jpeg
            del self.options.with_png
            # imageio supports tiff files so the tiff plugin isn't needed on Apple platforms
            self.options.with_tiff = False
def configure(self):
if self.options.shared:
del self.options.fPIC
if not self.options.with_zlib:
# These require zlib support
del self.options.with_openexr
del self.options.with_png
del self.options.with_dcmtk
    def validate(self):
        """Reject configurations this recipe cannot build."""
        if self.options.get_safe("with_asio", False):
            raise ConanInvalidConfiguration("ASIO support in OSG is broken, see https://github.com/openscenegraph/OpenSceneGraph/issues/921")
        # hasattr guard: settings_build only exists under conan's two-profile
        # (build/host) model.
        if hasattr(self, "settings_build") and tools.cross_building(self):
            raise ConanInvalidConfiguration("openscenegraph recipe cannot be cross-built yet. Contributions are welcome.")
    def requirements(self):
        """Declare dependencies implied by the enabled options.

        get_safe() is used for options that config_options()/configure() may
        have deleted on some platforms (with_asio, with_dcmtk, with_gif,
        with_jpeg, with_openexr, with_png).
        """
        if self.options.enable_windowing_system and self.settings.os == "Linux":
            self.requires("xorg/system")
        self.requires("opengl/system")
        if self.options.use_fontconfig:
            self.requires("fontconfig/2.13.93")
        if self.options.get_safe("with_asio", False):
            # Should these be private requires?
            self.requires("asio/1.18.1")
            self.requires("boost/1.75.0")
        if self.options.with_curl:
            self.requires("libcurl/7.74.0")
        if self.options.get_safe("with_dcmtk"):
            self.requires("dcmtk/3.6.5")
        if self.options.with_freetype:
            self.requires("freetype/2.10.4")
        if self.options.with_gdal:
            self.requires("gdal/3.1.4")
        if self.options.get_safe("with_gif"):
            self.requires("giflib/5.2.1")
        if self.options.with_gta:
            self.requires("libgta/1.2.1")
        if self.options.with_jasper:
            self.requires("jasper/2.0.24")
        if self.options.get_safe("with_jpeg"):
            self.requires("libjpeg/9d")
        if self.options.get_safe("with_openexr"):
            self.requires("openexr/2.5.3")
        if self.options.get_safe("with_png"):
            self.requires("libpng/1.6.37")
        if self.options.with_tiff:
            self.requires("libtiff/4.2.0")
        if self.options.with_zlib:
            self.requires("zlib/1.2.11")
def source(self):
tools.get(**self.conan_data["sources"][self.version],
strip_root=True, destination=self._source_subfolder)
def _patch_sources(self):
for patch in self.conan_data["patches"].get(self.version, []):
tools.patch(**patch)
for package in ("Fontconfig", "Freetype", "GDAL", "GIFLIB", "GTA", "Jasper", "OpenEXR"):
# Prefer conan's find package scripts over osg's
os.unlink(os.path.join(self._source_subfolder, "CMakeModules", "Find{}.cmake".format(package)))
def _configured_cmake(self):
if hasattr(self, "_cmake"):
return self._cmake
self._cmake = cmake = CMake(self)
cmake.definitions["USE_3RDPARTY_BIN"] = False
cmake.definitions["DYNAMIC_OPENSCENEGRAPH"] = self.options.shared
cmake.definitions["DYNAMIC_OPENTHREADS"] = self.options.shared
cmake.definitions["BUILD_OSG_APPLICATIONS"] = self.options.build_applications
cmake.definitions["BUILD_OSG_EXAMPLES"] = False
cmake.definitions["OSG_NOTIFY_DISABLED"] = not self.options.enable_notify
cmake.definitions["OSG_USE_DEPRECATED_API"] = self.options.enable_deprecated_api
cmake.definitions["OSG_PROVIDE_READFILE"] = self.options.enable_readfile
cmake.definitions["OSG_USE_REF_PTR_IMPLICIT_OUTPUT_CONVERSION"] = self.options.enable_ref_ptr_implicit_output_conversion
cmake.definitions["OSG_USE_REF_PTR_SAFE_DEREFERENCE"] = self.options.enable_ref_ptr_safe_dereference
cmake.definitions["OSG_ENVVAR_SUPPORTED"] = self.options.enable_envvar_support
if not self.options.enable_windowing_system:
cmake.definitions["OSG_WINDOWING_SYSTEM"] = None
cmake.definitions["BUILD_OSG_DEPRECATED_SERIALIZERS"] = self.options.enable_deprecated_serializers
cmake.definitions["OSG_TEXT_USE_FONTCONFIG"] = self.options.use_fontconfig
# Disable option dependencies unless we have a package for them
cmake.definitions["OSG_WITH_FREETYPE"] = self.options.with_freetype
cmake.definitions["OSG_WITH_OPENEXR"] = self.options.get_safe("with_openexr", False)
cmake.definitions["OSG_WITH_INVENTOR"] = False
cmake.definitions["OSG_WITH_JASPER"] = self.options.with_jasper
cmake.definitions["OSG_WITH_OPENCASCADE"] = False
cmake.definitions["OSG_WITH_FBX"] = False
cmake.definitions["OSG_WITH_ZLIB"] = self.options.with_zlib
cmake.definitions["OSG_WITH_GDAL"] = self.options.with_gdal
cmake.definitions["OSG_WITH_GTA"] = self.options.with_gta
cmake.definitions["OSG_WITH_CURL"] = self.options.with_curl
cmake.definitions["OSG_WITH_LIBVNCSERVER"] = False
cmake.definitions["OSG_WITH_DCMTK"] = self.options.get_safe("with_dcmtk", False)
cmake.definitions["OSG_WITH_FFMPEG"] = False
cmake.definitions["OSG_WITH_DIRECTSHOW"] = False
cmake.definitions["OSG_WITH_SDL"] = False
cmake.definitions["OSG_WITH_POPPLER"] = False
cmake.definitions["OSG_WITH_RSVG"] = False
cmake.definitions["OSG_WITH_NVTT"] = False
cmake.definitions["OSG_WITH_ASIO"] = self.options.get_safe("with_asio", False)
cmake.definitions["OSG_WITH_ZEROCONF"] = False
cmake.definitions["OSG_WITH_LIBLAS"] = False
cmake.definitions["OSG_WITH_GIF"] = self.options.get_safe("with_gif", False)
cmake.definitions["OSG_WITH_JPEG"] = self.options.get_safe("with_jpeg", False)
cmake.definitions["OSG_WITH_PNG"] = self.options.get_safe("with_png", False)
cmake.definitions["OSG_WITH_TIFF"] = self.options.with_tiff
if self.settings.os == "Windows":
# osg has optional quicktime support on Windows
cmake.definitions["CMAKE_DISABLE_FIND_PACKAGE_QuickTime"] = True
cmake.definitions["OSG_MSVC_VERSIONED_DLL"] = False
cmake.configure()
return cmake
def build(self):
self._patch_sources()
self._configured_cmake().build()
    def package(self):
        """Install build artifacts and prune files that must not be packaged."""
        self._configured_cmake().install()
        self.copy(pattern="LICENSE.txt", dst="licenses", src=self._source_subfolder)
        # pkg-config files and PDB debug files are stripped from the package.
        tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
        tools.remove_files_by_mask(self.package_folder, "*.pdb")
def package_info(self):
    """Declare cpp_info components for the OSG core libraries, their
    serializer plugins, and every format/device plugin enabled by the
    current options.

    Plugins are static archives under ``lib/osgPlugins-<version>`` when
    building static; with ``shared`` they are loaded at runtime and expose
    no link libraries.

    NOTE(review): the indentation of this method was reconstructed from
    context (the source dump lost all leading whitespace); nesting of the
    platform-conditional branches should be verified against the upstream
    conan-center recipe.
    """
    # FindOpenSceneGraph.cmake is shipped with cmake and is a traditional cmake script
    # It doesn't setup targets and only provides a few variables:
    # - OPENSCENEGRAPH_FOUND
    # - OPENSCENEGRAPH_VERSION
    # - OPENSCENEGRAPH_INCLUDE_DIRS
    # - OPENSCENEGRAPH_LIBRARIES
    # Unfortunately, the cmake_find_package generators don't currently allow directly setting variables,
    # but it will set the last three of these if the name of the package is OPENSCENEGRAPH (it uses
    # the filename for the first, so OpenSceneGraph_FOUND gets set, not OPENSCENEGRAPH_FOUND)
    # TODO: set OPENSCENEGRAPH_FOUND in cmake_find_package and cmake_find_package_multi
    self.cpp_info.filenames["cmake_find_package"] = "OpenSceneGraph"
    self.cpp_info.filenames["cmake_find_package_multi"] = "OpenSceneGraph"
    self.cpp_info.names["cmake_find_package"] = "OPENSCENEGRAPH"
    self.cpp_info.names["cmake_find_package_multi"] = "OPENSCENEGRAPH"
    # Library filename postfix OSG's CMake appends per build type.
    if self.settings.build_type == "Debug":
        postfix = "d"
    elif self.settings.build_type == "RelWithDebInfo":
        postfix = "rd"
    elif self.settings.build_type == "MinSizeRel":
        postfix = "s"
    else:
        postfix = ""
    def setup_plugin(plugin):
        # Register an osgdb_* plugin component; returned so callers can
        # append extra requires / system_libs / frameworks.
        lib = "osgdb_" + plugin
        plugin_library = self.cpp_info.components[lib]
        plugin_library.libs = [] if self.options.shared else [lib + postfix]
        plugin_library.requires = ["OpenThreads", "osg", "osgDB", "osgUtil"]
        if not self.options.shared:
            plugin_library.libdirs = [os.path.join("lib", "osgPlugins-{}".format(self.version))]
        return plugin_library
    def setup_serializers(lib):
        # Register the (deprecated) serializer plugins a core library ships,
        # skipping the libraries that have none.
        plugins = []
        if lib not in ("osgDB", "osgWidget", "osgPresentation"):
            plugins.append("serializers_{}".format(lib.lower()))
        if self.options.enable_deprecated_serializers:
            if lib not in ("osgUtil", "osgDB", "osgGA", "osgManipulator", "osgUI", "osgPresentation"):
                plugins.append("deprecated_{}".format(lib.lower()))
        for plugin in plugins:
            setup_plugin(plugin).requires.append(lib)
    def setup_library(lib):
        # Register a core osg* library component plus its serializer plugins.
        library = self.cpp_info.components[lib]
        library.libs = [lib + postfix]
        library.names["pkg_config"] = "openscenegraph-{}".format(lib)
        setup_serializers(lib)
        return library
    # Core libraries
    # requires obtained from osg's source code
    # TODO: FindOpenThreads.cmake is shipped with CMake, so we should generate separate
    # files for it with cmake_find_package and cmake_find_package_multi
    library = self.cpp_info.components["OpenThreads"]
    library.libs = ["OpenThreads" + postfix]
    library.names["pkg_config"] = "openthreads"
    if self.settings.os == "Linux":
        library.system_libs = ["pthread"]
    library = setup_library("osg")
    library.requires = ["OpenThreads", "opengl::opengl"]
    if self.settings.os == "Linux":
        library.system_libs = ["m", "rt", "dl"]
    if not self.options.shared:
        library.defines.append("OSG_LIBRARY_STATIC")
    library = setup_library("osgDB")
    library.requires = ["osg", "osgUtil", "OpenThreads"]
    if self.settings.os == "Linux":
        library.system_libs = ["dl"]
    elif self.settings.os == "Macos":
        library.frameworks = ["Carbon", "Cocoa"]
    if self.options.with_zlib:
        library.requires.append("zlib::zlib")
    setup_library("osgUtil").requires = ["osg", "OpenThreads"]
    setup_library("osgGA").requires = ["osgDB", "osgUtil", "osg", "OpenThreads"]
    library = setup_library("osgText")
    library.requires = ["osgDB", "osg", "osgUtil", "OpenThreads"]
    if self.options.use_fontconfig:
        library.requires.append("fontconfig::fontconfig")
    library = setup_library("osgViewer")
    library.requires = ["osgGA", "osgText", "osgDB", "osgUtil", "osg"]
    if self.options.enable_windowing_system:
        if self.settings.os == "Linux":
            library.requires.append("xorg::xorg")
        elif tools.is_apple_os(self.settings.os):
            library.frameworks = ["Cocoa"]
    # NOTE(review): gdi32 placed outside the windowing-system branch — confirm upstream.
    if self.settings.os == "Windows":
        library.system_libs = ["gdi32"]
    setup_library("osgAnimation").requires = ["osg", "osgText", "osgGA", "osgViewer", "OpenThreads"]
    setup_library("osgFX").requires = ["osgUtil", "osgDB", "osg", "OpenThreads"]
    setup_library("osgManipulator").requires = ["osgViewer", "osgGA", "osgUtil", "osg", "OpenThreads"]
    setup_library("osgParticle").requires = ["osgUtil", "osgDB", "osg", "OpenThreads"]
    setup_library("osgUI").requires = ["osgDB", "osgGA", "osgUtil", "osgText", "osgViewer", "osg", "OpenThreads"]
    setup_library("osgVolume").requires = ["osgGA", "osgDB", "osgUtil", "osg", "OpenThreads"]
    setup_library("osgShadow").requires = ["osgUtil", "osgDB", "osg", "OpenThreads"]
    setup_library("osgSim").requires = ["osgText", "osgUtil", "osgDB", "osg", "OpenThreads"]
    setup_library("osgTerrain").requires = ["osgUtil", "osgDB", "osg", "OpenThreads"]
    setup_library("osgWidget").requires = ["osgText", "osgViewer", "osgDB", "osg", "OpenThreads"]
    setup_library("osgPresentation").requires = ["osgViewer", "osgUI", "osgWidget", "osgManipulator", "osgVolume", "osgFX", "osgText", "osgGA", "osgUtil", "osgDB", "osg", "OpenThreads"]
    # Start of plugins
    # NodeKit/Psudo loader plugins
    setup_plugin("osga")
    setup_plugin("rot")
    setup_plugin("scale")
    setup_plugin("trans")
    setup_plugin("normals")
    setup_plugin("revisions")
    setup_plugin("osgviewer").requires.append("osgViewer")
    setup_plugin("osgshadow").requires.append("osgShadow")
    setup_plugin("osgterrain").requires.append("osgTerrain")
    # Main native plugins
    setup_plugin("osg")
    plugin = setup_plugin("ive")
    plugin.requires.extend(("osgSim", "osgFX", "osgText", "osgTerrain", "osgVolume"))
    if self.options.with_zlib:
        plugin.requires.append("zlib::zlib")
    # Viewer plugins
    setup_plugin("cfg").requires.append("osgViewer")
    # Shader plugins
    setup_plugin("glsl")
    # Image plugins
    setup_plugin("rgb")
    setup_plugin("bmp")
    setup_plugin("pnm")
    setup_plugin("dds")
    setup_plugin("tga")
    setup_plugin("hdr")
    setup_plugin("dot")
    setup_plugin("vtf")
    setup_plugin("ktx")
    if self.options.get_safe("with_jpeg"):
        setup_plugin("jpeg").requires.append("libjpeg::libjpeg")
    if self.options.with_jasper:
        setup_plugin("jp2").requires.append("jasper::jasper")
    if self.options.get_safe("with_openexr"):
        setup_plugin("exr").requires.append("openexr::openexr")
    if self.options.get_safe("with_gif"):
        setup_plugin("gif").requires.append("giflib::giflib")
    if self.options.get_safe("with_png"):
        setup_plugin("png").requires.extend(("libpng::libpng", "zlib::zlib"))
    if self.options.with_tiff:
        setup_plugin("tiff").requires.append("libtiff::libtiff")
    if self.options.with_gdal:
        setup_plugin("gdal").requires.extend(("osgTerrain", "gdal::gdal"))
        setup_plugin("ogr").requires.append("gdal::gdal")
    if self.options.with_gta:
        setup_plugin("gta").requires.append("libgta::libgta")
    # 3D Image plugins
    if self.options.get_safe("with_dcmtk"):
        plugin = setup_plugin("dicom")
        plugin.requires.extend(("osgVolume", "dcmtk::dcmtk"))
        if self.settings.os == "Windows":
            plugin.system_libs = ["wsock32", "ws2_32"]
    # 3rd party 3d plugins
    setup_plugin("3dc")
    setup_plugin("p3d").requires.extend(("osgGA", "osgText", "osgVolume", "osgFX", "osgViewer", "osgPresentation"))
    if self.options.with_curl:
        plugin = setup_plugin("curl")
        plugin.requires.append("libcurl::libcurl")
        if self.options.with_zlib:
            plugin.requires.append("zlib::zlib")
    if self.options.with_zlib:
        setup_plugin("gz").requires.append("zlib::zlib")
    # with_inventor
    # setup_plugin("iv")
    # with_collada
    # setup_plugin("dae")
    # with_fbx
    # setup_plugin("fbx")
    # with_opencascade
    # setup_plugin("opencascade")
    setup_plugin("bvh").requires.append("osgAnimation")
    setup_plugin("x")
    setup_plugin("dxf").requires.append("osgText")
    setup_plugin("openflight").requires.append("osgSim")
    setup_plugin("obj")
    setup_plugin("pic")
    setup_plugin("stl")
    setup_plugin("3ds")
    setup_plugin("ac")
    setup_plugin("pov")
    setup_plugin("logo")
    setup_plugin("lws")
    setup_plugin("md2")
    setup_plugin("osgtgz")
    setup_plugin("tgz")
    setup_plugin("shp").requires.extend(("osgSim", "osgTerrain"))
    setup_plugin("txf").requires.append("osgText")
    setup_plugin("bsp")
    setup_plugin("mdl")
    setup_plugin("gles").requires.extend(("osgUtil", "osgAnimation"))
    setup_plugin("osgjs").requires.extend(("osgAnimation", "osgSim"))
    setup_plugin("lwo").requires.append("osgFX")
    setup_plugin("ply")
    setup_plugin("txp").requires.extend(("osgSim", "osgText"))
    # with_ffmpeg
    # setup_plugin("ffmpeg")
    # with_gstreamer
    # setup_plugin("gstreamer")
    # with_directshow
    # setup_plugin("directshow")
    if tools.is_apple_os(self.settings.os):
        setup_plugin("imageio").frameworks = ["Accelerate"]
    # Apple media plugins, gated on OS version / arch (conditions imply an
    # Apple OS, so these are safe at this nesting level).
    if ((self.settings.os == "Macos" and self.settings.os.version and tools.Version(self.settings.os.version) >= "10.8")
            or (self.settings.os == "iOS" and tools.Version(self.settings.os.version) >= "6.0")):
        plugin = setup_plugin("avfoundation")
        plugin.requires.append("osgViewer")
        plugin.frameworks = ["AVFoundation", "Cocoa", "CoreVideo", "CoreMedia", "QuartzCore"]
    if self.settings.os == "Macos" and self.settings.os.version and tools.Version(self.settings.os.version) <= "10.6" and self.settings.arch == "x86":
        setup_plugin("qt").frameworks = ["QuickTime"]
    if self.settings.os == "Macos" and self.settings.arch == "x86":
        plugin = setup_plugin("QTKit")
        plugin.requires.append("osgViewer")
        plugin.frameworks = ["QTKit", "Cocoa", "QuickTime", "CoreVideo"]
    # with_nvtt
    # setup_plugin("nvtt")
    if self.options.with_freetype:
        setup_plugin("freetype").requires.extend(("osgText", "freetype::freetype"))
    if self.options.with_zlib:
        setup_plugin("zip")
    # with_svg
    # setup_plugin("svg")
    # with_pdf/poppler
    # setup_plugin("pdf")
    # with_vnc
    # setup_plugin("vnc")
    setup_plugin("pvr")
    plugin = setup_plugin("osc")
    plugin.requires.append("osgGA")
    if self.settings.os == "Windows":
        plugin.system_libs = ["ws2_32", "winmm"]
    setup_plugin("trk")
    setup_plugin("tf")
    # with_blas
    # setup_plugin("las")
    setup_plugin("lua")
    # with_sdl
    # setup_plugin("sdl")
    if self.options.get_safe("with_asio", False):
        setup_plugin("resthttp").requires.extend(("osgPresentation", "asio::asio", "boost::boost"))
    # with_zeroconf
    # setup_plugin("zeroconf")
| 40.8 | 189 | 0.624748 | from conans import CMake, ConanFile, tools
from conans.errors import ConanInvalidConfiguration
import os
required_conan_version = ">=1.29.1"
class OpenSceneGraphConanFile(ConanFile):
name = "openscenegraph"
description = "OpenSceneGraph is an open source high performance 3D graphics toolkit"
topics = ("openscenegraph", "graphics")
url = "https://github.com/conan-io/conan-center-index"
homepage = "http://www.openscenegraph.org"
license = "LGPL-2.1-only", "WxWindows-exception-3.1"
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
"build_applications": [True, False],
"enable_notify": [True, False],
"enable_deprecated_api": [True, False],
"enable_readfile": [True, False],
"enable_ref_ptr_implicit_output_conversion": [True, False],
"enable_ref_ptr_safe_dereference": [True, False],
"enable_envvar_support": [True, False],
"enable_windowing_system": [True, False],
"enable_deprecated_serializers": [True, False],
"use_fontconfig": [True, False],
"with_asio": [True, False],
"with_curl": [True, False],
"with_dcmtk": [True, False],
"with_freetype": [True, False],
"with_gdal": [True, False],
"with_gif": [True, False],
"with_gta": [True, False],
"with_jasper": [True, False],
"with_jpeg": [True, False],
"with_openexr": [True, False],
"with_png": [True, False],
"with_tiff": [True, False],
"with_zlib": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
"build_applications": False,
"enable_notify": True,
"enable_deprecated_api": False,
"enable_readfile": True,
"enable_ref_ptr_implicit_output_conversion": True,
"enable_ref_ptr_safe_dereference": True,
"enable_envvar_support": True,
"enable_windowing_system": True,
"enable_deprecated_serializers": False,
"use_fontconfig": True,
"with_asio": False,
"with_curl": False,
"with_dcmtk": False,
"with_freetype": True,
"with_gdal": False,
"with_gif": True,
"with_gta": False,
"with_jasper": False,
"with_jpeg": True,
"with_openexr": False,
"with_png": True,
"with_tiff": True,
"with_zlib": True,
}
short_paths = True
exports_sources = "CMakeLists.txt", "patches/*.patch"
generators = "cmake", "cmake_find_package"
@property
def _source_subfolder(self):
return "source_subfolder"
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
del self.options.with_asio
self.options.use_fontconfig = False
if tools.is_apple_os(self.settings.os):
del self.options.with_gif
del self.options.with_jpeg
del self.options.with_png
self.options.with_tiff = False
def configure(self):
if self.options.shared:
del self.options.fPIC
if not self.options.with_zlib:
# These require zlib support
del self.options.with_openexr
del self.options.with_png
del self.options.with_dcmtk
def validate(self):
if self.options.get_safe("with_asio", False):
raise ConanInvalidConfiguration("ASIO support in OSG is broken, see https://github.com/openscenegraph/OpenSceneGraph/issues/921")
if hasattr(self, "settings_build") and tools.cross_building(self):
raise ConanInvalidConfiguration("openscenegraph recipe cannot be cross-built yet. Contributions are welcome.")
def requirements(self):
if self.options.enable_windowing_system and self.settings.os == "Linux":
self.requires("xorg/system")
self.requires("opengl/system")
if self.options.use_fontconfig:
self.requires("fontconfig/2.13.93")
if self.options.get_safe("with_asio", False):
# Should these be private requires?
self.requires("asio/1.18.1")
self.requires("boost/1.75.0")
if self.options.with_curl:
self.requires("libcurl/7.74.0")
if self.options.get_safe("with_dcmtk"):
self.requires("dcmtk/3.6.5")
if self.options.with_freetype:
self.requires("freetype/2.10.4")
if self.options.with_gdal:
self.requires("gdal/3.1.4")
if self.options.get_safe("with_gif"):
self.requires("giflib/5.2.1")
if self.options.with_gta:
self.requires("libgta/1.2.1")
if self.options.with_jasper:
self.requires("jasper/2.0.24")
if self.options.get_safe("with_jpeg"):
self.requires("libjpeg/9d")
if self.options.get_safe("with_openexr"):
self.requires("openexr/2.5.3")
if self.options.get_safe("with_png"):
self.requires("libpng/1.6.37")
if self.options.with_tiff:
self.requires("libtiff/4.2.0")
if self.options.with_zlib:
self.requires("zlib/1.2.11")
def source(self):
tools.get(**self.conan_data["sources"][self.version],
strip_root=True, destination=self._source_subfolder)
def _patch_sources(self):
for patch in self.conan_data["patches"].get(self.version, []):
tools.patch(**patch)
for package in ("Fontconfig", "Freetype", "GDAL", "GIFLIB", "GTA", "Jasper", "OpenEXR"):
# Prefer conan's find package scripts over osg's
os.unlink(os.path.join(self._source_subfolder, "CMakeModules", "Find{}.cmake".format(package)))
def _configured_cmake(self):
if hasattr(self, "_cmake"):
return self._cmake
self._cmake = cmake = CMake(self)
cmake.definitions["USE_3RDPARTY_BIN"] = False
cmake.definitions["DYNAMIC_OPENSCENEGRAPH"] = self.options.shared
cmake.definitions["DYNAMIC_OPENTHREADS"] = self.options.shared
cmake.definitions["BUILD_OSG_APPLICATIONS"] = self.options.build_applications
cmake.definitions["BUILD_OSG_EXAMPLES"] = False
cmake.definitions["OSG_NOTIFY_DISABLED"] = not self.options.enable_notify
cmake.definitions["OSG_USE_DEPRECATED_API"] = self.options.enable_deprecated_api
cmake.definitions["OSG_PROVIDE_READFILE"] = self.options.enable_readfile
cmake.definitions["OSG_USE_REF_PTR_IMPLICIT_OUTPUT_CONVERSION"] = self.options.enable_ref_ptr_implicit_output_conversion
cmake.definitions["OSG_USE_REF_PTR_SAFE_DEREFERENCE"] = self.options.enable_ref_ptr_safe_dereference
cmake.definitions["OSG_ENVVAR_SUPPORTED"] = self.options.enable_envvar_support
if not self.options.enable_windowing_system:
cmake.definitions["OSG_WINDOWING_SYSTEM"] = None
cmake.definitions["BUILD_OSG_DEPRECATED_SERIALIZERS"] = self.options.enable_deprecated_serializers
cmake.definitions["OSG_TEXT_USE_FONTCONFIG"] = self.options.use_fontconfig
# Disable option dependencies unless we have a package for them
cmake.definitions["OSG_WITH_FREETYPE"] = self.options.with_freetype
cmake.definitions["OSG_WITH_OPENEXR"] = self.options.get_safe("with_openexr", False)
cmake.definitions["OSG_WITH_INVENTOR"] = False
cmake.definitions["OSG_WITH_JASPER"] = self.options.with_jasper
cmake.definitions["OSG_WITH_OPENCASCADE"] = False
cmake.definitions["OSG_WITH_FBX"] = False
cmake.definitions["OSG_WITH_ZLIB"] = self.options.with_zlib
cmake.definitions["OSG_WITH_GDAL"] = self.options.with_gdal
cmake.definitions["OSG_WITH_GTA"] = self.options.with_gta
cmake.definitions["OSG_WITH_CURL"] = self.options.with_curl
cmake.definitions["OSG_WITH_LIBVNCSERVER"] = False
cmake.definitions["OSG_WITH_DCMTK"] = self.options.get_safe("with_dcmtk", False)
cmake.definitions["OSG_WITH_FFMPEG"] = False
cmake.definitions["OSG_WITH_DIRECTSHOW"] = False
cmake.definitions["OSG_WITH_SDL"] = False
cmake.definitions["OSG_WITH_POPPLER"] = False
cmake.definitions["OSG_WITH_RSVG"] = False
cmake.definitions["OSG_WITH_NVTT"] = False
cmake.definitions["OSG_WITH_ASIO"] = self.options.get_safe("with_asio", False)
cmake.definitions["OSG_WITH_ZEROCONF"] = False
cmake.definitions["OSG_WITH_LIBLAS"] = False
cmake.definitions["OSG_WITH_GIF"] = self.options.get_safe("with_gif", False)
cmake.definitions["OSG_WITH_JPEG"] = self.options.get_safe("with_jpeg", False)
cmake.definitions["OSG_WITH_PNG"] = self.options.get_safe("with_png", False)
cmake.definitions["OSG_WITH_TIFF"] = self.options.with_tiff
if self.settings.os == "Windows":
# osg has optional quicktime support on Windows
cmake.definitions["CMAKE_DISABLE_FIND_PACKAGE_QuickTime"] = True
cmake.definitions["OSG_MSVC_VERSIONED_DLL"] = False
cmake.configure()
return cmake
def build(self):
self._patch_sources()
self._configured_cmake().build()
def package(self):
self._configured_cmake().install()
self.copy(pattern="LICENSE.txt", dst="licenses", src=self._source_subfolder)
tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
tools.remove_files_by_mask(self.package_folder, "*.pdb")
def package_info(self):
# FindOpenSceneGraph.cmake is shipped with cmake and is a traditional cmake script
# It doesn't setup targets and only provides a few variables:
# but it will set the last three of these if the name of the package is OPENSCENEGRAPH (it uses
# the filename for the first, so OpenSceneGraph_FOUND gets set, not OPENSCENEGRAPH_FOUND)
# TODO: set OPENSCENEGRAPH_FOUND in cmake_find_package and cmake_find_package_multi
self.cpp_info.filenames["cmake_find_package"] = "OpenSceneGraph"
self.cpp_info.filenames["cmake_find_package_multi"] = "OpenSceneGraph"
self.cpp_info.names["cmake_find_package"] = "OPENSCENEGRAPH"
self.cpp_info.names["cmake_find_package_multi"] = "OPENSCENEGRAPH"
if self.settings.build_type == "Debug":
postfix = "d"
elif self.settings.build_type == "RelWithDebInfo":
postfix = "rd"
elif self.settings.build_type == "MinSizeRel":
postfix = "s"
else:
postfix = ""
def setup_plugin(plugin):
lib = "osgdb_" + plugin
plugin_library = self.cpp_info.components[lib]
plugin_library.libs = [] if self.options.shared else [lib + postfix]
plugin_library.requires = ["OpenThreads", "osg", "osgDB", "osgUtil"]
if not self.options.shared:
plugin_library.libdirs = [os.path.join("lib", "osgPlugins-{}".format(self.version))]
return plugin_library
def setup_serializers(lib):
plugins = []
if lib not in ("osgDB", "osgWidget", "osgPresentation"):
plugins.append("serializers_{}".format(lib.lower()))
if self.options.enable_deprecated_serializers:
if lib not in ("osgUtil", "osgDB", "osgGA", "osgManipulator", "osgUI", "osgPresentation"):
plugins.append("deprecated_{}".format(lib.lower()))
for plugin in plugins:
setup_plugin(plugin).requires.append(lib)
def setup_library(lib):
library = self.cpp_info.components[lib]
library.libs = [lib + postfix]
library.names["pkg_config"] = "openscenegraph-{}".format(lib)
setup_serializers(lib)
return library
# Core libraries
# requires obtained from osg's source code
library = self.cpp_info.components["OpenThreads"]
library.libs = ["OpenThreads" + postfix]
library.names["pkg_config"] = "openthreads"
if self.settings.os == "Linux":
library.system_libs = ["pthread"]
library = setup_library("osg")
library.requires = ["OpenThreads", "opengl::opengl"]
if self.settings.os == "Linux":
library.system_libs = ["m", "rt", "dl"]
if not self.options.shared:
library.defines.append("OSG_LIBRARY_STATIC")
library = setup_library("osgDB")
library.requires = ["osg", "osgUtil", "OpenThreads"]
if self.settings.os == "Linux":
library.system_libs = ["dl"]
elif self.settings.os == "Macos":
library.frameworks = ["Carbon", "Cocoa"]
if self.options.with_zlib:
library.requires.append("zlib::zlib")
setup_library("osgUtil").requires = ["osg", "OpenThreads"]
setup_library("osgGA").requires = ["osgDB", "osgUtil", "osg", "OpenThreads"]
library = setup_library("osgText")
library.requires = ["osgDB", "osg", "osgUtil", "OpenThreads"]
if self.options.use_fontconfig:
library.requires.append("fontconfig::fontconfig")
library = setup_library("osgViewer")
library.requires = ["osgGA", "osgText", "osgDB", "osgUtil", "osg"]
if self.options.enable_windowing_system:
if self.settings.os == "Linux":
library.requires.append("xorg::xorg")
elif tools.is_apple_os(self.settings.os):
library.frameworks = ["Cocoa"]
if self.settings.os == "Windows":
library.system_libs = ["gdi32"]
setup_library("osgAnimation").requires = ["osg", "osgText", "osgGA", "osgViewer", "OpenThreads"]
setup_library("osgFX").requires = ["osgUtil", "osgDB", "osg", "OpenThreads"]
setup_library("osgManipulator").requires = ["osgViewer", "osgGA", "osgUtil", "osg", "OpenThreads"]
setup_library("osgParticle").requires = ["osgUtil", "osgDB", "osg", "OpenThreads"]
setup_library("osgUI").requires = ["osgDB", "osgGA", "osgUtil", "osgText", "osgViewer", "osg", "OpenThreads"]
setup_library("osgVolume").requires = ["osgGA", "osgDB", "osgUtil", "osg", "OpenThreads"]
setup_library("osgShadow").requires = ["osgUtil", "osgDB", "osg", "OpenThreads"]
setup_library("osgSim").requires = ["osgText", "osgUtil", "osgDB", "osg", "OpenThreads"]
setup_library("osgTerrain").requires = ["osgUtil", "osgDB", "osg", "OpenThreads"]
setup_library("osgWidget").requires = ["osgText", "osgViewer", "osgDB", "osg", "OpenThreads"]
setup_library("osgPresentation").requires = ["osgViewer", "osgUI", "osgWidget", "osgManipulator", "osgVolume", "osgFX", "osgText", "osgGA", "osgUtil", "osgDB", "osg", "OpenThreads"]
setup_plugin("osga")
setup_plugin("rot")
setup_plugin("scale")
setup_plugin("trans")
setup_plugin("normals")
setup_plugin("revisions")
setup_plugin("osgviewer").requires.append("osgViewer")
setup_plugin("osgshadow").requires.append("osgShadow")
setup_plugin("osgterrain").requires.append("osgTerrain")
setup_plugin("osg")
plugin = setup_plugin("ive")
plugin.requires.extend(("osgSim", "osgFX", "osgText", "osgTerrain", "osgVolume"))
if self.options.with_zlib:
plugin.requires.append("zlib::zlib")
setup_plugin("cfg").requires.append("osgViewer")
setup_plugin("glsl")
setup_plugin("rgb")
setup_plugin("bmp")
setup_plugin("pnm")
setup_plugin("dds")
setup_plugin("tga")
setup_plugin("hdr")
setup_plugin("dot")
setup_plugin("vtf")
setup_plugin("ktx")
if self.options.get_safe("with_jpeg"):
setup_plugin("jpeg").requires.append("libjpeg::libjpeg")
if self.options.with_jasper:
setup_plugin("jp2").requires.append("jasper::jasper")
if self.options.get_safe("with_openexr"):
setup_plugin("exr").requires.append("openexr::openexr")
if self.options.get_safe("with_gif"):
setup_plugin("gif").requires.append("giflib::giflib")
if self.options.get_safe("with_png"):
setup_plugin("png").requires.extend(("libpng::libpng", "zlib::zlib"))
if self.options.with_tiff:
setup_plugin("tiff").requires.append("libtiff::libtiff")
if self.options.with_gdal:
setup_plugin("gdal").requires.extend(("osgTerrain", "gdal::gdal"))
setup_plugin("ogr").requires.append("gdal::gdal")
if self.options.with_gta:
setup_plugin("gta").requires.append("libgta::libgta")
if self.options.get_safe("with_dcmtk"):
plugin = setup_plugin("dicom")
plugin.requires.extend(("osgVolume", "dcmtk::dcmtk"))
if self.settings.os == "Windows":
plugin.system_libs = ["wsock32", "ws2_32"]
setup_plugin("3dc")
setup_plugin("p3d").requires.extend(("osgGA", "osgText", "osgVolume", "osgFX", "osgViewer", "osgPresentation"))
if self.options.with_curl:
plugin = setup_plugin("curl")
plugin.requires.append("libcurl::libcurl")
if self.options.with_zlib:
plugin.requires.append("zlib::zlib")
if self.options.with_zlib:
setup_plugin("gz").requires.append("zlib::zlib")
setup_plugin("bvh").requires.append("osgAnimation")
setup_plugin("x")
setup_plugin("dxf").requires.append("osgText")
setup_plugin("openflight").requires.append("osgSim")
setup_plugin("obj")
setup_plugin("pic")
setup_plugin("stl")
setup_plugin("3ds")
setup_plugin("ac")
setup_plugin("pov")
setup_plugin("logo")
setup_plugin("lws")
setup_plugin("md2")
setup_plugin("osgtgz")
setup_plugin("tgz")
setup_plugin("shp").requires.extend(("osgSim", "osgTerrain"))
setup_plugin("txf").requires.append("osgText")
setup_plugin("bsp")
setup_plugin("mdl")
setup_plugin("gles").requires.extend(("osgUtil", "osgAnimation"))
setup_plugin("osgjs").requires.extend(("osgAnimation", "osgSim"))
setup_plugin("lwo").requires.append("osgFX")
setup_plugin("ply")
setup_plugin("txp").requires.extend(("osgSim", "osgText"))
if tools.is_apple_os(self.settings.os):
setup_plugin("imageio").frameworks = ["Accelerate"]
if ((self.settings.os == "Macos" and self.settings.os.version and tools.Version(self.settings.os.version) >= "10.8")
or (self.settings.os == "iOS" and tools.Version(self.settings.os.version) >= "6.0")):
plugin = setup_plugin("avfoundation")
plugin.requires.append("osgViewer")
plugin.frameworks = ["AVFoundation", "Cocoa", "CoreVideo", "CoreMedia", "QuartzCore"]
if self.settings.os == "Macos" and self.settings.os.version and tools.Version(self.settings.os.version) <= "10.6" and self.settings.arch == "x86":
setup_plugin("qt").frameworks = ["QuickTime"]
if self.settings.os == "Macos" and self.settings.arch == "x86":
plugin = setup_plugin("QTKit")
plugin.requires.append("osgViewer")
plugin.frameworks = ["QTKit", "Cocoa", "QuickTime", "CoreVideo"]
if self.options.with_freetype:
setup_plugin("freetype").requires.extend(("osgText", "freetype::freetype"))
if self.options.with_zlib:
setup_plugin("zip")
setup_plugin("pvr")
plugin = setup_plugin("osc")
plugin.requires.append("osgGA")
if self.settings.os == "Windows":
plugin.system_libs = ["ws2_32", "winmm"]
setup_plugin("trk")
setup_plugin("tf")
setup_plugin("lua")
if self.options.get_safe("with_asio", False):
setup_plugin("resthttp").requires.extend(("osgPresentation", "asio::asio", "boost::boost"))
| true | true |
f7210d73ebb7dc89db96399282088b5d3bdb983b | 5,049 | py | Python | assignments2016/assignment1/cs231n/classifiers/linear_svm.py | janlukasschroeder/Stanford-cs231n | 0502fad608971f0ae4f44c5e5fd8cc062ddfc1f1 | [
"MIT"
] | null | null | null | assignments2016/assignment1/cs231n/classifiers/linear_svm.py | janlukasschroeder/Stanford-cs231n | 0502fad608971f0ae4f44c5e5fd8cc062ddfc1f1 | [
"MIT"
] | null | null | null | assignments2016/assignment1/cs231n/classifiers/linear_svm.py | janlukasschroeder/Stanford-cs231n | 0502fad608971f0ae4f44c5e5fd8cc062ddfc1f1 | [
"MIT"
] | null | null | null | import numpy as np
from random import shuffle
def svm_loss_naive(W, X, y, reg):
    """
    Structured SVM loss function, naive implementation (with loops).

    Inputs have dimension D, there are C classes, and we operate on minibatches
    of N examples.

    Inputs:
    - W: A numpy array of shape (D, C) containing weights.
    - X: A numpy array of shape (N, D) containing a minibatch of data.
    - y: A numpy array of shape (N,) containing training labels; y[i] = c means
      that X[i] has label c, where 0 <= c < C.
    - reg: (float) regularization strength

    Returns a tuple of:
    - loss as single float
    - gradient with respect to weights W; an array of same shape as W
    """
    dW = np.zeros(W.shape)  # gradient accumulator, same shape as W

    num_classes = W.shape[1]
    num_train = X.shape[0]
    loss = 0.0
    # range (not the Python-2-only xrange) so this also runs on Python 3
    for i in range(num_train):
        scores = X[i].dot(W)
        correct_class_score = scores[y[i]]
        for j in range(num_classes):
            if j == y[i]:
                continue
            margin = scores[j] - correct_class_score + 1  # note delta = 1
            if margin > 0:
                # Each violated margin pulls the correct-class column down by
                # X[i] and pushes the offending column up by X[i].
                dW[:, y[i]] += -X[i]
                dW[:, j] += X[i]
                loss += margin

    # Average the gradient over the batch, then add the regularization
    # gradient: d/dW of 0.5*reg*sum(W*W) is reg*W, consistent with the loss.
    dW /= num_train
    dW += reg * W

    # Average the loss over the batch and add L2 regularization.
    loss /= num_train
    loss += 0.5 * reg * np.sum(W * W)

    return loss, dW
def svm_loss_vectorized(W, X, y, reg):
    """
    Structured SVM loss function, vectorized implementation.

    Inputs and outputs are the same as svm_loss_naive.

    Fixes over the previous version:
    - correct class scores are gathered from the score matrix via fancy
      indexing (the old code broadcast the *labels* y instead of the scores);
    - the gradient is the analytic SVM gradient (the old code called
      np.gradient, which is numerical array differencing, not dL/dW);
    - debug print statements (Python-2 syntax) removed;
    - regularization gradient added, matching svm_loss_naive.
    """
    num_train = X.shape[0]
    rows = np.arange(num_train)

    scores = X.dot(W)                                      # (N, C)
    correct_class_scores = scores[rows, y][:, np.newaxis]  # (N, 1)

    # Hinge loss with delta = 1; zero out the correct class so it never
    # contributes to the loss (replaces max(0, s_j - s_{y_i} + 1)).
    margins = np.maximum(0, scores - correct_class_scores + 1)  # (N, C)
    margins[rows, y] = 0

    loss = np.sum(margins) / num_train
    loss += 0.5 * reg * np.sum(W * W)  # L2 regularization

    # Gradient: each strictly-positive margin contributes +X[i] to its own
    # column and -X[i] to the correct-class column (once per violation),
    # matching the `margin > 0` test in the naive version.
    indicator = (margins > 0).astype(float)                # (N, C)
    indicator[rows, y] = -np.sum(indicator, axis=1)
    dW = X.T.dot(indicator) / num_train
    dW += reg * W

    return loss, dW
| 35.307692 | 86 | 0.465241 | import numpy as np
from random import shuffle
def svm_loss_naive(W, X, y, reg):
"""
Structured SVM loss function, naive implementation (with loops).
Inputs have dimension D, there are C classes, and we operate on minibatches
of N examples.
Inputs:
- W: A numpy array of shape (D, C) containing weights.
- X: A numpy array of shape (N, D) containing a minibatch of data.
- y: A numpy array of shape (N,) containing training labels; y[i] = c means
that X[i] has label c, where 0 <= c < C.
- reg: (float) regularization strength
Returns a tuple of:
- loss as single float
- gradient with respect to weights W; an array of same shape as W
"""
dW = np.zeros(W.shape)
num_classes = W.shape[1]
num_train = X.shape[0]
loss = 0.0
for i in xrange(num_train):
scores = X[i].dot(W)
correct_class_score = scores[y[i]]
for j in xrange(num_classes):
if j == y[i]:
continue
margin = scores[j] - correct_class_score + 1
if margin > 0:
dW[:, y[i]] += -X[i]
dW[:, j] += X[i]
loss += margin
dW /= num_train
dW += reg * W
loss /= num_train
loss += 0.5 * reg * np.sum(W * W)
| false | true |
f7210dc85edd4d0b6ad091c50f23892394528a1e | 1,558 | py | Python | examples/aws_lambda/aws_lambda_oauth.py | korymath/bolt-python | 67e0286d756ba92510315d044303f43b03380b52 | [
"MIT"
] | 1 | 2021-05-02T16:06:44.000Z | 2021-05-02T16:06:44.000Z | examples/aws_lambda/aws_lambda_oauth.py | korymath/bolt-python | 67e0286d756ba92510315d044303f43b03380b52 | [
"MIT"
] | 1 | 2021-02-23T21:05:57.000Z | 2021-02-23T21:05:57.000Z | examples/aws_lambda/aws_lambda_oauth.py | korymath/bolt-python | 67e0286d756ba92510315d044303f43b03380b52 | [
"MIT"
] | null | null | null | # ------------------------------------------------
# instead of slack_bolt in requirements.txt
# Dependencies are vendored into ./vendor so the Lambda bundle is
# self-contained; prepend it to sys.path before importing slack_bolt.
import sys
sys.path.insert(1, "vendor")
# ------------------------------------------------
import logging
from slack_bolt import App
from slack_bolt.adapter.aws_lambda import SlackRequestHandler
from slack_bolt.adapter.aws_lambda.lambda_s3_oauth_flow import LambdaS3OAuthFlow
# process_before_response must be True when running on FaaS
# (the runtime may freeze the process once the HTTP response is returned,
# so listeners have to finish before responding). OAuth state/installation
# data is persisted in S3 via LambdaS3OAuthFlow.
app = App(process_before_response=True, oauth_flow=LambdaS3OAuthFlow(),)
@app.event("app_mention")
def handle_app_mentions(body, say, logger):
    """Reply in-channel whenever the bot is @mentioned.

    Bolt injects ``body``, ``say`` and ``logger`` by parameter name,
    so these parameter names must not be renamed.
    """
    logger.info(body)  # log the raw event payload
    say("What's up?")
@app.command("/hello-bolt-python-lambda")
def respond_to_slack_within_3_seconds(ack):
    """Acknowledge the slash command immediately (Slack expects the ack
    quickly, hence the function name's 3-second budget)."""
    # This method is for synchronous communication with the Slack API server
    ack("Thanks!")
# Drop any log handlers installed by the runtime so the basicConfig
# call below actually takes effect.
SlackRequestHandler.clear_all_log_handlers()
logging.basicConfig(format="%(asctime)s %(message)s", level=logging.DEBUG)
def handler(event, context):
slack_handler = SlackRequestHandler(app=app)
return slack_handler.handle(event, context)
# # -- OAuth flow -- #
# export SLACK_SIGNING_SECRET=***
# export SLACK_BOT_TOKEN=xoxb-***
# export SLACK_CLIENT_ID=111.111
# export SLACK_CLIENT_SECRET=***
# export SLACK_SCOPES=app_mentions:read,chat:write
# AWS IAM Role: bolt_python_s3_storage
# - AmazonS3FullAccess
# - AWSLambdaBasicExecutionRole
# rm -rf latest_slack_bolt && cp -pr ../../src latest_slack_bolt
# pip install python-lambda
# lambda deploy --config-file aws_lambda_oauth_config.yaml --requirements requirements_oauth.txt
| 29.396226 | 96 | 0.727856 |
import sys
sys.path.insert(1, "vendor")
import logging
from slack_bolt import App
from slack_bolt.adapter.aws_lambda import SlackRequestHandler
from slack_bolt.adapter.aws_lambda.lambda_s3_oauth_flow import LambdaS3OAuthFlow
app = App(process_before_response=True, oauth_flow=LambdaS3OAuthFlow(),)
@app.event("app_mention")
def handle_app_mentions(body, say, logger):
logger.info(body)
say("What's up?")
@app.command("/hello-bolt-python-lambda")
def respond_to_slack_within_3_seconds(ack):
# This method is for synchronous communication with the Slack API server
ack("Thanks!")
SlackRequestHandler.clear_all_log_handlers()
logging.basicConfig(format="%(asctime)s %(message)s", level=logging.DEBUG)
def handler(event, context):
slack_handler = SlackRequestHandler(app=app)
return slack_handler.handle(event, context)
# # -- OAuth flow -- #
# export SLACK_SIGNING_SECRET=***
# export SLACK_BOT_TOKEN=xoxb-***
# export SLACK_CLIENT_ID=111.111
# export SLACK_CLIENT_SECRET=***
# export SLACK_SCOPES=app_mentions:read,chat:write
# AWS IAM Role: bolt_python_s3_storage
# - AmazonS3FullAccess
# - AWSLambdaBasicExecutionRole
# rm -rf latest_slack_bolt && cp -pr ../../src latest_slack_bolt
# pip install python-lambda
# lambda deploy --config-file aws_lambda_oauth_config.yaml --requirements requirements_oauth.txt
| true | true |
f7210e74f4ea154ad8e0c98314be558c787c9440 | 483 | py | Python | app/settings.py | rchapman83/sticks-clothing | dfdb5283b00c9209f854648e50f30140a0bb3004 | [
"MIT"
] | null | null | null | app/settings.py | rchapman83/sticks-clothing | dfdb5283b00c9209f854648e50f30140a0bb3004 | [
"MIT"
] | null | null | null | app/settings.py | rchapman83/sticks-clothing | dfdb5283b00c9209f854648e50f30140a0bb3004 | [
"MIT"
] | null | null | null | # -*- settings:utf-8 -*-
# Flask settings
import logging
import os
# All deployment-specific values come from the environment at import time.
proj_name = os.environ.get('PROJECT_NAME')
# NOTE(review): FLASK_DEBUG arrives as a *string* (or None); any non-empty
# value — including "0" or "False" — is truthy where DEBUG is evaluated.
debug_mode = os.environ.get('FLASK_DEBUG')
secret_code = os.environ.get('FLASK_SECRET')
DEBUG = debug_mode
TESTING = False
USE_X_SENDFILE = False
CSRF_ENABLED = True
# Session-signing key; None if FLASK_SECRET is unset.
SECRET_KEY = secret_code
# LOGGING
LOGGER_NAME = '%s_log' % proj_name
LOG_FILENAME = '/var/tmp/app.%s.log' % proj_name
LOG_LEVEL = logging.INFO
LOG_FORMAT = '%(asctime)s %(levelname)s\t: %(message)s'
| 21.954545 | 55 | 0.730849 |
import logging
import os
proj_name = os.environ.get('PROJECT_NAME')
debug_mode = os.environ.get('FLASK_DEBUG')
secret_code = os.environ.get('FLASK_SECRET')
DEBUG = debug_mode
TESTING = False
USE_X_SENDFILE = False
CSRF_ENABLED = True
SECRET_KEY = secret_code
LOGGER_NAME = '%s_log' % proj_name
LOG_FILENAME = '/var/tmp/app.%s.log' % proj_name
LOG_LEVEL = logging.INFO
LOG_FORMAT = '%(asctime)s %(levelname)s\t: %(message)s'
| true | true |
f7210f83b40555129d292b05eb3bd12a490ff744 | 1,857 | py | Python | samplers.py | linkserendipity/deep-person-reid | 564ccf307336af1b3343fa42c55f9d53df0fa20a | [
"MIT"
] | null | null | null | samplers.py | linkserendipity/deep-person-reid | 564ccf307336af1b3343fa42c55f9d53df0fa20a | [
"MIT"
] | null | null | null | samplers.py | linkserendipity/deep-person-reid | 564ccf307336af1b3343fa42c55f9d53df0fa20a | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from collections import defaultdict
import numpy as np
import torch
from torch.utils.data.sampler import Sampler
class RandomIdentitySampler(Sampler):
    """Randomly sample N identities, then K instances per identity.

    The effective batch size is therefore N * K.  Adapted from
    https://github.com/Cysu/open-reid/blob/master/reid/utils/data/sampler.py.

    Args:
        data_source (Dataset): dataset to sample from; each item is a
            (image, pid, camid)-style tuple (only pid is read here).
        num_instances (int): number of instances drawn per identity.
    """
    def __init__(self, data_source, num_instances=4):
        self.data_source = data_source
        self.num_instances = num_instances
        # Map each person id to the dataset indices of its samples.
        self.index_dic = defaultdict(list)
        for index, (_, pid, _) in enumerate(data_source):
            self.index_dic[pid].append(index)
        self.pids = list(self.index_dic.keys())
        self.num_identities = len(self.pids)

    def __iter__(self):
        # Shuffle the identities, then draw num_instances indices for each;
        # sample with replacement when an identity has too few samples.
        identity_order = torch.randperm(self.num_identities)
        drawn = []
        for i in identity_order:
            pid = self.pids[i]
            candidates = self.index_dic[pid]
            replace = len(candidates) < self.num_instances
            chosen = np.random.choice(candidates, size=self.num_instances,
                                      replace=replace)
            drawn.extend(chosen)
        return iter(drawn)

    def __len__(self):
        return self.num_identities * self.num_instances
# if __name__ == "__main__":
# from util.data_manager import Market1501
# dataset = Market1501(root='/home/ls')
# sampler = RandomIdentitySampler(dataset.train)
# a = sampler.__iter__() | 37.14 | 113 | 0.662897 | from __future__ import absolute_import
from collections import defaultdict
import numpy as np
import torch
from torch.utils.data.sampler import Sampler
class RandomIdentitySampler(Sampler):
def __init__(self, data_source, num_instances=4):
self.data_source = data_source
self.num_instances = num_instances
self.index_dic = defaultdict(list)
for index, (_, pid, _) in enumerate(data_source):
self.index_dic[pid].append(index)
self.pids = list(self.index_dic.keys())
self.num_identities = len(self.pids)
def __iter__(self):
indices = torch.randperm(self.num_identities)
ret = []
for i in indices:
pid = self.pids[i]
t = self.index_dic[pid]
replace = False if len(t) >= self.num_instances else True
t = np.random.choice(t, size=self.num_instances, replace=replace)
ret.extend(t)
return iter(ret)
def __len__(self):
return self.num_identities * self.num_instances
| true | true |
f7210fbfe983a9e81665dcac17e1a9498a07d28d | 5,545 | py | Python | examples/pwr_run/ml_regression/new_speedup_def/knn_k80.py | boringlee24/keras_old | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | [
"MIT"
] | null | null | null | examples/pwr_run/ml_regression/new_speedup_def/knn_k80.py | boringlee24/keras_old | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | [
"MIT"
] | null | null | null | examples/pwr_run/ml_regression/new_speedup_def/knn_k80.py | boringlee24/keras_old | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | [
"MIT"
] | null | null | null | import pandas
import pdb
from datetime import datetime
import matplotlib
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import glob
import sys
from matplotlib.ticker import MultipleLocator
from scipy.stats import pearsonr, spearmanr
from sklearn import neighbors
from sklearn.metrics import mean_squared_error
from math import sqrt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
import json
def load_metric_dir(pattern, name_index):
    """Load every per-testcase CSV matching *pattern*.

    Each file name looks like '<gpu>_<model>.csv'; *name_index* is the
    position of that file name in the '/'-split path.

    Returns:
        dict: {model: {gpu: np.array of the first CSV column}}.
    """
    metrics = {}
    for path in sorted(glob.glob(pattern)):
        test = path.split('/')[name_index].split('.')[0]
        gpu = test.split('_')[0]
        model = test.replace(gpu + '_', '')
        data = pandas.read_csv(path)
        values = np.asarray(data[data.columns[0]].tolist())
        metrics.setdefault(model, {})[gpu] = values
    return metrics

# Per-model measurements keyed as {model: {gpu: np.array}}.
all_pwr = load_metric_dir(
    '/scratch/li.baol/GPU_pwr_meas/tensorflow/round1/regression/pwr/*', 8)
all_util = load_metric_dir(
    '/scratch/li.baol/GPU_pwr_meas/tensorflow/round1/regression/util/*', 8)
all_mem_util = load_metric_dir(
    '/scratch/li.baol/GPU_pwr_meas/tensorflow/round1/regression/mem_util/*', 8)
all_time = load_metric_dir(
    '/scratch/li.baol/GPU_time_meas/tensorflow/round1/csv/*', 7)
# Assemble per-sample K80 features and the K80 -> V100 speed-up target.
x1_data = []  # K80 power draw
x2_data = []  # K80 speed (1 / epoch time)
x3_data = []  # K80 GPU utilization
x4_data = []  # K80 memory utilization
y_data = []   # speed-up: K80 time / V100 time
for key in all_pwr:
    x1_data.extend(all_pwr[key]['K80'].tolist())
    x2_data.extend((1 / all_time[key]['K80']).tolist())
    x3_data.extend(all_util[key]['K80'].tolist())
    x4_data.extend(all_mem_util[key]['K80'].tolist())
    y_data.extend((all_time[key]['K80'] / all_time[key]['V100']).tolist())

def min_max_scale(values):
    """Rescale a sequence of numbers to [0, 1]."""
    lo, hi = min(values), max(values)
    return [(v - lo) / (hi - lo) for v in values]

x1_norm = min_max_scale(x1_data)
x2_norm = min_max_scale(x2_data)
x3_norm = min_max_scale(x3_data)
x4_norm = min_max_scale(x4_data)
# One 4-feature row per sample, then a 70/30 train/test split.
x_data = [list(row) for row in zip(x1_norm, x2_norm, x3_norm, x4_norm)]
x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, test_size=0.3)
# Persist the raw feature/target columns for later experiments.
for name, column in (('x1_data', x1_data), ('x2_data', x2_data),
                     ('x3_data', x3_data), ('x4_data', x4_data),
                     ('y_data', y_data)):
    with open(name + '.json', 'w') as outfile:
        json.dump(column, outfile)
# Sweep k for a distance-weighted k-NN regressor and report RMSE per k.
rmse_val = []  # RMSE for each k, in order k = 1..20
for K in range(1, 21):
    model = neighbors.KNeighborsRegressor(n_neighbors=K, weights='distance')
    model.fit(x_train, y_train)
    pred = model.predict(x_test)
    err = sqrt(mean_squared_error(y_test, pred))
    rmse_val.append(err)
    # Relies on numpy broadcasting: pred is an ndarray, y_test a list.
    err_pct = abs(y_test - pred) / y_test * 100
    print('RMSE value for k= ' , K , 'is:', err)
    print('error (%) is', np.mean(err_pct))
# Baseline: a plain linear fit on the normalized power feature alone.
xx_data = [[value] for value in x1_norm]
x_train, x_test, y_train, y_test = train_test_split(xx_data, y_data, test_size=0.3)
model2 = LinearRegression().fit(x_train, y_train)
pred = model2.predict(x_test)
err = sqrt(mean_squared_error(y_test, pred))
print('RMSE value for linear regression is ', err)
| 31.327684 | 83 | 0.658431 | import pandas
import pdb
from datetime import datetime
import matplotlib
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import glob
import sys
from matplotlib.ticker import MultipleLocator
from scipy.stats import pearsonr, spearmanr
from sklearn import neighbors
from sklearn.metrics import mean_squared_error
from math import sqrt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
import json
log_dir = '/scratch/li.baol/GPU_pwr_meas/tensorflow/round1/regression/pwr/*'
dirs = glob.glob(log_dir)
dirs.sort()
all_pwr = {}
for tc in dirs:
test = tc.split('/')[6+1+1].split('.')[0]
gpu = test.split('_')[0]
model = test.replace(gpu + '_', '')
data = pandas.read_csv(tc)
pwr = np.asarray(data[data.columns[0]].tolist())
if model in all_pwr:
all_pwr[model][gpu] = pwr
else:
all_pwr[model] = {gpu: pwr}
log_dir = '/scratch/li.baol/GPU_pwr_meas/tensorflow/round1/regression/util/*'
dirs = glob.glob(log_dir)
dirs.sort()
all_util = {}
for tc in dirs:
test = tc.split('/')[6+1+1].split('.')[0]
gpu = test.split('_')[0]
model = test.replace(gpu + '_', '')
data = pandas.read_csv(tc)
util = np.asarray(data[data.columns[0]].tolist())
if model in all_util:
all_util[model][gpu] = util
else:
all_util[model] = {gpu: util}
log_dir = '/scratch/li.baol/GPU_pwr_meas/tensorflow/round1/regression/mem_util/*'
dirs = glob.glob(log_dir)
dirs.sort()
all_mem_util = {}
for tc in dirs:
test = tc.split('/')[6+1+1].split('.')[0]
gpu = test.split('_')[0]
model = test.replace(gpu + '_', '')
data = pandas.read_csv(tc)
mem_util = np.asarray(data[data.columns[0]].tolist())
if model in all_mem_util:
all_mem_util[model][gpu] = mem_util
else:
all_mem_util[model] = {gpu: mem_util}
log_dir = '/scratch/li.baol/GPU_time_meas/tensorflow/round1/csv/*'
dirs = glob.glob(log_dir)
dirs.sort()
all_time = {}
for tc in dirs:
test = tc.split('/')[6+1].split('.')[0]
gpu = test.split('_')[0]
model = test.replace(gpu + '_', '')
data = pandas.read_csv(tc)
time = np.asarray(data[data.columns[0]].tolist())
if model in all_time:
all_time[model][gpu] = time
else:
all_time[model] = {gpu: time}
x1_data = []
x2_data = []
x3_data = []
x4_data = []
y_data = []
for key in all_pwr:
for i in all_pwr[key]['K80'].tolist():
x1_data.append(i)
for i in (1 / all_time[key]['K80']).tolist():
x2_data.append(i)
for i in (all_util[key]['K80']).tolist():
x3_data.append(i)
for i in (all_mem_util[key]['K80']).tolist():
x4_data.append(i)
for i in (all_time[key]['K80'] / all_time[key]['V100']).tolist():
y_data.append(i)
x1_norm = [(i - min(x1_data)) / (max(x1_data) - min(x1_data)) for i in x1_data]
x2_norm = [(i - min(x2_data)) / (max(x2_data) - min(x2_data)) for i in x2_data]
x3_norm = [(i - min(x3_data)) / (max(x3_data) - min(x3_data)) for i in x3_data]
x4_norm = [(i - min(x4_data)) / (max(x4_data) - min(x4_data)) for i in x4_data]
x_data = []
for i in range(len(x1_norm)):
x_data.append([x1_norm[i], x2_norm[i], x3_norm[i], x4_norm[i]])
x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, test_size=0.3)
with open('x1_data.json', 'w') as outfile:
json.dump(x1_data, outfile)
with open('x2_data.json', 'w') as outfile:
json.dump(x2_data, outfile)
with open('x3_data.json', 'w') as outfile:
json.dump(x3_data, outfile)
with open('x4_data.json', 'w') as outfile:
json.dump(x4_data, outfile)
with open('y_data.json', 'w') as outfile:
json.dump(y_data, outfile)
rmse_val = []
for K in range(20):
K = K+1
model = neighbors.KNeighborsRegressor(n_neighbors = K, weights='distance')
model.fit(x_train, y_train)
pred = model.predict(x_test)
err = sqrt(mean_squared_error(y_test, pred))
rmse_val.append(err)
err_pct = abs(y_test-pred) / y_test * 100
print('RMSE value for k= ' , K , 'is:', err)
print('error (%) is', np.mean(err_pct))
xx_data = []
for i in range(len(x1_norm)):
xx_data.append([x1_norm[i]])
x_train, x_test, y_train, y_test = train_test_split(xx_data, y_data, test_size=0.3)
model2 = LinearRegression().fit(x_train, y_train)
pred = model2.predict(x_test)
err = sqrt(mean_squared_error(y_test,pred))
print('RMSE value for linear regression is ', err)
| true | true |
f7210feadbc98c8ee9e14ec28cba851c6e06e25b | 1,367 | py | Python | ssseg/cfgs/fcn/cfgs_voc_resnest101os8.py | nianjiuhuiyi/sssegmentation | 4fc12ea7b80fe83170b6d3da0826e53a99ef5325 | [
"MIT"
] | 411 | 2020-10-22T02:24:57.000Z | 2022-03-31T11:19:17.000Z | ssseg/cfgs/fcn/cfgs_voc_resnest101os8.py | nianjiuhuiyi/sssegmentation | 4fc12ea7b80fe83170b6d3da0826e53a99ef5325 | [
"MIT"
] | 24 | 2020-12-21T03:53:54.000Z | 2022-03-17T06:50:00.000Z | ssseg/cfgs/fcn/cfgs_voc_resnest101os8.py | nianjiuhuiyi/sssegmentation | 4fc12ea7b80fe83170b6d3da0826e53a99ef5325 | [
"MIT"
] | 59 | 2020-12-04T03:40:12.000Z | 2022-03-30T09:12:47.000Z | '''define the config file for voc and resnest101os8'''
import os
from .base_cfg import *

# Dataset: Pascal VOC 2012, trainaug split for training.
DATASET_CFG = DATASET_CFG.copy()
DATASET_CFG.update({
    'type': 'voc',
    'rootdir': os.path.join(os.getcwd(), 'VOCdevkit/VOC2012'),
})
DATASET_CFG['train']['set'] = 'trainaug'
# Dataloader: defaults from the base config.
DATALOADER_CFG = DATALOADER_CFG.copy()
# Optimizer: train for 60 epochs.
OPTIMIZER_CFG = OPTIMIZER_CFG.copy()
OPTIMIZER_CFG.update({
    'max_epochs': 60,
})
# Losses: defaults from the base config.
LOSSES_CFG = LOSSES_CFG.copy()
# Model: ResNeSt-101 backbone, output stride 8, 21 VOC classes.
MODEL_CFG = MODEL_CFG.copy()
MODEL_CFG.update({
    'num_classes': 21,
    'backbone': {
        'type': 'resnest101',
        'series': 'resnest',
        'pretrained': True,
        'outstride': 8,
        'selected_indices': (2, 3),
    },
})
# Inference: defaults from the base config.
INFERENCE_CFG = INFERENCE_CFG.copy()
# Common: backup/log paths for the train and test runs.
COMMON_CFG = COMMON_CFG.copy()
COMMON_CFG['train'].update({
    'backupdir': 'fcn_resnest101os8_voc_train',
    'logfilepath': 'fcn_resnest101os8_voc_train/train.log',
})
COMMON_CFG['test'].update({
    'backupdir': 'fcn_resnest101os8_voc_test',
    'logfilepath': 'fcn_resnest101os8_voc_test/test.log',
    'resultsavepath': 'fcn_resnest101os8_voc_test/fcn_resnest101os8_voc_results.pkl'
})
from .base_cfg import *
DATASET_CFG = DATASET_CFG.copy()
DATASET_CFG.update({
'type': 'voc',
'rootdir': os.path.join(os.getcwd(), 'VOCdevkit/VOC2012'),
})
DATASET_CFG['train']['set'] = 'trainaug'
DATALOADER_CFG = DATALOADER_CFG.copy()
OPTIMIZER_CFG = OPTIMIZER_CFG.copy()
OPTIMIZER_CFG.update(
{
'max_epochs': 60,
}
)
LOSSES_CFG = LOSSES_CFG.copy()
MODEL_CFG = MODEL_CFG.copy()
MODEL_CFG.update(
{
'num_classes': 21,
'backbone': {
'type': 'resnest101',
'series': 'resnest',
'pretrained': True,
'outstride': 8,
'selected_indices': (2, 3),
},
}
)
INFERENCE_CFG = INFERENCE_CFG.copy()
COMMON_CFG = COMMON_CFG.copy()
COMMON_CFG['train'].update(
{
'backupdir': 'fcn_resnest101os8_voc_train',
'logfilepath': 'fcn_resnest101os8_voc_train/train.log',
}
)
COMMON_CFG['test'].update(
{
'backupdir': 'fcn_resnest101os8_voc_test',
'logfilepath': 'fcn_resnest101os8_voc_test/test.log',
'resultsavepath': 'fcn_resnest101os8_voc_test/fcn_resnest101os8_voc_results.pkl'
}
) | true | true |
f721104366206bc775401b5c4d6634e901a2440d | 495 | py | Python | skype2.py | tullowhurler/GMIT-project-submissions | 5c75d5303bbdf75068b2b874debccf3531c7b80b | [
"Apache-2.0"
] | null | null | null | skype2.py | tullowhurler/GMIT-project-submissions | 5c75d5303bbdf75068b2b874debccf3531c7b80b | [
"Apache-2.0"
] | null | null | null | skype2.py | tullowhurler/GMIT-project-submissions | 5c75d5303bbdf75068b2b874debccf3531c7b80b | [
"Apache-2.0"
] | null | null | null | #Solution 2
#16/3/18 Ian's Solution
def ispalindrome(s):
    """Return True when string *s* reads the same forwards and backwards."""
    # Compare each character with its mirror.  Unlike the original, stop at
    # the first mismatch and only scan half the string — the original kept
    # looping (and re-checked every pair twice) after the answer was known.
    for i in range(len(s) // 2):
        if s[i] != s[len(s) - 1 - i]:
            return False
    return True
print(ispalindrome("eye"))
print(ispalindrome("eyes")) | 35.357143 | 137 | 0.640404 |
def ispalindrome(s): # s is the string
ans = True # thats what will print out
for i in range(len(s)): # loops through s which we put down in print
if s[i] != s[len(s) - 1 -i]: # len s of radar is = 5 as there is 5 digits, we want to get to 0-4 so have to -1, i starts at 0 and
ans = False # if i is not = i returns false
return ans # have to have return in the function
print(ispalindrome("eye"))
print(ispalindrome("eyes")) | true | true |
f7211163c547410a5d37c79cba8d58a47a6c46de | 7,205 | py | Python | final-exam/tic_toc_toe_messy.py | Tanner-York-Make-School/SPD-2.31-Testing-and-Architecture | 623537a05cf5a9d50370a414a5056a78f95288eb | [
"MIT"
] | null | null | null | final-exam/tic_toc_toe_messy.py | Tanner-York-Make-School/SPD-2.31-Testing-and-Architecture | 623537a05cf5a9d50370a414a5056a78f95288eb | [
"MIT"
] | null | null | null | final-exam/tic_toc_toe_messy.py | Tanner-York-Make-School/SPD-2.31-Testing-and-Architecture | 623537a05cf5a9d50370a414a5056a78f95288eb | [
"MIT"
] | null | null | null | """
Tic Tac Toe
Reference: With modification from http://inventwithpython.com/chapter10.html.
# TODOs:
# 1. Find all TODO items and see whether you can improve the code.
# In most cases (if not all), you can make them more readable/modular.
# 2. Add/fix function's docstrings
"""
import random
# I didn't refactor the draw and is_winner, that uses the magic number 10,
# function because that would be drastically changing how the
# code works. Instead of creating a normal tic tac toe game like intended,
# it would add a new feature for creating larger boards, no longer making this
# refactoring but adding a new feature.
def draw_board(board):
    """Print the 3x3 board.

    "board" is a list of 10 strings representing the board (index 0 unused).
    """
    # The original repeated the same three prints for every row; emit each
    # row group in a loop, with a separator line between groups.
    for start in (1, 4, 7):
        if start != 1:
            print('-----------')
        print(' | |')
        print(' ' + board[start] + ' | ' + board[start + 1] + ' | ' + board[start + 2])
        print(' | |')
def input_player_letter():
    """Ask until the player picks X or O.

    Returns a two-item list: the player's letter first, then the computer's.
    """
    choice = ''
    while choice not in ('X', 'O'):
        print('Do you want to be X or O?')
        choice = input().upper()
    computer = 'O' if choice == 'X' else 'X'
    return [choice, computer]
def who_goes_first():
    """Randomly pick which side makes the first move."""
    return random.choice(['computer', 'player'])
def play_again():
    """Return True when the player answers yes-ish to another round."""
    print('Do you want to play again? (yes or no)')
    answer = input().lower()
    return answer.startswith('y')
def make_move(board, letter, move):
    """Write *letter* into square *move* of *board*, mutating it in place."""
    board[move] = letter
def is_winner(board, letter):
    """Return True when *letter* occupies a complete row, column or diagonal."""
    lines = ((1, 2, 3), (4, 5, 6), (7, 8, 9),   # rows
             (1, 4, 7), (2, 5, 8), (3, 6, 9),   # columns
             (3, 5, 7), (1, 5, 9))              # diagonals
    return any(all(board[pos] == letter for pos in line) for line in lines)
def get_board_copy(board):
    """Return a shallow copy of *board* for what-if move testing."""
    return [square for square in board]
def is_space_free(board, move):
    """Return True when square *move* on *board* is still unclaimed."""
    square = board[move]
    return square == ' '
def get_player_move(board):
    """Prompt until the player names a free square as a digit 1..9."""
    valid = {str(i) for i in range(1, len(board))}
    choice = ' '
    while choice not in valid or not is_space_free(board, int(choice)):
        print('What is your next move? (1-9)')
        choice = input()
    return int(choice)
def choose_random_move_from_list(board, moves_list):
    """Return a random free move from *moves_list*, or None when none is free.

    The original fell off the end of the function to return None implicitly;
    make that contract explicit.
    """
    possible_moves = [move for move in moves_list if is_space_free(board, move)]
    if possible_moves:
        return random.choice(possible_moves)
    return None
def is_next_move_win(board, letter):
    """Return a move (1-9) that lets *letter* win immediately, or None.

    The original docstring claimed a boolean; the function actually returns
    the winning square index (or an implicit None).  Also renamed the local
    that shadowed the stdlib ``copy`` name and made the None return explicit.
    """
    for move in range(1, 10):
        board_copy = get_board_copy(board)
        if is_space_free(board_copy, move):
            make_move(board_copy, letter, move)
            if is_winner(board_copy, letter):
                return move
    return None
def get_computer_move(board, temp_computer_letter):
    """Pick the computer's move: win, block, corner, center, then side."""
    temp_player_letter = 'O' if temp_computer_letter == 'X' else 'X'

    # Take an immediate win when one exists.
    winning = is_next_move_win(board, temp_computer_letter)
    if winning:
        return winning

    # Otherwise block the player's immediate win.
    blocking = is_next_move_win(board, temp_player_letter)
    if blocking:
        return blocking

    # Prefer a free corner, then the center, then any free side square.
    corner = choose_random_move_from_list(board, [1, 3, 7, 9])
    if corner is not None:
        return corner
    if is_space_free(board, 5):
        return 5
    return choose_random_move_from_list(board, [2, 4, 6, 8])
def is_board_full(board):
    """Return True when no square on *board* is free."""
    return not any(is_space_free(board, i) for i in range(1, len(board)))
def start_new_round(board, temp_player_letter, temp_computer_letter, temp_turn):
    """Play one full round, alternating turns until a win or a tie."""
    while True:
        if temp_turn == 'player':
            # Human picks a square; a winning move ends the round.
            draw_board(board)
            move = get_player_move(board)
            make_move(board, temp_player_letter, move)
            if is_winner(board, temp_player_letter):
                draw_board(board)
                print('Hooray! You have won the game!')
                return
            temp_turn = 'computer'
        else:
            # Computer replies using its move heuristic.
            move = get_computer_move(board, temp_computer_letter)
            make_move(board, temp_computer_letter, move)
            if is_winner(board, temp_computer_letter):
                draw_board(board)
                print('The computer has beaten you! You lose.')
                return
            temp_turn = 'player'
        # No winner and no free squares left: declare a draw.
        if is_board_full(board):
            draw_board(board)
            print('The game is a tie!')
            return
def start_session(board_size=10):
    """Run games against the bot until the player declines another round."""
    print('Welcome to Tic Tac Toe!')
    playing = True
    while playing:
        the_board = [' '] * board_size   # fresh board; index 0 unused
        player_letter, computer_letter = input_player_letter()
        turn = who_goes_first()
        print('The ' + turn + ' will go first.')
        start_new_round(the_board, player_letter, computer_letter, turn)
        playing = play_again()
if __name__ == '__main__':
start_session()
| 36.025 | 98 | 0.624427 |
import random
# function because that would be drastically changing how the
# code works. Instead of creating a normal tic tac toe game like intended,
# it would add a new feature for creating larger boards, no longer making this
# refactoring but adding a new feature.
def draw_board(board):
# "board" is a list of 10 strings representing the board (ignore index 0)
print(' | |')
print(' ' + board[1] + ' | ' + board[2] + ' | ' + board[3])
print(' | |')
print('-----------')
print(' | |')
print(' ' + board[4] + ' | ' + board[5] + ' | ' + board[6])
print(' | |')
print('-----------')
print(' | |')
print(' ' + board[7] + ' | ' + board[8] + ' | ' + board[9])
print(' | |')
def input_player_letter():
letter = ''
while letter not in ('X', 'O'):
print('Do you want to be X or O?')
letter = input().upper()
# the first element in the list is the player’s letter, the second is the computer's letter.
if letter == 'X':
return ['X', 'O']
return ['O', 'X']
def who_goes_first():
if random.randint(0, 1) == 0:
return 'computer'
return 'player'
def play_again():
print('Do you want to play again? (yes or no)')
return input().lower().startswith('y')
def make_move(board, letter, move):
board[move] = letter
def is_winner(board, letter):
return ((board[1] == letter and board[2] == letter and board[3] == letter) or
(board[4] == letter and board[5] == letter and board[6] == letter) or
(board[7] == letter and board[8] == letter and board[9] == letter) or
(board[1] == letter and board[4] == letter and board[7] == letter) or
(board[2] == letter and board[5] == letter and board[8] == letter) or
(board[3] == letter and board[6] == letter and board[9] == letter) or
(board[3] == letter and board[5] == letter and board[7] == letter) or
(board[1] == letter and board[5] == letter and board[9] == letter))
def get_board_copy(board):
return list(board)
def is_space_free(board, move):
return board[move] == ' '
def get_player_move(board):
player_move = ' '
options = set(str(i) for i in range(1, len(board)))
while (player_move not in options or
not is_space_free(board, int(player_move))):
print('What is your next move? (1-9)')
player_move = input()
return int(player_move)
def choose_random_move_from_list(board, moves_list):
possible_moves = []
for i in moves_list:
if is_space_free(board, i):
possible_moves.append(i)
if possible_moves:
return random.choice(possible_moves)
def is_next_move_win(board, letter):
for i in range(1, 10):
copy = get_board_copy(board)
if is_space_free(copy, i):
make_move(copy, letter, i)
if is_winner(copy, letter):
return i
def get_computer_move(board, temp_computer_letter):
if temp_computer_letter == 'X':
temp_player_letter = 'O'
else:
temp_player_letter = 'X'
is_ai_winner = is_next_move_win(board, temp_computer_letter)
if is_ai_winner:
return is_ai_winner
is_player_winner = is_next_move_win(board, temp_player_letter)
if is_player_winner:
return is_player_winner
move = choose_random_move_from_list(board, [1, 3, 7, 9])
if move is not None:
return move
if is_space_free(board, 5):
return 5
return choose_random_move_from_list(board, [2, 4, 6, 8])
def is_board_full(board):
for i in range(1, len(board)):
if is_space_free(board, i):
return False
return True
def start_new_round(board, temp_player_letter, temp_computer_letter, temp_turn):
while True:
if temp_turn == 'player':
draw_board(board)
move = get_player_move(board)
make_move(board, temp_player_letter, move)
if is_winner(board, temp_player_letter):
draw_board(board)
print('Hooray! You have won the game!')
break
temp_turn = 'computer'
else:
move = get_computer_move(board, temp_computer_letter)
make_move(board, temp_computer_letter, move)
if is_winner(board, temp_computer_letter):
draw_board(board)
print('The computer has beaten you! You lose.')
break
temp_turn = 'player'
if is_board_full(board):
draw_board(board)
print('The game is a tie!')
break
def start_session(board_size=10):
print('Welcome to Tic Tac Toe!')
while True:
the_board = [' '] * board_size
player_letter, computer_letter = input_player_letter()
turn = who_goes_first()
print('The ' + turn + ' will go first.')
start_new_round(the_board, player_letter, computer_letter, turn)
if not play_again():
break
if __name__ == '__main__':
start_session()
| true | true |
f721131d0c71c26b6d07fafc53e439f251dd92fe | 18,055 | py | Python | test/test_l2bd_arp_term.py | snergfdio/vppclone | a288f8a1020eb74687eeb0a0a771977ce9b0c01d | [
"Apache-2.0"
] | null | null | null | test/test_l2bd_arp_term.py | snergfdio/vppclone | a288f8a1020eb74687eeb0a0a771977ce9b0c01d | [
"Apache-2.0"
] | 1 | 2021-06-01T23:30:08.000Z | 2021-06-01T23:30:08.000Z | test/test_l2bd_arp_term.py | snergfdio/vppclone | a288f8a1020eb74687eeb0a0a771977ce9b0c01d | [
"Apache-2.0"
] | 1 | 2019-03-11T19:28:31.000Z | 2019-03-11T19:28:31.000Z | #!/usr/bin/env python
""" L2BD ARP term Test """
import unittest
import random
import copy
from socket import AF_INET, AF_INET6
from scapy.packet import Raw
from scapy.layers.l2 import Ether, ARP
from scapy.layers.inet import IP
from scapy.utils import inet_pton, inet_ntop
from scapy.utils6 import in6_getnsma, in6_getnsmac, in6_ptop, in6_islladdr, \
in6_mactoifaceid, in6_ismaddr
from scapy.layers.inet6 import IPv6, UDP, ICMPv6ND_NS, ICMPv6ND_RS, \
ICMPv6ND_RA, ICMPv6NDOptSrcLLAddr, getmacbyip6, ICMPv6MRD_Solicitation, \
ICMPv6NDOptMTU, ICMPv6NDOptSrcLLAddr, ICMPv6NDOptPrefixInfo, \
ICMPv6ND_NA, ICMPv6NDOptDstLLAddr, ICMPv6DestUnreach, icmp6types
from framework import VppTestCase, VppTestRunner
from util import Host, ppp
class TestL2bdArpTerm(VppTestCase):
""" L2BD arp termination Test Case """
@classmethod
def setUpClass(cls):
"""
Perform standard class setup (defined by class method setUpClass in
class VppTestCase) before running the test case, set test case related
variables and configure VPP.
"""
super(TestL2bdArpTerm, cls).setUpClass()
try:
# Create pg interfaces
n_bd = 1
cls.ifs_per_bd = ifs_per_bd = 3
n_ifs = n_bd * ifs_per_bd
cls.create_pg_interfaces(range(n_ifs))
# Set up all interfaces
for i in cls.pg_interfaces:
i.admin_up()
cls.hosts = set()
except Exception:
super(TestL2bdArpTerm, cls).tearDownClass()
raise
def setUp(self):
"""
Clear trace and packet infos before running each test.
"""
self.reset_packet_infos()
super(TestL2bdArpTerm, self).setUp()
def tearDown(self):
"""
Show various debug prints after each test.
"""
super(TestL2bdArpTerm, self).tearDown()
if not self.vpp_dead:
self.logger.info(self.vapi.ppcli("show l2fib verbose"))
self.logger.info(self.vapi.ppcli("show bridge-domain 1 detail"))
def add_del_arp_term_hosts(self, entries, bd_id=1, is_add=1, is_ipv6=0):
for e in entries:
ip = e.ip4 if is_ipv6 == 0 else e.ip6
self.vapi.bd_ip_mac_add_del(bd_id=bd_id, is_add=is_add, ip=ip,
mac=e.mac)
@classmethod
def mac_list(cls, b6_range):
return ["00:00:ca:fe:00:%02x" % b6 for b6 in b6_range]
@classmethod
def ip4_host(cls, subnet, host, mac):
return Host(mac=mac,
ip4="172.17.1%02u.%u" % (subnet, host))
@classmethod
def ip4_hosts(cls, subnet, start, mac_list):
return {cls.ip4_host(subnet, start + j, mac_list[j])
for j in range(len(mac_list))}
@classmethod
def ip6_host(cls, subnet, host, mac):
return Host(mac=mac,
ip6="fd01:%x::%x" % (subnet, host))
@classmethod
def ip6_hosts(cls, subnet, start, mac_list):
return {cls.ip6_host(subnet, start + j, mac_list[j])
for j in range(len(mac_list))}
@classmethod
def bd_swifs(cls, b):
n = cls.ifs_per_bd
start = (b - 1) * n
return [cls.pg_interfaces[j] for j in range(start, start + n)]
def bd_add_del(self, bd_id=1, is_add=1):
if is_add:
self.vapi.bridge_domain_add_del(bd_id=bd_id, is_add=is_add)
for swif in self.bd_swifs(bd_id):
swif_idx = swif.sw_if_index
self.vapi.sw_interface_set_l2_bridge(
swif_idx, bd_id=bd_id, enable=is_add)
if not is_add:
self.vapi.bridge_domain_add_del(bd_id=bd_id, is_add=is_add)
@classmethod
def arp_req(cls, src_host, host):
return (Ether(dst="ff:ff:ff:ff:ff:ff", src=src_host.mac) /
ARP(op="who-has",
hwsrc=src_host.bin_mac,
pdst=host.ip4,
psrc=src_host.ip4))
@classmethod
def arp_reqs(cls, src_host, entries):
return [cls.arp_req(src_host, e) for e in entries]
@classmethod
def garp_req(cls, host):
return cls.arp_req(host, host)
@classmethod
def garp_reqs(cls, entries):
return [cls.garp_req(e) for e in entries]
def arp_resp_host(self, src_host, arp_resp):
ether = arp_resp[Ether]
self.assertEqual(ether.dst, src_host.mac)
arp = arp_resp[ARP]
self.assertEqual(arp.hwtype, 1)
self.assertEqual(arp.ptype, 0x800)
self.assertEqual(arp.hwlen, 6)
self.assertEqual(arp.plen, 4)
arp_opts = {"who-has": 1, "is-at": 2}
self.assertEqual(arp.op, arp_opts["is-at"])
self.assertEqual(arp.hwdst, src_host.mac)
self.assertEqual(arp.pdst, src_host.ip4)
return Host(mac=arp.hwsrc, ip4=arp.psrc)
def arp_resp_hosts(self, src_host, pkts):
return {self.arp_resp_host(src_host, p) for p in pkts}
@staticmethod
def inttoip4(ip):
o1 = int(ip / 16777216) % 256
o2 = int(ip / 65536) % 256
o3 = int(ip / 256) % 256
o4 = int(ip) % 256
return '%s.%s.%s.%s' % (o1, o2, o3, o4)
def arp_event_host(self, e):
return Host(str(e.mac), ip4=str(e.ip))
def arp_event_hosts(self, evs):
return {self.arp_event_host(e) for e in evs}
def nd_event_host(self, e):
return Host(str(e.mac), ip6=str(e.ip))
def nd_event_hosts(self, evs):
return {self.nd_event_host(e) for e in evs}
@classmethod
def ns_req(cls, src_host, host):
nsma = in6_getnsma(inet_pton(AF_INET6, "fd10::ffff"))
d = inet_ntop(AF_INET6, nsma)
return (Ether(dst="ff:ff:ff:ff:ff:ff", src=src_host.mac) /
IPv6(dst=d, src=src_host.ip6) /
ICMPv6ND_NS(tgt=host.ip6) /
ICMPv6NDOptSrcLLAddr(lladdr=src_host.mac))
@classmethod
def ns_reqs_dst(cls, entries, dst_host):
return [cls.ns_req(e, dst_host) for e in entries]
@classmethod
def ns_reqs_src(cls, src_host, entries):
return [cls.ns_req(src_host, e) for e in entries]
def na_resp_host(self, src_host, rx):
self.assertEqual(rx[Ether].dst, src_host.mac)
self.assertEqual(in6_ptop(rx[IPv6].dst),
in6_ptop(src_host.ip6))
self.assertTrue(rx.haslayer(ICMPv6ND_NA))
self.assertTrue(rx.haslayer(ICMPv6NDOptDstLLAddr))
na = rx[ICMPv6ND_NA]
return Host(mac=na.lladdr, ip6=na.tgt)
def na_resp_hosts(self, src_host, pkts):
return {self.na_resp_host(src_host, p) for p in pkts}
def set_bd_flags(self, bd_id, **args):
"""
Enable/disable defined feature(s) of the bridge domain.
:param int bd_id: Bridge domain ID.
:param list args: List of feature/status pairs. Allowed features: \
learn, forward, flood, uu_flood and arp_term. Status False means \
disable, status True means enable the feature.
:raise: ValueError in case of unknown feature in the input.
"""
for flag in args:
if flag == "learn":
feature_bitmap = 1 << 0
elif flag == "forward":
feature_bitmap = 1 << 1
elif flag == "flood":
feature_bitmap = 1 << 2
elif flag == "uu_flood":
feature_bitmap = 1 << 3
elif flag == "arp_term":
feature_bitmap = 1 << 4
else:
raise ValueError("Unknown feature used: %s" % flag)
is_set = 1 if args[flag] else 0
self.vapi.bridge_flags(bd_id, is_set, feature_bitmap)
self.logger.info("Bridge domain ID %d updated" % bd_id)
def verify_arp(self, src_host, req_hosts, resp_hosts, bd_id=1):
reqs = self.arp_reqs(src_host, req_hosts)
for swif in self.bd_swifs(bd_id):
swif.add_stream(reqs)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
for swif in self.bd_swifs(bd_id):
resp_pkts = swif.get_capture(len(resp_hosts))
resps = self.arp_resp_hosts(src_host, resp_pkts)
self.assertEqual(len(resps ^ resp_hosts), 0)
def verify_nd(self, src_host, req_hosts, resp_hosts, bd_id=1):
reqs = self.ns_reqs_src(src_host, req_hosts)
for swif in self.bd_swifs(bd_id):
swif.add_stream(reqs)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
for swif in self.bd_swifs(bd_id):
resp_pkts = swif.get_capture(len(resp_hosts))
resps = self.na_resp_hosts(src_host, resp_pkts)
self.assertEqual(len(resps ^ resp_hosts), 0)
def test_l2bd_arp_term_01(self):
""" L2BD arp term - add 5 hosts, verify arp responses
"""
src_host = self.ip4_host(50, 50, "00:00:11:22:33:44")
self.bd_add_del(1, is_add=1)
self.set_bd_flags(1, arp_term=True, flood=False,
uu_flood=False, learn=False)
macs = self.mac_list(range(1, 5))
hosts = self.ip4_hosts(4, 1, macs)
self.add_del_arp_term_hosts(hosts, is_add=1)
self.verify_arp(src_host, hosts, hosts)
type(self).hosts = hosts
def test_l2bd_arp_term_02(self):
""" L2BD arp term - delete 3 hosts, verify arp responses
"""
src_host = self.ip4_host(50, 50, "00:00:11:22:33:44")
macs = self.mac_list(range(1, 3))
deleted = self.ip4_hosts(4, 1, macs)
self.add_del_arp_term_hosts(deleted, is_add=0)
remaining = self.hosts - deleted
self.verify_arp(src_host, self.hosts, remaining)
type(self).hosts = remaining
self.bd_add_del(1, is_add=0)
def test_l2bd_arp_term_03(self):
""" L2BD arp term - recreate BD1, readd 3 hosts, verify arp responses
"""
src_host = self.ip4_host(50, 50, "00:00:11:22:33:44")
self.bd_add_del(1, is_add=1)
self.set_bd_flags(1, arp_term=True, flood=False,
uu_flood=False, learn=False)
macs = self.mac_list(range(1, 3))
readded = self.ip4_hosts(4, 1, macs)
self.add_del_arp_term_hosts(readded, is_add=1)
self.verify_arp(src_host, self.hosts | readded, readded)
type(self).hosts = readded
def test_l2bd_arp_term_04(self):
""" L2BD arp term - 2 IP4 addrs per host
"""
src_host = self.ip4_host(50, 50, "00:00:11:22:33:44")
macs = self.mac_list(range(1, 3))
sub5_hosts = self.ip4_hosts(5, 1, macs)
self.add_del_arp_term_hosts(sub5_hosts, is_add=1)
hosts = self.hosts | sub5_hosts
self.verify_arp(src_host, hosts, hosts)
type(self).hosts = hosts
self.bd_add_del(1, is_add=0)
def test_l2bd_arp_term_05(self):
""" L2BD arp term - create and update 10 IP4-mac pairs
"""
src_host = self.ip4_host(50, 50, "00:00:11:22:33:44")
self.bd_add_del(1, is_add=1)
self.set_bd_flags(1, arp_term=True, flood=False,
uu_flood=False, learn=False)
macs1 = self.mac_list(range(10, 20))
hosts1 = self.ip4_hosts(5, 1, macs1)
self.add_del_arp_term_hosts(hosts1, is_add=1)
self.verify_arp(src_host, hosts1, hosts1)
macs2 = self.mac_list(range(20, 30))
hosts2 = self.ip4_hosts(5, 1, macs2)
self.add_del_arp_term_hosts(hosts2, is_add=1)
self.verify_arp(src_host, hosts1, hosts2)
self.bd_add_del(1, is_add=0)
def test_l2bd_arp_term_06(self):
""" L2BD arp/ND term - hosts with both ip4/ip6
"""
src_host4 = self.ip4_host(50, 50, "00:00:11:22:33:44")
src_host6 = self.ip6_host(50, 50, "00:00:11:22:33:44")
self.bd_add_del(1, is_add=1)
# enable flood to make sure requests are not flooded
self.set_bd_flags(1, arp_term=True, flood=True,
uu_flood=False, learn=False)
macs = self.mac_list(range(10, 20))
hosts6 = self.ip6_hosts(5, 1, macs)
hosts4 = self.ip4_hosts(5, 1, macs)
self.add_del_arp_term_hosts(hosts4, is_add=1)
self.add_del_arp_term_hosts(hosts6, is_add=1, is_ipv6=1)
self.verify_arp(src_host4, hosts4, hosts4)
self.verify_nd(src_host6, hosts6, hosts6)
self.bd_add_del(1, is_add=0)
def test_l2bd_arp_term_07(self):
""" L2BD ND term - Add and Del hosts, verify ND replies
"""
src_host6 = self.ip6_host(50, 50, "00:00:11:22:33:44")
self.bd_add_del(1, is_add=1)
self.set_bd_flags(1, arp_term=True, flood=False,
uu_flood=False, learn=False)
macs = self.mac_list(range(10, 20))
hosts6 = self.ip6_hosts(5, 1, macs)
self.add_del_arp_term_hosts(hosts6, is_add=1, is_ipv6=1)
self.verify_nd(src_host6, hosts6, hosts6)
del_macs = self.mac_list(range(10, 15))
deleted = self.ip6_hosts(5, 1, del_macs)
self.add_del_arp_term_hosts(deleted, is_add=0, is_ipv6=1)
self.verify_nd(src_host6, hosts6, hosts6 - deleted)
self.bd_add_del(1, is_add=0)
def test_l2bd_arp_term_08(self):
""" L2BD ND term - Add and update IP+mac, verify ND replies
"""
src_host = self.ip6_host(50, 50, "00:00:11:22:33:44")
self.bd_add_del(1, is_add=1)
self.set_bd_flags(1, arp_term=True, flood=False,
uu_flood=False, learn=False)
macs1 = self.mac_list(range(10, 20))
hosts = self.ip6_hosts(5, 1, macs1)
self.add_del_arp_term_hosts(hosts, is_add=1, is_ipv6=1)
self.verify_nd(src_host, hosts, hosts)
macs2 = self.mac_list(range(20, 30))
updated = self.ip6_hosts(5, 1, macs2)
self.add_del_arp_term_hosts(updated, is_add=1, is_ipv6=1)
self.verify_nd(src_host, hosts, updated)
self.bd_add_del(1, is_add=0)
def test_l2bd_arp_term_09(self):
""" L2BD arp term - send garps, verify arp event reports
"""
self.vapi.want_ip4_arp_events()
self.bd_add_del(1, is_add=1)
self.set_bd_flags(1, arp_term=True, flood=False,
uu_flood=False, learn=False)
macs = self.mac_list(range(90, 95))
hosts = self.ip4_hosts(5, 1, macs)
garps = self.garp_reqs(hosts)
self.bd_swifs(1)[0].add_stream(garps)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
evs = [self.vapi.wait_for_event(1, "ip4_arp_event")
for i in range(len(hosts))]
ev_hosts = self.arp_event_hosts(evs)
self.assertEqual(len(ev_hosts ^ hosts), 0)
def test_l2bd_arp_term_10(self):
""" L2BD arp term - send duplicate garps, verify suppression
"""
macs = self.mac_list(range(70, 71))
hosts = self.ip4_hosts(6, 1, macs)
""" send the packet 5 times expect one event
"""
garps = self.garp_reqs(hosts) * 5
self.bd_swifs(1)[0].add_stream(garps)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
evs = [self.vapi.wait_for_event(1, "ip4_arp_event")
for i in range(len(hosts))]
ev_hosts = self.arp_event_hosts(evs)
self.assertEqual(len(ev_hosts ^ hosts), 0)
def test_l2bd_arp_term_11(self):
""" L2BD arp term - disable ip4 arp events,send garps, verify no events
"""
self.vapi.want_ip4_arp_events(enable_disable=0)
macs = self.mac_list(range(90, 95))
hosts = self.ip4_hosts(5, 1, macs)
garps = self.garp_reqs(hosts)
self.bd_swifs(1)[0].add_stream(garps)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
self.sleep(1)
self.assertEqual(len(self.vapi.collect_events()), 0)
self.bd_add_del(1, is_add=0)
def test_l2bd_arp_term_12(self):
""" L2BD ND term - send NS packets verify reports
"""
self.vapi.want_ip6_nd_events(ip="::")
dst_host = self.ip6_host(50, 50, "00:00:11:22:33:44")
self.bd_add_del(1, is_add=1)
self.set_bd_flags(1, arp_term=True, flood=False,
uu_flood=False, learn=False)
macs = self.mac_list(range(10, 15))
hosts = self.ip6_hosts(5, 1, macs)
reqs = self.ns_reqs_dst(hosts, dst_host)
self.bd_swifs(1)[0].add_stream(reqs)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
evs = [self.vapi.wait_for_event(2, "ip6_nd_event")
for i in range(len(hosts))]
ev_hosts = self.nd_event_hosts(evs)
self.assertEqual(len(ev_hosts ^ hosts), 0)
def test_l2bd_arp_term_13(self):
""" L2BD ND term - send duplicate ns, verify suppression
"""
dst_host = self.ip6_host(50, 50, "00:00:11:22:33:44")
macs = self.mac_list(range(10, 11))
hosts = self.ip6_hosts(5, 1, macs)
reqs = self.ns_reqs_dst(hosts, dst_host) * 5
self.bd_swifs(1)[0].add_stream(reqs)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
evs = [self.vapi.wait_for_event(2, "ip6_nd_event")
for i in range(len(hosts))]
ev_hosts = self.nd_event_hosts(evs)
self.assertEqual(len(ev_hosts ^ hosts), 0)
def test_l2bd_arp_term_14(self):
""" L2BD ND term - disable ip4 arp events,send ns, verify no events
"""
self.vapi.want_ip6_nd_events(enable_disable=0, ip="::")
dst_host = self.ip6_host(50, 50, "00:00:11:22:33:44")
macs = self.mac_list(range(10, 15))
hosts = self.ip6_hosts(5, 1, macs)
reqs = self.ns_reqs_dst(hosts, dst_host)
self.bd_swifs(1)[0].add_stream(reqs)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
self.sleep(1)
self.assertEqual(len(self.vapi.collect_events()), 0)
self.bd_add_del(1, is_add=0)
if __name__ == '__main__':
unittest.main(testRunner=VppTestRunner)
| 36.92229 | 79 | 0.608419 |
import unittest
import random
import copy
from socket import AF_INET, AF_INET6
from scapy.packet import Raw
from scapy.layers.l2 import Ether, ARP
from scapy.layers.inet import IP
from scapy.utils import inet_pton, inet_ntop
from scapy.utils6 import in6_getnsma, in6_getnsmac, in6_ptop, in6_islladdr, \
in6_mactoifaceid, in6_ismaddr
from scapy.layers.inet6 import IPv6, UDP, ICMPv6ND_NS, ICMPv6ND_RS, \
ICMPv6ND_RA, ICMPv6NDOptSrcLLAddr, getmacbyip6, ICMPv6MRD_Solicitation, \
ICMPv6NDOptMTU, ICMPv6NDOptSrcLLAddr, ICMPv6NDOptPrefixInfo, \
ICMPv6ND_NA, ICMPv6NDOptDstLLAddr, ICMPv6DestUnreach, icmp6types
from framework import VppTestCase, VppTestRunner
from util import Host, ppp
class TestL2bdArpTerm(VppTestCase):
@classmethod
def setUpClass(cls):
super(TestL2bdArpTerm, cls).setUpClass()
try:
n_bd = 1
cls.ifs_per_bd = ifs_per_bd = 3
n_ifs = n_bd * ifs_per_bd
cls.create_pg_interfaces(range(n_ifs))
for i in cls.pg_interfaces:
i.admin_up()
cls.hosts = set()
except Exception:
super(TestL2bdArpTerm, cls).tearDownClass()
raise
def setUp(self):
self.reset_packet_infos()
super(TestL2bdArpTerm, self).setUp()
def tearDown(self):
super(TestL2bdArpTerm, self).tearDown()
if not self.vpp_dead:
self.logger.info(self.vapi.ppcli("show l2fib verbose"))
self.logger.info(self.vapi.ppcli("show bridge-domain 1 detail"))
def add_del_arp_term_hosts(self, entries, bd_id=1, is_add=1, is_ipv6=0):
for e in entries:
ip = e.ip4 if is_ipv6 == 0 else e.ip6
self.vapi.bd_ip_mac_add_del(bd_id=bd_id, is_add=is_add, ip=ip,
mac=e.mac)
@classmethod
def mac_list(cls, b6_range):
return ["00:00:ca:fe:00:%02x" % b6 for b6 in b6_range]
@classmethod
def ip4_host(cls, subnet, host, mac):
return Host(mac=mac,
ip4="172.17.1%02u.%u" % (subnet, host))
@classmethod
def ip4_hosts(cls, subnet, start, mac_list):
return {cls.ip4_host(subnet, start + j, mac_list[j])
for j in range(len(mac_list))}
@classmethod
def ip6_host(cls, subnet, host, mac):
return Host(mac=mac,
ip6="fd01:%x::%x" % (subnet, host))
@classmethod
def ip6_hosts(cls, subnet, start, mac_list):
return {cls.ip6_host(subnet, start + j, mac_list[j])
for j in range(len(mac_list))}
@classmethod
def bd_swifs(cls, b):
n = cls.ifs_per_bd
start = (b - 1) * n
return [cls.pg_interfaces[j] for j in range(start, start + n)]
def bd_add_del(self, bd_id=1, is_add=1):
if is_add:
self.vapi.bridge_domain_add_del(bd_id=bd_id, is_add=is_add)
for swif in self.bd_swifs(bd_id):
swif_idx = swif.sw_if_index
self.vapi.sw_interface_set_l2_bridge(
swif_idx, bd_id=bd_id, enable=is_add)
if not is_add:
self.vapi.bridge_domain_add_del(bd_id=bd_id, is_add=is_add)
@classmethod
def arp_req(cls, src_host, host):
return (Ether(dst="ff:ff:ff:ff:ff:ff", src=src_host.mac) /
ARP(op="who-has",
hwsrc=src_host.bin_mac,
pdst=host.ip4,
psrc=src_host.ip4))
@classmethod
def arp_reqs(cls, src_host, entries):
return [cls.arp_req(src_host, e) for e in entries]
@classmethod
def garp_req(cls, host):
return cls.arp_req(host, host)
@classmethod
def garp_reqs(cls, entries):
return [cls.garp_req(e) for e in entries]
def arp_resp_host(self, src_host, arp_resp):
ether = arp_resp[Ether]
self.assertEqual(ether.dst, src_host.mac)
arp = arp_resp[ARP]
self.assertEqual(arp.hwtype, 1)
self.assertEqual(arp.ptype, 0x800)
self.assertEqual(arp.hwlen, 6)
self.assertEqual(arp.plen, 4)
arp_opts = {"who-has": 1, "is-at": 2}
self.assertEqual(arp.op, arp_opts["is-at"])
self.assertEqual(arp.hwdst, src_host.mac)
self.assertEqual(arp.pdst, src_host.ip4)
return Host(mac=arp.hwsrc, ip4=arp.psrc)
def arp_resp_hosts(self, src_host, pkts):
return {self.arp_resp_host(src_host, p) for p in pkts}
@staticmethod
def inttoip4(ip):
o1 = int(ip / 16777216) % 256
o2 = int(ip / 65536) % 256
o3 = int(ip / 256) % 256
o4 = int(ip) % 256
return '%s.%s.%s.%s' % (o1, o2, o3, o4)
def arp_event_host(self, e):
return Host(str(e.mac), ip4=str(e.ip))
def arp_event_hosts(self, evs):
return {self.arp_event_host(e) for e in evs}
def nd_event_host(self, e):
return Host(str(e.mac), ip6=str(e.ip))
def nd_event_hosts(self, evs):
return {self.nd_event_host(e) for e in evs}
@classmethod
def ns_req(cls, src_host, host):
nsma = in6_getnsma(inet_pton(AF_INET6, "fd10::ffff"))
d = inet_ntop(AF_INET6, nsma)
return (Ether(dst="ff:ff:ff:ff:ff:ff", src=src_host.mac) /
IPv6(dst=d, src=src_host.ip6) /
ICMPv6ND_NS(tgt=host.ip6) /
ICMPv6NDOptSrcLLAddr(lladdr=src_host.mac))
@classmethod
def ns_reqs_dst(cls, entries, dst_host):
return [cls.ns_req(e, dst_host) for e in entries]
@classmethod
def ns_reqs_src(cls, src_host, entries):
return [cls.ns_req(src_host, e) for e in entries]
def na_resp_host(self, src_host, rx):
self.assertEqual(rx[Ether].dst, src_host.mac)
self.assertEqual(in6_ptop(rx[IPv6].dst),
in6_ptop(src_host.ip6))
self.assertTrue(rx.haslayer(ICMPv6ND_NA))
self.assertTrue(rx.haslayer(ICMPv6NDOptDstLLAddr))
na = rx[ICMPv6ND_NA]
return Host(mac=na.lladdr, ip6=na.tgt)
def na_resp_hosts(self, src_host, pkts):
return {self.na_resp_host(src_host, p) for p in pkts}
def set_bd_flags(self, bd_id, **args):
for flag in args:
if flag == "learn":
feature_bitmap = 1 << 0
elif flag == "forward":
feature_bitmap = 1 << 1
elif flag == "flood":
feature_bitmap = 1 << 2
elif flag == "uu_flood":
feature_bitmap = 1 << 3
elif flag == "arp_term":
feature_bitmap = 1 << 4
else:
raise ValueError("Unknown feature used: %s" % flag)
is_set = 1 if args[flag] else 0
self.vapi.bridge_flags(bd_id, is_set, feature_bitmap)
self.logger.info("Bridge domain ID %d updated" % bd_id)
def verify_arp(self, src_host, req_hosts, resp_hosts, bd_id=1):
reqs = self.arp_reqs(src_host, req_hosts)
for swif in self.bd_swifs(bd_id):
swif.add_stream(reqs)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
for swif in self.bd_swifs(bd_id):
resp_pkts = swif.get_capture(len(resp_hosts))
resps = self.arp_resp_hosts(src_host, resp_pkts)
self.assertEqual(len(resps ^ resp_hosts), 0)
def verify_nd(self, src_host, req_hosts, resp_hosts, bd_id=1):
reqs = self.ns_reqs_src(src_host, req_hosts)
for swif in self.bd_swifs(bd_id):
swif.add_stream(reqs)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
for swif in self.bd_swifs(bd_id):
resp_pkts = swif.get_capture(len(resp_hosts))
resps = self.na_resp_hosts(src_host, resp_pkts)
self.assertEqual(len(resps ^ resp_hosts), 0)
def test_l2bd_arp_term_01(self):
src_host = self.ip4_host(50, 50, "00:00:11:22:33:44")
self.bd_add_del(1, is_add=1)
self.set_bd_flags(1, arp_term=True, flood=False,
uu_flood=False, learn=False)
macs = self.mac_list(range(1, 5))
hosts = self.ip4_hosts(4, 1, macs)
self.add_del_arp_term_hosts(hosts, is_add=1)
self.verify_arp(src_host, hosts, hosts)
type(self).hosts = hosts
def test_l2bd_arp_term_02(self):
src_host = self.ip4_host(50, 50, "00:00:11:22:33:44")
macs = self.mac_list(range(1, 3))
deleted = self.ip4_hosts(4, 1, macs)
self.add_del_arp_term_hosts(deleted, is_add=0)
remaining = self.hosts - deleted
self.verify_arp(src_host, self.hosts, remaining)
type(self).hosts = remaining
self.bd_add_del(1, is_add=0)
def test_l2bd_arp_term_03(self):
src_host = self.ip4_host(50, 50, "00:00:11:22:33:44")
self.bd_add_del(1, is_add=1)
self.set_bd_flags(1, arp_term=True, flood=False,
uu_flood=False, learn=False)
macs = self.mac_list(range(1, 3))
readded = self.ip4_hosts(4, 1, macs)
self.add_del_arp_term_hosts(readded, is_add=1)
self.verify_arp(src_host, self.hosts | readded, readded)
type(self).hosts = readded
def test_l2bd_arp_term_04(self):
src_host = self.ip4_host(50, 50, "00:00:11:22:33:44")
macs = self.mac_list(range(1, 3))
sub5_hosts = self.ip4_hosts(5, 1, macs)
self.add_del_arp_term_hosts(sub5_hosts, is_add=1)
hosts = self.hosts | sub5_hosts
self.verify_arp(src_host, hosts, hosts)
type(self).hosts = hosts
self.bd_add_del(1, is_add=0)
def test_l2bd_arp_term_05(self):
src_host = self.ip4_host(50, 50, "00:00:11:22:33:44")
self.bd_add_del(1, is_add=1)
self.set_bd_flags(1, arp_term=True, flood=False,
uu_flood=False, learn=False)
macs1 = self.mac_list(range(10, 20))
hosts1 = self.ip4_hosts(5, 1, macs1)
self.add_del_arp_term_hosts(hosts1, is_add=1)
self.verify_arp(src_host, hosts1, hosts1)
macs2 = self.mac_list(range(20, 30))
hosts2 = self.ip4_hosts(5, 1, macs2)
self.add_del_arp_term_hosts(hosts2, is_add=1)
self.verify_arp(src_host, hosts1, hosts2)
self.bd_add_del(1, is_add=0)
def test_l2bd_arp_term_06(self):
src_host4 = self.ip4_host(50, 50, "00:00:11:22:33:44")
src_host6 = self.ip6_host(50, 50, "00:00:11:22:33:44")
self.bd_add_del(1, is_add=1)
self.set_bd_flags(1, arp_term=True, flood=True,
uu_flood=False, learn=False)
macs = self.mac_list(range(10, 20))
hosts6 = self.ip6_hosts(5, 1, macs)
hosts4 = self.ip4_hosts(5, 1, macs)
self.add_del_arp_term_hosts(hosts4, is_add=1)
self.add_del_arp_term_hosts(hosts6, is_add=1, is_ipv6=1)
self.verify_arp(src_host4, hosts4, hosts4)
self.verify_nd(src_host6, hosts6, hosts6)
self.bd_add_del(1, is_add=0)
def test_l2bd_arp_term_07(self):
src_host6 = self.ip6_host(50, 50, "00:00:11:22:33:44")
self.bd_add_del(1, is_add=1)
self.set_bd_flags(1, arp_term=True, flood=False,
uu_flood=False, learn=False)
macs = self.mac_list(range(10, 20))
hosts6 = self.ip6_hosts(5, 1, macs)
self.add_del_arp_term_hosts(hosts6, is_add=1, is_ipv6=1)
self.verify_nd(src_host6, hosts6, hosts6)
del_macs = self.mac_list(range(10, 15))
deleted = self.ip6_hosts(5, 1, del_macs)
self.add_del_arp_term_hosts(deleted, is_add=0, is_ipv6=1)
self.verify_nd(src_host6, hosts6, hosts6 - deleted)
self.bd_add_del(1, is_add=0)
def test_l2bd_arp_term_08(self):
src_host = self.ip6_host(50, 50, "00:00:11:22:33:44")
self.bd_add_del(1, is_add=1)
self.set_bd_flags(1, arp_term=True, flood=False,
uu_flood=False, learn=False)
macs1 = self.mac_list(range(10, 20))
hosts = self.ip6_hosts(5, 1, macs1)
self.add_del_arp_term_hosts(hosts, is_add=1, is_ipv6=1)
self.verify_nd(src_host, hosts, hosts)
macs2 = self.mac_list(range(20, 30))
updated = self.ip6_hosts(5, 1, macs2)
self.add_del_arp_term_hosts(updated, is_add=1, is_ipv6=1)
self.verify_nd(src_host, hosts, updated)
self.bd_add_del(1, is_add=0)
def test_l2bd_arp_term_09(self):
self.vapi.want_ip4_arp_events()
self.bd_add_del(1, is_add=1)
self.set_bd_flags(1, arp_term=True, flood=False,
uu_flood=False, learn=False)
macs = self.mac_list(range(90, 95))
hosts = self.ip4_hosts(5, 1, macs)
garps = self.garp_reqs(hosts)
self.bd_swifs(1)[0].add_stream(garps)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
evs = [self.vapi.wait_for_event(1, "ip4_arp_event")
for i in range(len(hosts))]
ev_hosts = self.arp_event_hosts(evs)
self.assertEqual(len(ev_hosts ^ hosts), 0)
def test_l2bd_arp_term_10(self):
macs = self.mac_list(range(70, 71))
hosts = self.ip4_hosts(6, 1, macs)
garps = self.garp_reqs(hosts) * 5
self.bd_swifs(1)[0].add_stream(garps)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
evs = [self.vapi.wait_for_event(1, "ip4_arp_event")
for i in range(len(hosts))]
ev_hosts = self.arp_event_hosts(evs)
self.assertEqual(len(ev_hosts ^ hosts), 0)
def test_l2bd_arp_term_11(self):
self.vapi.want_ip4_arp_events(enable_disable=0)
macs = self.mac_list(range(90, 95))
hosts = self.ip4_hosts(5, 1, macs)
garps = self.garp_reqs(hosts)
self.bd_swifs(1)[0].add_stream(garps)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
self.sleep(1)
self.assertEqual(len(self.vapi.collect_events()), 0)
self.bd_add_del(1, is_add=0)
def test_l2bd_arp_term_12(self):
self.vapi.want_ip6_nd_events(ip="::")
dst_host = self.ip6_host(50, 50, "00:00:11:22:33:44")
self.bd_add_del(1, is_add=1)
self.set_bd_flags(1, arp_term=True, flood=False,
uu_flood=False, learn=False)
macs = self.mac_list(range(10, 15))
hosts = self.ip6_hosts(5, 1, macs)
reqs = self.ns_reqs_dst(hosts, dst_host)
self.bd_swifs(1)[0].add_stream(reqs)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
evs = [self.vapi.wait_for_event(2, "ip6_nd_event")
for i in range(len(hosts))]
ev_hosts = self.nd_event_hosts(evs)
self.assertEqual(len(ev_hosts ^ hosts), 0)
def test_l2bd_arp_term_13(self):
dst_host = self.ip6_host(50, 50, "00:00:11:22:33:44")
macs = self.mac_list(range(10, 11))
hosts = self.ip6_hosts(5, 1, macs)
reqs = self.ns_reqs_dst(hosts, dst_host) * 5
self.bd_swifs(1)[0].add_stream(reqs)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
evs = [self.vapi.wait_for_event(2, "ip6_nd_event")
for i in range(len(hosts))]
ev_hosts = self.nd_event_hosts(evs)
self.assertEqual(len(ev_hosts ^ hosts), 0)
def test_l2bd_arp_term_14(self):
self.vapi.want_ip6_nd_events(enable_disable=0, ip="::")
dst_host = self.ip6_host(50, 50, "00:00:11:22:33:44")
macs = self.mac_list(range(10, 15))
hosts = self.ip6_hosts(5, 1, macs)
reqs = self.ns_reqs_dst(hosts, dst_host)
self.bd_swifs(1)[0].add_stream(reqs)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
self.sleep(1)
self.assertEqual(len(self.vapi.collect_events()), 0)
self.bd_add_del(1, is_add=0)
if __name__ == '__main__':
unittest.main(testRunner=VppTestRunner)
| true | true |
f721134f2cf6dd7f8af453cc2143cd6f38f7cc03 | 1,204 | py | Python | lagom/envs/record_episode_statistics.py | zuoxingdong/lagom | 3b6710804dbc79c6dffb369ac87c68f4055ab6cd | [
"MIT"
] | 383 | 2018-07-11T17:43:10.000Z | 2022-01-24T08:46:23.000Z | lagom/envs/record_episode_statistics.py | LorinChen/lagom | 273bb7f5babb1f250f6dba0b5f62c6614f301719 | [
"MIT"
] | 90 | 2018-07-11T23:51:45.000Z | 2021-12-16T08:56:42.000Z | lagom/envs/record_episode_statistics.py | LorinChen/lagom | 273bb7f5babb1f250f6dba0b5f62c6614f301719 | [
"MIT"
] | 32 | 2018-07-12T18:21:03.000Z | 2021-09-15T05:47:48.000Z | import time
from collections import deque
import gym
class RecordEpisodeStatistics(gym.Wrapper):
def __init__(self, env, deque_size=100):
super().__init__(env)
self.t0 = time.perf_counter()
self.episode_return = 0.0
self.episode_horizon = 0
self.return_queue = deque(maxlen=deque_size)
self.horizon_queue = deque(maxlen=deque_size)
def reset(self, **kwargs):
observation = super().reset(**kwargs)
self.episode_return = 0.0
self.episode_horizon = 0
return observation
def step(self, action):
observation, reward, done, info = super().step(action)
self.episode_return += reward
self.episode_horizon += 1
if done:
info['episode'] = {'return': self.episode_return,
'horizon': self.episode_horizon,
'time': round(time.perf_counter() - self.t0, 4)}
self.return_queue.append(self.episode_return)
self.horizon_queue.append(self.episode_horizon)
self.episode_return = 0.0
self.episode_horizon = 0
return observation, reward, done, info
| 34.4 | 79 | 0.599668 | import time
from collections import deque
import gym
class RecordEpisodeStatistics(gym.Wrapper):
def __init__(self, env, deque_size=100):
super().__init__(env)
self.t0 = time.perf_counter()
self.episode_return = 0.0
self.episode_horizon = 0
self.return_queue = deque(maxlen=deque_size)
self.horizon_queue = deque(maxlen=deque_size)
def reset(self, **kwargs):
observation = super().reset(**kwargs)
self.episode_return = 0.0
self.episode_horizon = 0
return observation
def step(self, action):
observation, reward, done, info = super().step(action)
self.episode_return += reward
self.episode_horizon += 1
if done:
info['episode'] = {'return': self.episode_return,
'horizon': self.episode_horizon,
'time': round(time.perf_counter() - self.t0, 4)}
self.return_queue.append(self.episode_return)
self.horizon_queue.append(self.episode_horizon)
self.episode_return = 0.0
self.episode_horizon = 0
return observation, reward, done, info
| true | true |
f721149609f8936e76f673d4273205ed140bf7b3 | 1,608 | py | Python | blog_auth/migrations/0001_initial.py | MicroPyramid/ngo-cms | 5f0baf69ce646ab6b895d3ae2f49b782630c9959 | [
"MIT"
] | 5 | 2019-08-12T17:56:25.000Z | 2021-08-31T04:36:42.000Z | blog_auth/migrations/0001_initial.py | MicroPyramid/ngo-cms | 5f0baf69ce646ab6b895d3ae2f49b782630c9959 | [
"MIT"
] | 12 | 2020-02-12T00:38:11.000Z | 2022-03-11T23:50:12.000Z | blog_auth/migrations/0001_initial.py | MicroPyramid/ngo-cms | 5f0baf69ce646ab6b895d3ae2f49b782630c9959 | [
"MIT"
] | 8 | 2019-06-19T18:54:02.000Z | 2021-01-05T19:31:30.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(default=django.utils.timezone.now, verbose_name='last login')),
('email', models.EmailField(unique=True, max_length=75)),
('rpwd', models.CharField(max_length=20)),
('first_name', models.CharField(max_length=100)),
('last_name', models.CharField(max_length=100)),
('gender', models.CharField(default=b'Unknown', max_length=10, verbose_name=b'Gender', choices=[(b'Male', b'Male'), (b'Female', b'Female')])),
('join_date', models.DateTimeField(auto_now_add=True)),
('mobile', models.CharField(max_length=15)),
('user_type', models.CharField(default=b'user', max_length=10, verbose_name=b'UserType', choices=[(b'user', b'user'), (b'Admin', b'Admin')])),
('is_admin', models.BooleanField(default=False)),
('is_active', models.BooleanField(default=True)),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
]
| 43.459459 | 158 | 0.584577 |
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial migration for ``blog_auth``: creates the custom ``User`` model."""
    # First migration of the app, so nothing to depend on.
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(default=django.utils.timezone.now, verbose_name='last login')),
                # Email doubles as the unique login identifier (no username field).
                ('email', models.EmailField(unique=True, max_length=75)),
                # NOTE(review): 'rpwd' looks like a raw/repeated password stored in
                # plain text alongside the hashed 'password' — confirm intent.
                ('rpwd', models.CharField(max_length=20)),
                ('first_name', models.CharField(max_length=100)),
                ('last_name', models.CharField(max_length=100)),
                # NOTE(review): the b'...' defaults/choices are Python 2 era byte
                # strings; under Python 3 these are bytes, not str — verify.
                ('gender', models.CharField(default=b'Unknown', max_length=10, verbose_name=b'Gender', choices=[(b'Male', b'Male'), (b'Female', b'Female')])),
                ('join_date', models.DateTimeField(auto_now_add=True)),
                ('mobile', models.CharField(max_length=15)),
                ('user_type', models.CharField(default=b'user', max_length=10, verbose_name=b'UserType', choices=[(b'user', b'user'), (b'Admin', b'Admin')])),
                ('is_admin', models.BooleanField(default=False)),
                ('is_active', models.BooleanField(default=True)),
            ],
            options={
                'abstract': False,
            },
            bases=(models.Model,),
        ),
    ]
| true | true |
f721152db9db3827adab40e0750d01f58df5decf | 15,018 | py | Python | cinder/tests/unit/zonemanager/test_brcd_fc_zone_client_cli.py | lightsey/cinder | e03d68e42e57a63f8d0f3e177fb4287290612b24 | [
"Apache-2.0"
] | 3 | 2015-04-02T21:44:36.000Z | 2016-04-29T21:19:04.000Z | cinder/tests/unit/zonemanager/test_brcd_fc_zone_client_cli.py | lightsey/cinder | e03d68e42e57a63f8d0f3e177fb4287290612b24 | [
"Apache-2.0"
] | 3 | 2016-04-29T21:45:26.000Z | 2016-05-04T19:41:23.000Z | cinder/tests/unit/zonemanager/test_brcd_fc_zone_client_cli.py | lightsey/cinder | e03d68e42e57a63f8d0f3e177fb4287290612b24 | [
"Apache-2.0"
] | 4 | 2016-01-27T00:25:52.000Z | 2021-03-25T19:54:08.000Z | # (c) Copyright 2016 Brocade Communications Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Unit tests for brcd fc zone client cli."""
from unittest import mock
from oslo_concurrency import processutils
from cinder import exception
from cinder import test
from cinder.zonemanager.drivers.brocade import (brcd_fc_zone_client_cli
as client_cli)
from cinder.zonemanager.drivers.brocade import exception as b_exception
import cinder.zonemanager.drivers.brocade.fc_zone_constants as zone_constant
nsshow = '20:1a:00:05:1e:e8:e3:29'
switch_data = [' N 011a00;2,3;20:1a:00:05:1e:e8:e3:29;\
20:1a:00:05:1e:e8:e3:29;na',
' Fabric Port Name: 20:1a:00:05:1e:e8:e3:29']
cfgactvshow = ['Effective configuration:\n',
' cfg:\tOpenStack_Cfg\t\n',
' zone:\topenstack50060b0000c26604201900051ee8e329\t\n',
'\t\t50:06:0b:00:00:c2:66:04\n',
'\t\t20:19:00:05:1e:e8:e3:29\n']
active_zoneset = {
'zones': {
'openstack50060b0000c26604201900051ee8e329':
['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29']},
'active_zone_config': 'OpenStack_Cfg'}
active_zoneset_multiple_zones = {
'zones': {
'openstack50060b0000c26604201900051ee8e329':
['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29'],
'openstack50060b0000c26602201900051ee8e327':
['50:06:0b:00:00:c2:66:02', '20:19:00:05:1e:e8:e3:27']},
'active_zone_config': 'OpenStack_Cfg'}
new_zone_memb_same = {
'openstack50060b0000c26604201900051ee8e329':
['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29']}
new_zone_memb_not_same = {
'openstack50060b0000c26604201900051ee8e330':
['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:30']}
new_zone = {'openstack10000012345678902001009876543210':
['10:00:00:12:34:56:78:90', '20:01:00:98:76:54:32:10']}
new_zones = {'openstack10000012345678902001009876543210':
['10:00:00:12:34:56:78:90', '20:01:00:98:76:54:32:10'],
'openstack10000011111111112001001111111111':
['10:00:00:11:11:11:11:11', '20:01:00:11:11:11:11:11']}
zone_names_to_delete = 'openstack50060b0000c26604201900051ee8e329'
supported_firmware = ['Kernel: 2.6', 'Fabric OS: v7.0.1']
unsupported_firmware = ['Fabric OS: v6.2.1']
class TestBrcdFCZoneClientCLI(client_cli.BrcdFCZoneClientCLI, test.TestCase):
    """Unit tests for the Brocade FC zone CLI client.

    The test class inherits from the client under test so each method can
    be exercised directly on ``self`` while its collaborators
    (``_run_ssh``, ``_get_switch_info``, ``_ssh_execute``, ...) are
    replaced with mocks.
    """
    # Only run the TestCase half of the MRO; the client base class
    # __init__ is intentionally not called.
    def __init__(self, *args, **kwargs):
        test.TestCase.__init__(self, *args, **kwargs)
    # cfgactvshow output parses into the expected active-zone-set dict.
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_get_switch_info')
    def test_get_active_zone_set(self, get_switch_info_mock):
        cmd_list = [zone_constant.GET_ACTIVE_ZONE_CFG]
        get_switch_info_mock.return_value = cfgactvshow
        active_zoneset_returned = self.get_active_zone_set()
        get_switch_info_mock.assert_called_once_with(cmd_list)
        self.assertDictEqual(active_zoneset, active_zoneset_returned)
    # SSH failures must surface as BrocadeZoningCliException.
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh')
    def test_get_active_zone_set_ssh_error(self, run_ssh_mock):
        run_ssh_mock.side_effect = processutils.ProcessExecutionError
        self.assertRaises(b_exception.BrocadeZoningCliException,
                          self.get_active_zone_set)
    # Two new zones: three zone-change calls plus a config save, no activation.
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'get_active_zone_set')
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change')
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_cfg_save')
    def test_add_zones_new_zone_no_activate(self, cfg_save_mock,
                                            apply_zone_change_mock,
                                            get_active_zs_mock):
        get_active_zs_mock.return_value = active_zoneset
        self.add_zones(new_zones, False, None)
        self.assertEqual(1, get_active_zs_mock.call_count)
        self.assertEqual(3, apply_zone_change_mock.call_count)
        cfg_save_mock.assert_called_once_with()
    # With activate=True the effective config is re-activated after the change.
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'get_active_zone_set')
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change')
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'activate_zoneset')
    def test_add_zones_new_zone_activate(self, activate_zoneset_mock,
                                         apply_zone_change_mock,
                                         get_active_zs_mock):
        get_active_zs_mock.return_value = active_zoneset
        self.add_zones(new_zone, True, active_zoneset)
        self.assertEqual(2, apply_zone_change_mock.call_count)
        activate_zoneset_mock.assert_called_once_with(
            active_zoneset['active_zone_config'])
    # update_zones issues exactly one change whether or not members overlap.
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'get_active_zone_set')
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'activate_zoneset')
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change')
    def test_update_zone_exists_memb_same(self, apply_zone_change_mock,
                                          activate_zoneset_mock,
                                          get_active_zs_mock):
        get_active_zs_mock.return_value = active_zoneset
        self.update_zones(new_zone_memb_same, True, zone_constant.ZONE_ADD,
                          active_zoneset)
        self.assertEqual(1, apply_zone_change_mock.call_count)
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'get_active_zone_set')
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'activate_zoneset')
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change')
    def test_update_zone_exists_memb_not_same(self, apply_zone_change_mock,
                                              activate_zoneset_mock,
                                              get_active_zs_mock):
        get_active_zs_mock.return_value = active_zoneset
        self.update_zones(new_zone_memb_not_same, True,
                          zone_constant.ZONE_ADD, active_zoneset)
        self.assertEqual(1, apply_zone_change_mock.call_count)
    # Adding an unknown zone: no active-zoneset fetch; the issued commands
    # include the CFG_ADD directive.
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'get_active_zone_set')
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'activate_zoneset')
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change')
    def test_add_zone_all_exists_memb_not_same(self, apply_zone_change_mock,
                                               activate_zoneset_mock,
                                               get_active_zs_mock):
        self.add_zones(new_zone_memb_not_same, True, active_zoneset)
        call_args = apply_zone_change_mock.call_args[0][0]
        self.assertEqual(0, get_active_zs_mock.call_count)
        self.assertEqual(2, apply_zone_change_mock.call_count)
        self.assertIn(zone_constant.CFG_ADD.strip(), call_args)
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_ssh_execute')
    def test_activate_zoneset(self, ssh_execute_mock):
        ssh_execute_mock.return_value = True
        return_value = self.activate_zoneset('zoneset1')
        self.assertTrue(return_value)
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_ssh_execute')
    def test_deactivate_zoneset(self, ssh_execute_mock):
        ssh_execute_mock.return_value = True
        return_value = self.deactivate_zoneset()
        self.assertTrue(return_value)
    # Deleting without activation: one change, the delete itself, then cfgsave.
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change')
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_cfg_save')
    def test_delete_zones_activate_false(self, cfg_save_mock,
                                         apply_zone_change_mock):
        with mock.patch.object(self, '_zone_delete') as zone_delete_mock:
            self.delete_zones(zone_names_to_delete, False,
                              active_zoneset_multiple_zones)
            self.assertEqual(1, apply_zone_change_mock.call_count)
            zone_delete_mock.assert_called_once_with(zone_names_to_delete)
            cfg_save_mock.assert_called_once_with()
    # Deleting with activation re-activates the remaining config instead of
    # just saving it.
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change')
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'activate_zoneset')
    def test_delete_zones_activate_true(self, activate_zs_mock,
                                        apply_zone_change_mock):
        with mock.patch.object(self, '_zone_delete') \
                as zone_delete_mock:
            self.delete_zones(zone_names_to_delete, True,
                              active_zoneset_multiple_zones)
            self.assertEqual(1, apply_zone_change_mock.call_count)
            zone_delete_mock.assert_called_once_with(zone_names_to_delete)
            activate_zs_mock.assert_called_once_with(
                active_zoneset['active_zone_config'])
    # Name-server output reduces to the list of device WWNs.
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_get_switch_info')
    def test_get_nameserver_info(self, get_switch_info_mock):
        ns_info_list_expected = ['20:1a:00:05:1e:e8:e3:29']
        get_switch_info_mock.return_value = (switch_data)
        ns_info_list = self.get_nameserver_info()
        self.assertEqual(ns_info_list_expected, ns_info_list)
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh')
    def test_get_nameserver_info_ssh_error(self, run_ssh_mock):
        run_ssh_mock.side_effect = processutils.ProcessExecutionError
        self.assertRaises(b_exception.BrocadeZoningCliException,
                          self.get_nameserver_info)
    # The private helpers below are asserted to issue the exact CLI command
    # lists with (command, pass-through, attempts) arguments.
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_ssh_execute')
    def test__cfg_save(self, ssh_execute_mock):
        cmd_list = [zone_constant.CFG_SAVE]
        self._cfg_save()
        ssh_execute_mock.assert_called_once_with(cmd_list, True, 1)
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change')
    def test__zone_delete(self, apply_zone_change_mock):
        zone_name = 'testzone'
        cmd_list = ['zonedelete', '"testzone"']
        self._zone_delete(zone_name)
        apply_zone_change_mock.assert_called_once_with(cmd_list)
    # A pending transaction is aborted only when reported as abortable.
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change')
    def test__cfg_trans_abort(self, apply_zone_change_mock):
        cmd_list = [zone_constant.CFG_ZONE_TRANS_ABORT]
        with mock.patch.object(self, '_is_trans_abortable') \
                as is_trans_abortable_mock:
            is_trans_abortable_mock.return_value = True
            self._cfg_trans_abort()
            is_trans_abortable_mock.assert_called_once_with()
            apply_zone_change_mock.assert_called_once_with(cmd_list)
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh')
    def test__is_trans_abortable_true(self, run_ssh_mock):
        cmd_list = [zone_constant.CFG_SHOW_TRANS]
        run_ssh_mock.return_value = (Stream(zone_constant.TRANS_ABORTABLE),
                                     None)
        data = self._is_trans_abortable()
        self.assertTrue(data)
        run_ssh_mock.assert_called_once_with(cmd_list, True, 1)
    # Empty stdout with stderr content is treated as a CLI failure.
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh')
    def test__is_trans_abortable_ssh_error(self, run_ssh_mock):
        run_ssh_mock.return_value = (Stream(), Stream())
        self.assertRaises(b_exception.BrocadeZoningCliException,
                          self._is_trans_abortable)
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh')
    def test__is_trans_abortable_false(self, run_ssh_mock):
        cmd_list = [zone_constant.CFG_SHOW_TRANS]
        cfgtransshow = 'There is no outstanding zoning transaction'
        run_ssh_mock.return_value = (Stream(cfgtransshow), None)
        data = self._is_trans_abortable()
        self.assertFalse(data)
        run_ssh_mock.assert_called_once_with(cmd_list, True, 1)
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh')
    def test_apply_zone_change(self, run_ssh_mock):
        cmd_list = [zone_constant.CFG_SAVE]
        run_ssh_mock.return_value = (None, None)
        self.apply_zone_change(cmd_list)
        run_ssh_mock.assert_called_once_with(cmd_list, True, 1)
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh')
    def test__get_switch_info(self, run_ssh_mock):
        cmd_list = [zone_constant.NS_SHOW]
        nsshow_list = [nsshow]
        run_ssh_mock.return_value = (Stream(nsshow), Stream())
        switch_data = self._get_switch_info(cmd_list)
        self.assertEqual(nsshow_list, switch_data)
        run_ssh_mock.assert_called_once_with(cmd_list, True, 1)
    # Valid lines yield WWNs; a malformed line (too few ';' fields) raises.
    def test__parse_ns_output(self):
        invalid_switch_data = [' N 011a00;20:1a:00:05:1e:e8:e3:29']
        expected_wwn_list = ['20:1a:00:05:1e:e8:e3:29']
        return_wwn_list = self._parse_ns_output(switch_data)
        self.assertEqual(expected_wwn_list, return_wwn_list)
        self.assertRaises(exception.InvalidParameterValue,
                          self._parse_ns_output, invalid_switch_data)
    # Firmware gate: accepts the v7.x banner, rejects v6.2, and treats a
    # missing or failed response as unsupported.
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_execute_shell_cmd')
    def test_is_supported_firmware(self, exec_shell_cmd_mock):
        exec_shell_cmd_mock.return_value = (supported_firmware, None)
        self.assertTrue(self.is_supported_firmware())
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_execute_shell_cmd')
    def test_is_supported_firmware_invalid(self, exec_shell_cmd_mock):
        exec_shell_cmd_mock.return_value = (unsupported_firmware, None)
        self.assertFalse(self.is_supported_firmware())
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_execute_shell_cmd')
    def test_is_supported_firmware_no_ssh_response(self, exec_shell_cmd_mock):
        exec_shell_cmd_mock.return_value = (None, Stream())
        self.assertFalse(self.is_supported_firmware())
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_execute_shell_cmd')
    def test_is_supported_firmware_ssh_error(self, exec_shell_cmd_mock):
        exec_shell_cmd_mock.side_effect = processutils.ProcessExecutionError
        self.assertRaises(b_exception.BrocadeZoningCliException,
                          self.is_supported_firmware)
class Channel(object):
    """Fake paramiko-style channel whose remote command always succeeds."""

    def recv_exit_status(self):
        """Return the exit status of the fake remote command (always 0)."""
        return 0
class Stream(object):
    """In-memory stand-in for an ssh output stream.

    Provides just enough of the stream interface for the tests: line
    access, closing, flushing, and a ``channel`` attribute that reports
    the exit status.
    """

    def __init__(self, buffer=''):
        self.channel = Channel()
        self.buffer = buffer

    def readlines(self):
        """Expose the raw buffered text (a str, not a list of lines)."""
        return self.buffer

    def splitlines(self):
        """Split the buffered text into a list of lines."""
        return self.buffer.splitlines()

    def close(self):
        """No-op; nothing to release for an in-memory stream."""
        pass

    def flush(self):
        """Discard any buffered content."""
        self.buffer = ''
| 48.289389 | 78 | 0.699827 |
from unittest import mock
from oslo_concurrency import processutils
from cinder import exception
from cinder import test
from cinder.zonemanager.drivers.brocade import (brcd_fc_zone_client_cli
as client_cli)
from cinder.zonemanager.drivers.brocade import exception as b_exception
import cinder.zonemanager.drivers.brocade.fc_zone_constants as zone_constant
nsshow = '20:1a:00:05:1e:e8:e3:29'
switch_data = [' N 011a00;2,3;20:1a:00:05:1e:e8:e3:29;\
20:1a:00:05:1e:e8:e3:29;na',
' Fabric Port Name: 20:1a:00:05:1e:e8:e3:29']
cfgactvshow = ['Effective configuration:\n',
' cfg:\tOpenStack_Cfg\t\n',
' zone:\topenstack50060b0000c26604201900051ee8e329\t\n',
'\t\t50:06:0b:00:00:c2:66:04\n',
'\t\t20:19:00:05:1e:e8:e3:29\n']
active_zoneset = {
'zones': {
'openstack50060b0000c26604201900051ee8e329':
['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29']},
'active_zone_config': 'OpenStack_Cfg'}
active_zoneset_multiple_zones = {
'zones': {
'openstack50060b0000c26604201900051ee8e329':
['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29'],
'openstack50060b0000c26602201900051ee8e327':
['50:06:0b:00:00:c2:66:02', '20:19:00:05:1e:e8:e3:27']},
'active_zone_config': 'OpenStack_Cfg'}
new_zone_memb_same = {
'openstack50060b0000c26604201900051ee8e329':
['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29']}
new_zone_memb_not_same = {
'openstack50060b0000c26604201900051ee8e330':
['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:30']}
new_zone = {'openstack10000012345678902001009876543210':
['10:00:00:12:34:56:78:90', '20:01:00:98:76:54:32:10']}
new_zones = {'openstack10000012345678902001009876543210':
['10:00:00:12:34:56:78:90', '20:01:00:98:76:54:32:10'],
'openstack10000011111111112001001111111111':
['10:00:00:11:11:11:11:11', '20:01:00:11:11:11:11:11']}
zone_names_to_delete = 'openstack50060b0000c26604201900051ee8e329'
supported_firmware = ['Kernel: 2.6', 'Fabric OS: v7.0.1']
unsupported_firmware = ['Fabric OS: v6.2.1']
class TestBrcdFCZoneClientCLI(client_cli.BrcdFCZoneClientCLI, test.TestCase):
    """Unit tests for the Brocade FC zone CLI client.

    The test class inherits from the client under test so each method can
    be exercised directly on ``self`` while its collaborators
    (``_run_ssh``, ``_get_switch_info``, ``_ssh_execute``, ...) are
    replaced with mocks.
    """
    # Only run the TestCase half of the MRO; the client base class
    # __init__ is intentionally not called.
    def __init__(self, *args, **kwargs):
        test.TestCase.__init__(self, *args, **kwargs)
    # cfgactvshow output parses into the expected active-zone-set dict.
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_get_switch_info')
    def test_get_active_zone_set(self, get_switch_info_mock):
        cmd_list = [zone_constant.GET_ACTIVE_ZONE_CFG]
        get_switch_info_mock.return_value = cfgactvshow
        active_zoneset_returned = self.get_active_zone_set()
        get_switch_info_mock.assert_called_once_with(cmd_list)
        self.assertDictEqual(active_zoneset, active_zoneset_returned)
    # SSH failures must surface as BrocadeZoningCliException.
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh')
    def test_get_active_zone_set_ssh_error(self, run_ssh_mock):
        run_ssh_mock.side_effect = processutils.ProcessExecutionError
        self.assertRaises(b_exception.BrocadeZoningCliException,
                          self.get_active_zone_set)
    # Two new zones: three zone-change calls plus a config save, no activation.
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'get_active_zone_set')
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change')
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_cfg_save')
    def test_add_zones_new_zone_no_activate(self, cfg_save_mock,
                                            apply_zone_change_mock,
                                            get_active_zs_mock):
        get_active_zs_mock.return_value = active_zoneset
        self.add_zones(new_zones, False, None)
        self.assertEqual(1, get_active_zs_mock.call_count)
        self.assertEqual(3, apply_zone_change_mock.call_count)
        cfg_save_mock.assert_called_once_with()
    # With activate=True the effective config is re-activated after the change.
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'get_active_zone_set')
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change')
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'activate_zoneset')
    def test_add_zones_new_zone_activate(self, activate_zoneset_mock,
                                         apply_zone_change_mock,
                                         get_active_zs_mock):
        get_active_zs_mock.return_value = active_zoneset
        self.add_zones(new_zone, True, active_zoneset)
        self.assertEqual(2, apply_zone_change_mock.call_count)
        activate_zoneset_mock.assert_called_once_with(
            active_zoneset['active_zone_config'])
    # update_zones issues exactly one change whether or not members overlap.
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'get_active_zone_set')
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'activate_zoneset')
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change')
    def test_update_zone_exists_memb_same(self, apply_zone_change_mock,
                                          activate_zoneset_mock,
                                          get_active_zs_mock):
        get_active_zs_mock.return_value = active_zoneset
        self.update_zones(new_zone_memb_same, True, zone_constant.ZONE_ADD,
                          active_zoneset)
        self.assertEqual(1, apply_zone_change_mock.call_count)
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'get_active_zone_set')
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'activate_zoneset')
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change')
    def test_update_zone_exists_memb_not_same(self, apply_zone_change_mock,
                                              activate_zoneset_mock,
                                              get_active_zs_mock):
        get_active_zs_mock.return_value = active_zoneset
        self.update_zones(new_zone_memb_not_same, True,
                          zone_constant.ZONE_ADD, active_zoneset)
        self.assertEqual(1, apply_zone_change_mock.call_count)
    # Adding an unknown zone: no active-zoneset fetch; the issued commands
    # include the CFG_ADD directive.
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'get_active_zone_set')
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'activate_zoneset')
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change')
    def test_add_zone_all_exists_memb_not_same(self, apply_zone_change_mock,
                                               activate_zoneset_mock,
                                               get_active_zs_mock):
        self.add_zones(new_zone_memb_not_same, True, active_zoneset)
        call_args = apply_zone_change_mock.call_args[0][0]
        self.assertEqual(0, get_active_zs_mock.call_count)
        self.assertEqual(2, apply_zone_change_mock.call_count)
        self.assertIn(zone_constant.CFG_ADD.strip(), call_args)
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_ssh_execute')
    def test_activate_zoneset(self, ssh_execute_mock):
        ssh_execute_mock.return_value = True
        return_value = self.activate_zoneset('zoneset1')
        self.assertTrue(return_value)
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_ssh_execute')
    def test_deactivate_zoneset(self, ssh_execute_mock):
        ssh_execute_mock.return_value = True
        return_value = self.deactivate_zoneset()
        self.assertTrue(return_value)
    # Deleting without activation: one change, the delete itself, then cfgsave.
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change')
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_cfg_save')
    def test_delete_zones_activate_false(self, cfg_save_mock,
                                         apply_zone_change_mock):
        with mock.patch.object(self, '_zone_delete') as zone_delete_mock:
            self.delete_zones(zone_names_to_delete, False,
                              active_zoneset_multiple_zones)
            self.assertEqual(1, apply_zone_change_mock.call_count)
            zone_delete_mock.assert_called_once_with(zone_names_to_delete)
            cfg_save_mock.assert_called_once_with()
    # Deleting with activation re-activates the remaining config instead of
    # just saving it.
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change')
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'activate_zoneset')
    def test_delete_zones_activate_true(self, activate_zs_mock,
                                        apply_zone_change_mock):
        with mock.patch.object(self, '_zone_delete') \
                as zone_delete_mock:
            self.delete_zones(zone_names_to_delete, True,
                              active_zoneset_multiple_zones)
            self.assertEqual(1, apply_zone_change_mock.call_count)
            zone_delete_mock.assert_called_once_with(zone_names_to_delete)
            activate_zs_mock.assert_called_once_with(
                active_zoneset['active_zone_config'])
    # Name-server output reduces to the list of device WWNs.
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_get_switch_info')
    def test_get_nameserver_info(self, get_switch_info_mock):
        ns_info_list_expected = ['20:1a:00:05:1e:e8:e3:29']
        get_switch_info_mock.return_value = (switch_data)
        ns_info_list = self.get_nameserver_info()
        self.assertEqual(ns_info_list_expected, ns_info_list)
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh')
    def test_get_nameserver_info_ssh_error(self, run_ssh_mock):
        run_ssh_mock.side_effect = processutils.ProcessExecutionError
        self.assertRaises(b_exception.BrocadeZoningCliException,
                          self.get_nameserver_info)
    # The private helpers below are asserted to issue the exact CLI command
    # lists with (command, pass-through, attempts) arguments.
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_ssh_execute')
    def test__cfg_save(self, ssh_execute_mock):
        cmd_list = [zone_constant.CFG_SAVE]
        self._cfg_save()
        ssh_execute_mock.assert_called_once_with(cmd_list, True, 1)
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change')
    def test__zone_delete(self, apply_zone_change_mock):
        zone_name = 'testzone'
        cmd_list = ['zonedelete', '"testzone"']
        self._zone_delete(zone_name)
        apply_zone_change_mock.assert_called_once_with(cmd_list)
    # A pending transaction is aborted only when reported as abortable.
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change')
    def test__cfg_trans_abort(self, apply_zone_change_mock):
        cmd_list = [zone_constant.CFG_ZONE_TRANS_ABORT]
        with mock.patch.object(self, '_is_trans_abortable') \
                as is_trans_abortable_mock:
            is_trans_abortable_mock.return_value = True
            self._cfg_trans_abort()
            is_trans_abortable_mock.assert_called_once_with()
            apply_zone_change_mock.assert_called_once_with(cmd_list)
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh')
    def test__is_trans_abortable_true(self, run_ssh_mock):
        cmd_list = [zone_constant.CFG_SHOW_TRANS]
        run_ssh_mock.return_value = (Stream(zone_constant.TRANS_ABORTABLE),
                                     None)
        data = self._is_trans_abortable()
        self.assertTrue(data)
        run_ssh_mock.assert_called_once_with(cmd_list, True, 1)
    # Empty stdout with stderr content is treated as a CLI failure.
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh')
    def test__is_trans_abortable_ssh_error(self, run_ssh_mock):
        run_ssh_mock.return_value = (Stream(), Stream())
        self.assertRaises(b_exception.BrocadeZoningCliException,
                          self._is_trans_abortable)
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh')
    def test__is_trans_abortable_false(self, run_ssh_mock):
        cmd_list = [zone_constant.CFG_SHOW_TRANS]
        cfgtransshow = 'There is no outstanding zoning transaction'
        run_ssh_mock.return_value = (Stream(cfgtransshow), None)
        data = self._is_trans_abortable()
        self.assertFalse(data)
        run_ssh_mock.assert_called_once_with(cmd_list, True, 1)
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh')
    def test_apply_zone_change(self, run_ssh_mock):
        cmd_list = [zone_constant.CFG_SAVE]
        run_ssh_mock.return_value = (None, None)
        self.apply_zone_change(cmd_list)
        run_ssh_mock.assert_called_once_with(cmd_list, True, 1)
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh')
    def test__get_switch_info(self, run_ssh_mock):
        cmd_list = [zone_constant.NS_SHOW]
        nsshow_list = [nsshow]
        run_ssh_mock.return_value = (Stream(nsshow), Stream())
        switch_data = self._get_switch_info(cmd_list)
        self.assertEqual(nsshow_list, switch_data)
        run_ssh_mock.assert_called_once_with(cmd_list, True, 1)
    # Valid lines yield WWNs; a malformed line (too few ';' fields) raises.
    def test__parse_ns_output(self):
        invalid_switch_data = [' N 011a00;20:1a:00:05:1e:e8:e3:29']
        expected_wwn_list = ['20:1a:00:05:1e:e8:e3:29']
        return_wwn_list = self._parse_ns_output(switch_data)
        self.assertEqual(expected_wwn_list, return_wwn_list)
        self.assertRaises(exception.InvalidParameterValue,
                          self._parse_ns_output, invalid_switch_data)
    # Firmware gate: accepts the v7.x banner, rejects v6.2, and treats a
    # missing or failed response as unsupported.
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_execute_shell_cmd')
    def test_is_supported_firmware(self, exec_shell_cmd_mock):
        exec_shell_cmd_mock.return_value = (supported_firmware, None)
        self.assertTrue(self.is_supported_firmware())
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_execute_shell_cmd')
    def test_is_supported_firmware_invalid(self, exec_shell_cmd_mock):
        exec_shell_cmd_mock.return_value = (unsupported_firmware, None)
        self.assertFalse(self.is_supported_firmware())
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_execute_shell_cmd')
    def test_is_supported_firmware_no_ssh_response(self, exec_shell_cmd_mock):
        exec_shell_cmd_mock.return_value = (None, Stream())
        self.assertFalse(self.is_supported_firmware())
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_execute_shell_cmd')
    def test_is_supported_firmware_ssh_error(self, exec_shell_cmd_mock):
        exec_shell_cmd_mock.side_effect = processutils.ProcessExecutionError
        self.assertRaises(b_exception.BrocadeZoningCliException,
                          self.is_supported_firmware)
class Channel(object):
    """Test double for the ``channel`` attribute of an ssh output stream."""
    def recv_exit_status(self):
        # Always report success.
        return 0
class Stream(object):
    """Test double for an ssh stdout/stderr stream backed by a string."""
    def __init__(self, buffer=''):
        self.buffer = buffer
        self.channel = Channel()
    def readlines(self):
        # Returns the raw buffer (a str), not a list of lines.
        return self.buffer
    def splitlines(self):
        return self.buffer.splitlines()
    def close(self):
        pass
    def flush(self):
        # Discard buffered content.
        self.buffer = ''
| true | true |
f72115d189ce1aea3fd459147ab92b50d1a8393a | 807 | py | Python | bluebottle/bluebottle_drf2/renderers.py | terrameijar/bluebottle | b4f5ba9c4f03e678fdd36091b29240307ea69ffd | [
"BSD-3-Clause"
] | 10 | 2015-05-28T18:26:40.000Z | 2021-09-06T10:07:03.000Z | bluebottle/bluebottle_drf2/renderers.py | terrameijar/bluebottle | b4f5ba9c4f03e678fdd36091b29240307ea69ffd | [
"BSD-3-Clause"
] | 762 | 2015-01-15T10:00:59.000Z | 2022-03-31T15:35:14.000Z | bluebottle/bluebottle_drf2/renderers.py | terrameijar/bluebottle | b4f5ba9c4f03e678fdd36091b29240307ea69ffd | [
"BSD-3-Clause"
] | 9 | 2015-02-20T13:19:30.000Z | 2022-03-08T14:09:17.000Z | from rest_framework_json_api.renderers import JSONRenderer
from django.contrib.auth.models import AnonymousUser
class BluebottleJSONAPIRenderer(JSONRenderer):
    """JSON:API renderer that masks anonymous users.

    Output is always pretty-printed with a four-space indent, and
    anonymous users are reduced to a stub resource so none of their
    attributes leak into API responses.
    """

    def get_indent(self, *args, **kwargs):
        """Ignore the requested indent and always use four spaces."""
        return 4

    @classmethod
    def build_json_resource_obj(cls, fields, resource, resource_instance,
                                resource_name, *args, **kwargs):
        """Build the JSON:API resource object for ``resource_instance``."""
        if not isinstance(resource_instance, AnonymousUser):
            return super().build_json_resource_obj(
                fields, resource, resource_instance, resource_name,
                *args, **kwargs)
        # Anonymous users: expose only id, type and an is-anonymous flag.
        return {
            'id': resource['id'],
            'type': resource_name,
            'attributes': {'is-anonymous': True},
        }
| 26.032258 | 79 | 0.570012 | from rest_framework_json_api.renderers import JSONRenderer
from django.contrib.auth.models import AnonymousUser
class BluebottleJSONAPIRenderer(JSONRenderer):
    """JSON:API renderer with fixed indentation and anonymous-user masking."""
    def get_indent(self, *args, **kwargs):
        """Ignore the requested indent and always render with four spaces."""
        return 4
    @classmethod
    def build_json_resource_obj(
        cls,
        fields,
        resource,
        resource_instance,
        resource_name,
        *args,
        **kwargs
    ):
        """Build the JSON:API resource object for ``resource_instance``.

        Anonymous users are serialized as a stub carrying only their id,
        type and an ``is-anonymous`` attribute, hiding all other fields.
        """
        if isinstance(resource_instance, AnonymousUser):
            return {
                'id': resource['id'],
                'type': resource_name,
                'attributes': {
                    'is-anonymous': True
                }
            }
        return super().build_json_resource_obj(
            fields, resource, resource_instance, resource_name, *args, **kwargs
        )
| true | true |
f72116597d007b731f68d9cb1a6c637348e7d55b | 4,912 | py | Python | rclpy/rclpy/context.py | bastinat0r/rclpy | 510b243b2efe9e6b4b20837b7dea8092069cd2d3 | [
"Apache-2.0"
] | 1 | 2021-01-11T06:28:59.000Z | 2021-01-11T06:28:59.000Z | rclpy/rclpy/context.py | bastinat0r/rclpy | 510b243b2efe9e6b4b20837b7dea8092069cd2d3 | [
"Apache-2.0"
] | 1 | 2020-06-28T10:40:59.000Z | 2020-06-28T10:40:59.000Z | rclpy/rclpy/context.py | bastinat0r/rclpy | 510b243b2efe9e6b4b20837b7dea8092069cd2d3 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import threading
from typing import Callable
from typing import List
from typing import Optional
import weakref
# Process-wide guard and refcount for rcl logging setup/teardown, shared by
# all Context instances (see Context.init / Context._logging_fini).
g_logging_configure_lock = threading.Lock()
g_logging_ref_count = 0
class Context:
    """
    Encapsulates the lifecycle of init and shutdown.

    Context objects should not be reused, and are finalized in their destructor.

    Wraps the `rcl_context_t` type.
    """

    def __init__(self):
        # imported locally to avoid loading extensions on module import
        from rclpy.impl.implementation_singleton import rclpy_implementation
        from .handle import Handle
        self._handle = Handle(rclpy_implementation.rclpy_create_context())
        self._lock = threading.Lock()
        # Weak references (WeakMethod) to callbacks invoked on shutdown.
        self._callbacks = []
        self._callbacks_lock = threading.Lock()
        self._logging_initialized = False

    @property
    def handle(self):
        """Get the handle wrapping the underlying rcl context."""
        return self._handle

    def init(self, args: Optional[List[str]] = None, *, initialize_logging: bool = True):
        """
        Initialize ROS communications for a given context.

        :param args: List of command line arguments; defaults to ``sys.argv``.
        :param initialize_logging: Whether to configure the global logging
            system; configuration is refcounted across all contexts in the
            process so it happens at most once.
        """
        # imported locally to avoid loading extensions on module import
        from rclpy.impl.implementation_singleton import rclpy_implementation
        global g_logging_ref_count
        with self._handle as capsule, self._lock:
            rclpy_implementation.rclpy_init(args if args is not None else sys.argv, capsule)
            if initialize_logging and not self._logging_initialized:
                with g_logging_configure_lock:
                    g_logging_ref_count += 1
                    if g_logging_ref_count == 1:
                        rclpy_implementation.rclpy_logging_configure(capsule)
                self._logging_initialized = True

    def ok(self):
        """Check if context hasn't been shut down."""
        # imported locally to avoid loading extensions on module import
        from rclpy.impl.implementation_singleton import rclpy_implementation
        with self._handle as capsule, self._lock:
            return rclpy_implementation.rclpy_ok(capsule)

    def _call_on_shutdown_callbacks(self):
        # Invoke registered shutdown callbacks, then clear the list.
        with self._callbacks_lock:
            for weak_method in self._callbacks:
                callback = weak_method()
                # A dead WeakMethod yields None; calling it would raise
                # TypeError, so skip callbacks whose referent was collected.
                if callback is not None:
                    callback()
            self._callbacks = []

    def shutdown(self):
        """Shutdown this context."""
        # imported locally to avoid loading extensions on module import
        from rclpy.impl.implementation_singleton import rclpy_implementation
        with self._handle as capsule, self._lock:
            rclpy_implementation.rclpy_shutdown(capsule)
            self._call_on_shutdown_callbacks()
        # Must run outside the `with` above: _logging_fini re-acquires _lock.
        self._logging_fini()

    def try_shutdown(self):
        """Shutdown this context, if not already shutdown."""
        # imported locally to avoid loading extensions on module import
        from rclpy.impl.implementation_singleton import rclpy_implementation
        with self._handle as capsule, self._lock:
            if rclpy_implementation.rclpy_ok(capsule):
                rclpy_implementation.rclpy_shutdown(capsule)
                self._call_on_shutdown_callbacks()

    def _remove_callback(self, weak_method):
        # WeakMethod finalizer: drop the dead reference from the list.
        # NOTE(review): runs without _callbacks_lock; taking it here could
        # deadlock with _call_on_shutdown_callbacks — confirm before changing.
        self._callbacks.remove(weak_method)

    def on_shutdown(self, callback: Callable[[], None]):
        """
        Add a callback to be called on shutdown.

        If the context is already shut down the callback is invoked
        immediately; otherwise it is held by weak reference until shutdown.

        :param callback: A zero-argument callable. Note it is stored as a
            ``weakref.WeakMethod``, which requires a bound method.
        :raises TypeError: if ``callback`` is not callable.
        """
        if not callable(callback):
            # Format the message eagerly; TypeError does not interpolate args.
            raise TypeError(
                'callback should be a callable, got {}'.format(type(callback)))
        with self._callbacks_lock:
            if not self.ok():
                callback()
            else:
                self._callbacks.append(weakref.WeakMethod(callback, self._remove_callback))

    def _logging_fini(self):
        # Undo one reference on the global logging system, tearing it down
        # when the last context releases it.
        from rclpy.impl.implementation_singleton import rclpy_implementation
        global g_logging_ref_count
        with self._lock:
            if self._logging_initialized:
                with g_logging_configure_lock:
                    g_logging_ref_count -= 1
                    if g_logging_ref_count == 0:
                        rclpy_implementation.rclpy_logging_fini()
                    if g_logging_ref_count < 0:
                        raise RuntimeError(
                            'Unexpected error: logger ref count should never be lower than zero')
                self._logging_initialized = False
| 39.296 | 97 | 0.667142 |
import sys
import threading
from typing import Callable
from typing import List
from typing import Optional
import weakref
# Serializes logging configure/fini across all Context instances.
g_logging_configure_lock = threading.Lock()
# Number of contexts that currently hold logging initialized; logging is
# configured by the first holder and finalized when the count returns to 0.
g_logging_ref_count = 0
class Context:
    """Encapsulates the init/shutdown lifecycle for rclpy communications.

    A shared, module-level logging reference count ensures rcl logging is
    configured once for the first context that asks for it and finalized
    together with the last one.
    """

    def __init__(self):
        # imported locally to avoid loading extensions on module import
        from rclpy.impl.implementation_singleton import rclpy_implementation
        from .handle import Handle
        self._handle = Handle(rclpy_implementation.rclpy_create_context())
        self._lock = threading.Lock()
        self._callbacks = []
        self._callbacks_lock = threading.Lock()
        self._logging_initialized = False

    @property
    def handle(self):
        """Return the ``Handle`` wrapping the underlying rcl context capsule."""
        return self._handle

    def init(self, args: Optional[List[str]] = None, *, initialize_logging: bool = True):
        """Initialize ROS communications for this context.

        :param args: Command line arguments; ``sys.argv`` is used when None.
        :param initialize_logging: Whether to also configure rcl logging
            (done at most once per context, refcounted across contexts).
        """
        # imported locally to avoid loading extensions on module import
        from rclpy.impl.implementation_singleton import rclpy_implementation
        global g_logging_ref_count
        with self._handle as capsule, self._lock:
            rclpy_implementation.rclpy_init(args if args is not None else sys.argv, capsule)
            if initialize_logging and not self._logging_initialized:
                with g_logging_configure_lock:
                    g_logging_ref_count += 1
                    # Only the first logging reference configures rcl logging.
                    if g_logging_ref_count == 1:
                        rclpy_implementation.rclpy_logging_configure(capsule)
                self._logging_initialized = True

    def ok(self):
        """Return ``True`` while this context has not been shut down."""
        # imported locally to avoid loading extensions on module import
        from rclpy.impl.implementation_singleton import rclpy_implementation
        with self._handle as capsule, self._lock:
            return rclpy_implementation.rclpy_ok(capsule)

    def _call_on_shutdown_callbacks(self):
        """Invoke registered shutdown callbacks, then clear the registry."""
        with self._callbacks_lock:
            for weak_method in self._callbacks:
                callback = weak_method()
                # WeakMethod() returns None once the referent is collected;
                # skip dead entries instead of raising TypeError.
                if callback is not None:
                    callback()
            self._callbacks = []

    def shutdown(self):
        """Shutdown this context, run callbacks, and release its logging ref."""
        # imported locally to avoid loading extensions on module import
        from rclpy.impl.implementation_singleton import rclpy_implementation
        with self._handle as capsule, self._lock:
            rclpy_implementation.rclpy_shutdown(capsule)
        self._call_on_shutdown_callbacks()
        self._logging_fini()

    def try_shutdown(self):
        """Shutdown this context, if it has not already been shut down."""
        # imported locally to avoid loading extensions on module import
        from rclpy.impl.implementation_singleton import rclpy_implementation
        with self._handle as capsule, self._lock:
            if rclpy_implementation.rclpy_ok(capsule):
                rclpy_implementation.rclpy_shutdown(capsule)
                self._call_on_shutdown_callbacks()

    def _remove_callback(self, weak_method):
        """Finalizer for ``WeakMethod``: drop the dead registry entry."""
        self._callbacks.remove(weak_method)

    def on_shutdown(self, callback: Callable[[], None]):
        """Register *callback* for shutdown (run it now if already shut down).

        :raises TypeError: If ``callback`` is not callable.
        """
        if not callable(callback):
            # Properly substitute the offending type into the message.
            raise TypeError(
                'callback should be a callable, got {}'.format(type(callback)))
        with self._callbacks_lock:
            if not self.ok():
                callback()
            else:
                self._callbacks.append(weakref.WeakMethod(callback, self._remove_callback))

    def _logging_fini(self):
        """Release this context's logging reference; finalize logging at zero."""
        from rclpy.impl.implementation_singleton import rclpy_implementation
        global g_logging_ref_count
        with self._lock:
            if self._logging_initialized:
                with g_logging_configure_lock:
                    g_logging_ref_count -= 1
                    if g_logging_ref_count == 0:
                        rclpy_implementation.rclpy_logging_fini()
                    if g_logging_ref_count < 0:
                        raise RuntimeError(
                            'Unexpected error: logger ref count should never be lower than zero')
                self._logging_initialized = False
| true | true |
f72116774894f97836e29f765583285f9e3b5acf | 2,226 | py | Python | .modules/.Infoga/lib/output.py | termux-one/EasY_HaCk | 0a8d09ca4b126b027b6842e02fa0c29d8250e090 | [
"Apache-2.0"
] | 1,103 | 2018-04-20T14:08:11.000Z | 2022-03-29T06:22:43.000Z | .modules/.Infoga/lib/output.py | sshourya948/EasY_HaCk | 0a8d09ca4b126b027b6842e02fa0c29d8250e090 | [
"Apache-2.0"
] | 29 | 2019-04-03T14:52:38.000Z | 2022-03-24T12:33:05.000Z | .modules/.Infoga/lib/output.py | sshourya948/EasY_HaCk | 0a8d09ca4b126b027b6842e02fa0c29d8250e090 | [
"Apache-2.0"
] | 161 | 2018-04-20T15:57:12.000Z | 2022-03-15T19:16:16.000Z | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
#
# @name : Infoga - Email Information Gathering
# @url : http://github.com/m4ll0k
# @author : Momo Outaadi (m4ll0k)
from lib.colors import *
def plus(string):
    """Print *string* tagged '[+]' (color ``G`` from lib.colors)."""
    print("%s[+]%s %s" % (G % 0, E, string))

def warn(string):
    """Print *string* tagged '[!]' (color ``R`` from lib.colors)."""
    print("%s[!]%s %s" % (R % 0, E, string))

def test(string):
    """Print *string* tagged '[*]' (color ``B`` from lib.colors)."""
    print("%s[*]%s %s" % (B % 0, E, string))

def info(string):
    """Print *string* tagged '[i]' (color ``Y`` from lib.colors)."""
    print("%s[i]%s %s" % (Y % 0, E, string))

def more(string):
    """Print *string* as an indented ' |' detail line (color ``W``)."""
    print(" %s|%s %s" % (W % 0, E, string))
# pwned data
def ppwned(data, ver):
    """Report breach ("pwned") lookup results for an email.

    :param data: response dict; expects 'status' and 'results', plus a
        'data' list of leak records when printing verbose details.
    :param ver: verbosity level; 2 or 3 also prints per-leak details.
    """
    if 'found' in data['status']:
        warn('This email was leaked... found %s results..' % (data['results']))
    if ver == 2 or ver == 3:
        # Iterate the leak records directly instead of indexing by range().
        for leak in data['data']:
            more('Leaked in: %s' % leak['title'])
            more('Data Leaked: %s' % leak['date_leaked'])
            more('Details: %s' % leak['details'])
            more('Source Network: %s' % leak['source_network'])
            print("")
# print shodan return data
def data(ip,data,email,ver):
if ver == 1:plus('Email: %s (%s)'%(email,ip))
elif ver == 2:
try:
plus('Email: %s (%s)'%(email,ip))
if data['hostnames']:more('Hostname: %s'%(data['hostnames'][0]))
if data['country_code'] and data['country_name']:more('Country: %s (%s)'%(data['country_code'],data['country_name']))
if data['city'] and data['region_code']:more('City: %s (%s)'%(data['city'],data['region_code']))
except KeyError as e:
pass
elif ver == 3:
try:
plus('Email: %s (%s)'%(email,ip))
if data['hostnames']:more('Hostname: %s'%(data['hostnames'][0]))
if data['country_code'] and data['country_name']:more('Country: %s (%s)'%(data['country_code'],data['country_name']))
if data['city'] and data['region_code']:more('City: %s (%s)'%(data['city'],data['region_code']))
if data['asn']:more('ASN: %s'%(data['asn']))
if data['isp']:more('ISP: %s'%(data['isp']))
if data['latitude'] and data['longitude']:more('Map: Map: https://www.google.com/maps/@%s,%s,10z (%s,%s)'%(
data['latitude'],data['longitude'],data['latitude'],data['longitude']))
if data['org']:more('Organization: %s'%(data['org']))
if data['ports']:more('Ports: %s'%(data['ports']))
if data['vulns']:more('Vulns: %s'%(data['vulns']))
except KeyError as e:
pass
print("") | 42.807692 | 120 | 0.600629 |
from lib.colors import *
# Colored status-line printers; G/R/B/Y/W/E come from lib.colors (star import).
# Tags: [+], [!], [*], [i], and an indented ' |' detail line.
def plus(string):print("%s[+]%s %s"%(G%0,E,string))
def warn(string):print("%s[!]%s %s"%(R%0,E,string))
def test(string):print("%s[*]%s %s"%(B%0,E,string))
def info(string):print("%s[i]%s %s"%(Y%0,E,string))
def more(string):print(" %s|%s %s"%(W%0,E,string))
def ppwned(data,ver):
    """Report breach ("pwned") lookup results; ver 2/3 print leak details."""
    if 'found' in data['status']:
        warn('This email was leaked... found %s results..'%(data['results']))
    if ver == 2 or ver == 3:
        # One detail block (plus a blank separator line) per leak record.
        for i in range(0,len(data['data'])):
            more('Leaked in: %s'%data['data'][i]['title'])
            more('Data Leaked: %s'%data['data'][i]['date_leaked'])
            more('Details: %s'%data['data'][i]['details'])
            more('Source Network: %s'%data['data'][i]['source_network'])
            print("")
def data(ip, data, email, ver):
    """Print host details for *email* resolved to *ip* at verbosity *ver*.

    :param ip: IP address associated with the email's domain.
    :param data: Shodan-style host info dict; fields may be absent.
    :param email: the email address being reported.
    :param ver: 1 = address only, 2 = adds location, 3 = adds
        ASN/ISP/map link/organization/ports/vulns.

    NOTE(review): a missing dict key silently ends the remaining detail
    lines for the host (the whole block is guarded by ``except KeyError``)
    -- deliberate best-effort behavior, kept as-is.
    """
    if ver == 1:
        plus('Email: %s (%s)' % (email, ip))
    elif ver == 2:
        try:
            plus('Email: %s (%s)' % (email, ip))
            if data['hostnames']:
                more('Hostname: %s' % (data['hostnames'][0]))
            if data['country_code'] and data['country_name']:
                more('Country: %s (%s)' % (data['country_code'], data['country_name']))
            if data['city'] and data['region_code']:
                more('City: %s (%s)' % (data['city'], data['region_code']))
        except KeyError:
            pass
    elif ver == 3:
        try:
            plus('Email: %s (%s)' % (email, ip))
            if data['hostnames']:
                more('Hostname: %s' % (data['hostnames'][0]))
            if data['country_code'] and data['country_name']:
                more('Country: %s (%s)' % (data['country_code'], data['country_name']))
            if data['city'] and data['region_code']:
                more('City: %s (%s)' % (data['city'], data['region_code']))
            if data['asn']:
                more('ASN: %s' % (data['asn']))
            if data['isp']:
                more('ISP: %s' % (data['isp']))
            if data['latitude'] and data['longitude']:
                # 'Map: Map:' duplication kept to preserve existing output.
                more('Map: Map: https://www.google.com/maps/@%s,%s,10z (%s,%s)' % (
                    data['latitude'], data['longitude'], data['latitude'], data['longitude']))
            if data['org']:
                more('Organization: %s' % (data['org']))
            if data['ports']:
                more('Ports: %s' % (data['ports']))
            if data['vulns']:
                more('Vulns: %s' % (data['vulns']))
        except KeyError:
            pass
        # Blank separator, emitted only in verbose mode (as before).
        print("")
f7211689b5c3abfbb49932d88e4323e9e99aec1e | 19,600 | py | Python | pypureclient/flasharray/FA_2_3/models/volume_performance_by_array.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 14 | 2018-12-07T18:30:27.000Z | 2022-02-22T09:12:33.000Z | pypureclient/flasharray/FA_2_3/models/volume_performance_by_array.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 28 | 2019-09-17T21:03:52.000Z | 2022-03-29T22:07:35.000Z | pypureclient/flasharray/FA_2_3/models/volume_performance_by_array.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 15 | 2020-06-11T15:50:08.000Z | 2022-03-21T09:27:25.000Z | # coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_3 import models
class VolumePerformanceByArray(object):
    """Per-array performance sample for a volume (FlashArray REST 2.3).

    Generated swagger-style model: ``swagger_types`` maps attribute names
    to type names and ``attribute_map`` maps them to JSON keys (here the
    names coincide).  All assignment is funneled through ``__setattr__``,
    which rejects unknown attributes and negative metric values.
    """

    swagger_types = {
        'id': 'str',
        'name': 'str',
        'bytes_per_mirrored_write': 'int',
        'bytes_per_op': 'int',
        'bytes_per_read': 'int',
        'bytes_per_write': 'int',
        'mirrored_write_bytes_per_sec': 'int',
        'mirrored_writes_per_sec': 'int',
        'qos_rate_limit_usec_per_mirrored_write_op': 'int',
        'qos_rate_limit_usec_per_read_op': 'int',
        'qos_rate_limit_usec_per_write_op': 'int',
        'queue_usec_per_mirrored_write_op': 'int',
        'queue_usec_per_read_op': 'int',
        'queue_usec_per_write_op': 'int',
        'read_bytes_per_sec': 'int',
        'reads_per_sec': 'int',
        'san_usec_per_mirrored_write_op': 'int',
        'san_usec_per_read_op': 'int',
        'san_usec_per_write_op': 'int',
        'service_usec_per_mirrored_write_op': 'int',
        'service_usec_per_read_op': 'int',
        'service_usec_per_write_op': 'int',
        'time': 'int',
        'usec_per_mirrored_write_op': 'int',
        'usec_per_read_op': 'int',
        'usec_per_write_op': 'int',
        'write_bytes_per_sec': 'int',
        'writes_per_sec': 'int',
        'array': 'Resource'
    }

    # Every JSON key equals the attribute name, so derive the map.
    attribute_map = {key: key for key in swagger_types}

    required_args = {
    }

    # Attributes exempt from the ">= 0" check on assignment; every other
    # attribute is a non-negative integer performance counter.
    _UNVALIDATED = frozenset(('id', 'name', 'time', 'array'))

    def __init__(
        self,
        id=None,  # type: str
        name=None,  # type: str
        bytes_per_mirrored_write=None,  # type: int
        bytes_per_op=None,  # type: int
        bytes_per_read=None,  # type: int
        bytes_per_write=None,  # type: int
        mirrored_write_bytes_per_sec=None,  # type: int
        mirrored_writes_per_sec=None,  # type: int
        qos_rate_limit_usec_per_mirrored_write_op=None,  # type: int
        qos_rate_limit_usec_per_read_op=None,  # type: int
        qos_rate_limit_usec_per_write_op=None,  # type: int
        queue_usec_per_mirrored_write_op=None,  # type: int
        queue_usec_per_read_op=None,  # type: int
        queue_usec_per_write_op=None,  # type: int
        read_bytes_per_sec=None,  # type: int
        reads_per_sec=None,  # type: int
        san_usec_per_mirrored_write_op=None,  # type: int
        san_usec_per_read_op=None,  # type: int
        san_usec_per_write_op=None,  # type: int
        service_usec_per_mirrored_write_op=None,  # type: int
        service_usec_per_read_op=None,  # type: int
        service_usec_per_write_op=None,  # type: int
        time=None,  # type: int
        usec_per_mirrored_write_op=None,  # type: int
        usec_per_read_op=None,  # type: int
        usec_per_write_op=None,  # type: int
        write_bytes_per_sec=None,  # type: int
        writes_per_sec=None,  # type: int
        array=None,  # type: models.Resource
    ):
        """
        Keyword args (all optional; only non-None values are stored):
            id (str): Globally unique, system-generated ID; not modifiable.
            name (str): User-specified, locally unique name; changeable.
            time (int): Sample timestamp, milliseconds since the UNIX epoch.
            array (Resource): The array the metrics were recorded on.
            All remaining keyword arguments are non-negative integer
            performance counters: average I/O sizes (bytes per op/read/
            write/mirrored write), throughput (bytes and ops per second),
            and microsecond latency breakdowns (queue, QoS rate limit,
            SAN, service, and total) for reads, writes, and mirrored
            writes.
        """
        # Snapshot the arguments, then assign the provided ones in
        # declaration order; setattr routes through __setattr__ validation.
        provided = dict(locals())
        for attr in self.swagger_types:
            value = provided[attr]
            if value is not None:
                setattr(self, attr, value)

    def __setattr__(self, key, value):
        """Reject unknown attributes and negative metric values."""
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `VolumePerformanceByArray`".format(key))
        # All numeric counters share the same lower-bound check; 'id',
        # 'name', 'time' and 'array' are exempt (matching the original
        # per-key checks).
        if key not in self._UNVALIDATED and value is not None and value < 0:
            raise ValueError(
                "Invalid value for `{}`, must be a value greater than or equal to `0`".format(key))
        self.__dict__[key] = value

    def __getattribute__(self, item):
        """Report class-level ``Property`` placeholders as missing attributes."""
        value = object.__getattribute__(self, item)
        if isinstance(value, Property):
            raise AttributeError
        else:
            return value

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Plain dict iteration replaces the six.iteritems py2 shim.
        for attr in self.swagger_types:
            if not hasattr(self, attr):
                continue
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()
                }
            else:
                result[attr] = value
        if issubclass(VolumePerformanceByArray, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, VolumePerformanceByArray):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 55.211268 | 228 | 0.659847 |
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_3 import models
class VolumePerformanceByArray(object):
swagger_types = {
'id': 'str',
'name': 'str',
'bytes_per_mirrored_write': 'int',
'bytes_per_op': 'int',
'bytes_per_read': 'int',
'bytes_per_write': 'int',
'mirrored_write_bytes_per_sec': 'int',
'mirrored_writes_per_sec': 'int',
'qos_rate_limit_usec_per_mirrored_write_op': 'int',
'qos_rate_limit_usec_per_read_op': 'int',
'qos_rate_limit_usec_per_write_op': 'int',
'queue_usec_per_mirrored_write_op': 'int',
'queue_usec_per_read_op': 'int',
'queue_usec_per_write_op': 'int',
'read_bytes_per_sec': 'int',
'reads_per_sec': 'int',
'san_usec_per_mirrored_write_op': 'int',
'san_usec_per_read_op': 'int',
'san_usec_per_write_op': 'int',
'service_usec_per_mirrored_write_op': 'int',
'service_usec_per_read_op': 'int',
'service_usec_per_write_op': 'int',
'time': 'int',
'usec_per_mirrored_write_op': 'int',
'usec_per_read_op': 'int',
'usec_per_write_op': 'int',
'write_bytes_per_sec': 'int',
'writes_per_sec': 'int',
'array': 'Resource'
}
attribute_map = {
'id': 'id',
'name': 'name',
'bytes_per_mirrored_write': 'bytes_per_mirrored_write',
'bytes_per_op': 'bytes_per_op',
'bytes_per_read': 'bytes_per_read',
'bytes_per_write': 'bytes_per_write',
'mirrored_write_bytes_per_sec': 'mirrored_write_bytes_per_sec',
'mirrored_writes_per_sec': 'mirrored_writes_per_sec',
'qos_rate_limit_usec_per_mirrored_write_op': 'qos_rate_limit_usec_per_mirrored_write_op',
'qos_rate_limit_usec_per_read_op': 'qos_rate_limit_usec_per_read_op',
'qos_rate_limit_usec_per_write_op': 'qos_rate_limit_usec_per_write_op',
'queue_usec_per_mirrored_write_op': 'queue_usec_per_mirrored_write_op',
'queue_usec_per_read_op': 'queue_usec_per_read_op',
'queue_usec_per_write_op': 'queue_usec_per_write_op',
'read_bytes_per_sec': 'read_bytes_per_sec',
'reads_per_sec': 'reads_per_sec',
'san_usec_per_mirrored_write_op': 'san_usec_per_mirrored_write_op',
'san_usec_per_read_op': 'san_usec_per_read_op',
'san_usec_per_write_op': 'san_usec_per_write_op',
'service_usec_per_mirrored_write_op': 'service_usec_per_mirrored_write_op',
'service_usec_per_read_op': 'service_usec_per_read_op',
'service_usec_per_write_op': 'service_usec_per_write_op',
'time': 'time',
'usec_per_mirrored_write_op': 'usec_per_mirrored_write_op',
'usec_per_read_op': 'usec_per_read_op',
'usec_per_write_op': 'usec_per_write_op',
'write_bytes_per_sec': 'write_bytes_per_sec',
'writes_per_sec': 'writes_per_sec',
'array': 'array'
}
required_args = {
}
def __init__(
self,
id=None,
name=None,
bytes_per_mirrored_write=None,
bytes_per_op=None,
bytes_per_read=None,
bytes_per_write=None,
mirrored_write_bytes_per_sec=None,
mirrored_writes_per_sec=None,
qos_rate_limit_usec_per_mirrored_write_op=None,
qos_rate_limit_usec_per_read_op=None,
qos_rate_limit_usec_per_write_op=None,
queue_usec_per_mirrored_write_op=None,
queue_usec_per_read_op=None,
queue_usec_per_write_op=None,
read_bytes_per_sec=None,
reads_per_sec=None,
san_usec_per_mirrored_write_op=None,
san_usec_per_read_op=None,
san_usec_per_write_op=None,
service_usec_per_mirrored_write_op=None,
service_usec_per_read_op=None,
service_usec_per_write_op=None,
time=None,
usec_per_mirrored_write_op=None,
usec_per_read_op=None,
usec_per_write_op=None,
write_bytes_per_sec=None,
writes_per_sec=None,
array=None,
):
if id is not None:
self.id = id
if name is not None:
self.name = name
if bytes_per_mirrored_write is not None:
self.bytes_per_mirrored_write = bytes_per_mirrored_write
if bytes_per_op is not None:
self.bytes_per_op = bytes_per_op
if bytes_per_read is not None:
self.bytes_per_read = bytes_per_read
if bytes_per_write is not None:
self.bytes_per_write = bytes_per_write
if mirrored_write_bytes_per_sec is not None:
self.mirrored_write_bytes_per_sec = mirrored_write_bytes_per_sec
if mirrored_writes_per_sec is not None:
self.mirrored_writes_per_sec = mirrored_writes_per_sec
if qos_rate_limit_usec_per_mirrored_write_op is not None:
self.qos_rate_limit_usec_per_mirrored_write_op = qos_rate_limit_usec_per_mirrored_write_op
if qos_rate_limit_usec_per_read_op is not None:
self.qos_rate_limit_usec_per_read_op = qos_rate_limit_usec_per_read_op
if qos_rate_limit_usec_per_write_op is not None:
self.qos_rate_limit_usec_per_write_op = qos_rate_limit_usec_per_write_op
if queue_usec_per_mirrored_write_op is not None:
self.queue_usec_per_mirrored_write_op = queue_usec_per_mirrored_write_op
if queue_usec_per_read_op is not None:
self.queue_usec_per_read_op = queue_usec_per_read_op
if queue_usec_per_write_op is not None:
self.queue_usec_per_write_op = queue_usec_per_write_op
if read_bytes_per_sec is not None:
self.read_bytes_per_sec = read_bytes_per_sec
if reads_per_sec is not None:
self.reads_per_sec = reads_per_sec
if san_usec_per_mirrored_write_op is not None:
self.san_usec_per_mirrored_write_op = san_usec_per_mirrored_write_op
if san_usec_per_read_op is not None:
self.san_usec_per_read_op = san_usec_per_read_op
if san_usec_per_write_op is not None:
self.san_usec_per_write_op = san_usec_per_write_op
if service_usec_per_mirrored_write_op is not None:
self.service_usec_per_mirrored_write_op = service_usec_per_mirrored_write_op
if service_usec_per_read_op is not None:
self.service_usec_per_read_op = service_usec_per_read_op
if service_usec_per_write_op is not None:
self.service_usec_per_write_op = service_usec_per_write_op
if time is not None:
self.time = time
if usec_per_mirrored_write_op is not None:
self.usec_per_mirrored_write_op = usec_per_mirrored_write_op
if usec_per_read_op is not None:
self.usec_per_read_op = usec_per_read_op
if usec_per_write_op is not None:
self.usec_per_write_op = usec_per_write_op
if write_bytes_per_sec is not None:
self.write_bytes_per_sec = write_bytes_per_sec
if writes_per_sec is not None:
self.writes_per_sec = writes_per_sec
if array is not None:
self.array = array
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `VolumePerformanceByArray`".format(key))
if key == "bytes_per_mirrored_write" and value is not None:
if value < 0:
raise ValueError("Invalid value for `bytes_per_mirrored_write`, must be a value greater than or equal to `0`")
if key == "bytes_per_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `bytes_per_op`, must be a value greater than or equal to `0`")
if key == "bytes_per_read" and value is not None:
if value < 0:
raise ValueError("Invalid value for `bytes_per_read`, must be a value greater than or equal to `0`")
if key == "bytes_per_write" and value is not None:
if value < 0:
raise ValueError("Invalid value for `bytes_per_write`, must be a value greater than or equal to `0`")
if key == "mirrored_write_bytes_per_sec" and value is not None:
if value < 0:
raise ValueError("Invalid value for `mirrored_write_bytes_per_sec`, must be a value greater than or equal to `0`")
if key == "mirrored_writes_per_sec" and value is not None:
if value < 0:
raise ValueError("Invalid value for `mirrored_writes_per_sec`, must be a value greater than or equal to `0`")
if key == "qos_rate_limit_usec_per_mirrored_write_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `qos_rate_limit_usec_per_mirrored_write_op`, must be a value greater than or equal to `0`")
if key == "qos_rate_limit_usec_per_read_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `qos_rate_limit_usec_per_read_op`, must be a value greater than or equal to `0`")
if key == "qos_rate_limit_usec_per_write_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `qos_rate_limit_usec_per_write_op`, must be a value greater than or equal to `0`")
if key == "queue_usec_per_mirrored_write_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `queue_usec_per_mirrored_write_op`, must be a value greater than or equal to `0`")
if key == "queue_usec_per_read_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `queue_usec_per_read_op`, must be a value greater than or equal to `0`")
if key == "queue_usec_per_write_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `queue_usec_per_write_op`, must be a value greater than or equal to `0`")
if key == "read_bytes_per_sec" and value is not None:
if value < 0:
raise ValueError("Invalid value for `read_bytes_per_sec`, must be a value greater than or equal to `0`")
if key == "reads_per_sec" and value is not None:
if value < 0:
raise ValueError("Invalid value for `reads_per_sec`, must be a value greater than or equal to `0`")
if key == "san_usec_per_mirrored_write_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `san_usec_per_mirrored_write_op`, must be a value greater than or equal to `0`")
if key == "san_usec_per_read_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `san_usec_per_read_op`, must be a value greater than or equal to `0`")
if key == "san_usec_per_write_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `san_usec_per_write_op`, must be a value greater than or equal to `0`")
if key == "service_usec_per_mirrored_write_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `service_usec_per_mirrored_write_op`, must be a value greater than or equal to `0`")
if key == "service_usec_per_read_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `service_usec_per_read_op`, must be a value greater than or equal to `0`")
if key == "service_usec_per_write_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `service_usec_per_write_op`, must be a value greater than or equal to `0`")
if key == "usec_per_mirrored_write_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `usec_per_mirrored_write_op`, must be a value greater than or equal to `0`")
if key == "usec_per_read_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `usec_per_read_op`, must be a value greater than or equal to `0`")
if key == "usec_per_write_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `usec_per_write_op`, must be a value greater than or equal to `0`")
if key == "write_bytes_per_sec" and value is not None:
if value < 0:
raise ValueError("Invalid value for `write_bytes_per_sec`, must be a value greater than or equal to `0`")
if key == "writes_per_sec" and value is not None:
if value < 0:
raise ValueError("Invalid value for `writes_per_sec`, must be a value greater than or equal to `0`")
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(VolumePerformanceByArray, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
    def __repr__(self):
        """Use the pretty-printed dict form for repr() as well."""
        return self.to_str()
def __eq__(self, other):
if not isinstance(other, VolumePerformanceByArray):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
f72116c79003469c2b0e2b7eb8a18e69c2918151 | 3,600 | py | Python | src/lambda_codebase/initial_commit/bootstrap_repository/adf-build/shared/python/stepfunctions.py | ikben/aws-deployment-framework | 9a32492209d35660b9ece66211eb200b64dc0ef9 | [
"Apache-2.0"
] | 1 | 2022-03-24T10:43:53.000Z | 2022-03-24T10:43:53.000Z | src/lambda_codebase/initial_commit/bootstrap_repository/adf-build/shared/python/stepfunctions.py | thomasmcgannon/aws-deployment-framework | 0723ddf4eaf55888ae780dc48873f0ec4766cfbd | [
"Apache-2.0"
] | null | null | null | src/lambda_codebase/initial_commit/bootstrap_repository/adf-build/shared/python/stepfunctions.py | thomasmcgannon/aws-deployment-framework | 0723ddf4eaf55888ae780dc48873f0ec4766cfbd | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
"""
Step Functions module used throughout the ADF
"""
import json
from time import sleep
from logger import configure_logger
from partition import get_partition
LOGGER = configure_logger(__name__)
class StepFunctions:
    """
    Class used for modeling Step Functions
    """

    # Terminal states that indicate the execution did not succeed.
    _FAILED_STATES = ('FAILED', 'ABORTED', 'TIMED_OUT')

    def __init__(
            self,
            role,
            deployment_account_id,
            deployment_account_region,
            regions,
            account_ids=None,
            full_path=None,
            update_pipelines_only=0,
            error=0
    ):
        """
        :param role: assumed boto3 role/session used to create the client
        :param deployment_account_id: account id hosting the state machine
        :param deployment_account_region: region hosting the state machine
        :param regions: target regions forwarded in the execution input
        :param account_ids: target account ids forwarded in the input
        :param full_path: OU path forwarded in the input
        :param update_pipelines_only: forwarded as "update_only" in the input
        :param error: error indicator forwarded in the input
        """
        self.deployment_account_region = deployment_account_region
        self.client = role.client(
            'stepfunctions',
            region_name=self.deployment_account_region
        )
        self.regions = regions
        self.deployment_account_id = deployment_account_id
        self.update_pipelines_only = update_pipelines_only
        self.account_ids = account_ids
        self.execution_arn = None
        self.full_path = full_path
        self.execution_status = None
        self.error = error

    def execute_statemachine(self):
        """
        Main entry to execute the state machine in the Deployment Account
        and block until it reaches a terminal state.
        """
        self._start_statemachine()
        self._wait_state_machine_execution()

    def _start_statemachine(self):
        """
        Executes the Update Cross Account IAM Step Function in the
        Deployment Account and records the execution ARN and initial status.
        """
        partition = get_partition(self.deployment_account_region)
        self.execution_arn = self.client.start_execution(
            stateMachineArn=(
                f"arn:{partition}:states:{self.deployment_account_region}:"
                f"{self.deployment_account_id}:stateMachine:EnableCrossAccountAccess"
            ),
            input=json.dumps({
                "deployment_account_region": self.deployment_account_region,
                "deployment_account_id": self.deployment_account_id,
                "account_ids": self.account_ids,
                "regions": self.regions,
                "full_path": self.full_path,
                "update_only": self.update_pipelines_only,
                "error": self.error
            })
        ).get('executionArn')
        self._fetch_statemachine_status()

    @property
    def execution_status(self):
        """
        Returns the (cached) status of the state machine
        """
        return self._execution_status

    @execution_status.setter
    def execution_status(self, execution_status):
        """
        Set the status of the state machine
        """
        self._execution_status = execution_status

    def _fetch_statemachine_status(self):
        """
        Get the current status of the state machine
        """
        execution = self.client.describe_execution(
            executionArn=self.execution_arn
        )
        self._execution_status = execution.get('status', None)

    # TODO: check whether botocore offers a waiter for this
    def _wait_state_machine_execution(self):
        """
        Waits until the state machine reaches a terminal state.

        The status has already been fetched by _start_statemachine, so we
        sleep first and re-fetch; this also avoids the original behavior of
        sleeping an extra 10 seconds after a terminal state was observed.
        """
        while self.execution_status == 'RUNNING':
            sleep(10)  # Poll every 10 seconds
            self._fetch_statemachine_status()
        if self.execution_status in self._FAILED_STATES:
            raise Exception(
                f'State Machine on Deployment account {self.deployment_account_id} '
                f'has status: {self.execution_status}, see logs'
            )
| 31.578947 | 85 | 0.621944 |
import json
from time import sleep
from logger import configure_logger
from partition import get_partition
LOGGER = configure_logger(__name__)
class StepFunctions:
    """Models and executes the EnableCrossAccountAccess state machine
    in the deployment account."""
    def __init__(
            self,
            role,
            deployment_account_id,
            deployment_account_region,
            regions,
            account_ids=None,
            full_path=None,
            update_pipelines_only=0,
            error=0
    ):
        # role is presumably an assumed boto3 role/session exposing
        # .client() — confirm against callers.
        self.deployment_account_region = deployment_account_region
        self.client = role.client(
            'stepfunctions',
            region_name=self.deployment_account_region
        )
        self.regions = regions
        self.deployment_account_id = deployment_account_id
        self.update_pipelines_only = update_pipelines_only
        self.account_ids = account_ids
        self.execution_arn = None
        self.full_path = full_path
        self.execution_status = None
        self.error = error
    def execute_statemachine(self):
        """Start the state machine and block until it completes."""
        self._start_statemachine()
        self._wait_state_machine_execution()
    def _start_statemachine(self):
        """Start an execution and record its ARN and initial status."""
        partition = get_partition(self.deployment_account_region)
        self.execution_arn = self.client.start_execution(
            stateMachineArn=(
                f"arn:{partition}:states:{self.deployment_account_region}:"
                f"{self.deployment_account_id}:stateMachine:EnableCrossAccountAccess"
            ),
            input=json.dumps({
                "deployment_account_region": self.deployment_account_region,
                "deployment_account_id": self.deployment_account_id,
                "account_ids": self.account_ids,
                "regions": self.regions,
                "full_path": self.full_path,
                "update_only": self.update_pipelines_only,
                "error": self.error
            })
        ).get('executionArn')
        self._fetch_statemachine_status()
    @property
    def execution_status(self):
        """Current (cached) execution status string."""
        return self._execution_status
    @execution_status.setter
    def execution_status(self, execution_status):
        """Set the cached execution status."""
        self._execution_status = execution_status
    def _fetch_statemachine_status(self):
        """Refresh the cached status via DescribeExecution."""
        execution = self.client.describe_execution(
            executionArn=self.execution_arn
        )
        self._execution_status = execution.get('status', None)
    def _wait_state_machine_execution(self):
        """Poll until the execution leaves RUNNING; raise on failure states."""
        while self.execution_status == 'RUNNING':
            self._fetch_statemachine_status()
            sleep(10)  # poll interval
        if self.execution_status in ('FAILED', 'ABORTED', 'TIMED_OUT'):
            raise Exception(
                f'State Machine on Deployment account {self.deployment_account_id} '
                f'has status: {self.execution_status}, see logs'
            )
| true | true |
f721174bebba042d3b37612296998e084c86fde8 | 918 | py | Python | apps/cli/utils/merge_yaml_sources.py | derekmerck/DIANA | 5553265b8fc822b35848d0966b25b93b99d503fb | [
"MIT"
] | 9 | 2018-03-15T19:10:27.000Z | 2021-03-15T21:01:24.000Z | apps/cli/utils/merge_yaml_sources.py | derekmerck/DIANA | 5553265b8fc822b35848d0966b25b93b99d503fb | [
"MIT"
] | null | null | null | apps/cli/utils/merge_yaml_sources.py | derekmerck/DIANA | 5553265b8fc822b35848d0966b25b93b99d503fb | [
"MIT"
] | 2 | 2018-03-15T19:13:22.000Z | 2018-04-18T16:33:33.000Z | import os, logging
from glob import glob
from pprint import pformat
import yaml
"""
Env var expansion and merge data from:
- input in yaml/json format
- input file or dir of files in yaml/json format
"""
def merge_yaml_sources(data=None, path=None):
    """Expand environment variables and merge YAML/JSON mappings.

    Sources, in increasing precedence:
      - ``data``: an inline YAML/JSON string
      - ``path``: a YAML/JSON file, or a directory of ``*.yml`` files

    :param data: optional inline YAML/JSON document
    :param path: optional path to a file or to a directory of ``*.yml`` files
    :returns: merged dict (later sources override earlier keys)
    """
    result = {}
    if data:
        data_exp = os.path.expandvars(data)
        # An empty document loads as None; normalize to an empty mapping.
        result = yaml.safe_load(data_exp) or {}
    # Guard against path=None (its default): os.path.isfile(None)
    # raises TypeError.
    if path and os.path.isfile(path):
        with open(path) as f:
            finput_exp = os.path.expandvars(f.read())
            loaded = yaml.safe_load(finput_exp)
            if loaded:
                result.update(loaded)
    elif path and os.path.isdir(path):
        for fp in glob(os.path.join(path, "*.yml")):
            with open(fp) as f:
                finput_exp = os.path.expandvars(f.read())
                loaded = yaml.safe_load(finput_exp)
                if loaded:
                    result.update(loaded)
    logging.debug("Merged yaml maps")
    logging.debug("===================")
    logging.debug(pformat(result))
    return result
from glob import glob
from pprint import pformat
import yaml
def merge_yaml_sources(data=None, path=None):
    """Merge env-var-expanded YAML mappings from a string and a file/dir.

    NOTE(review): calling this with ``path=None`` (its default) raises
    ``TypeError`` inside ``os.path.isfile`` — callers must always pass a
    path.  ``yaml.safe_load`` of an empty document returns None, which
    would also break ``result.update`` — confirm inputs are non-empty.
    """
    result = {}
    if data:
        data_exp = os.path.expandvars(data)
        result = yaml.safe_load(data_exp)
    if os.path.isfile(path):
        with open(path) as f:
            finput_exp = os.path.expandvars(f.read())
            result.update(yaml.safe_load(finput_exp))
    elif os.path.isdir(path):
        # Only *.yml files are considered inside a directory.
        fps = glob(os.path.join(path, "*.yml"))
        for fp in fps:
            with open(fp) as f:
                finput_exp = os.path.expandvars(f.read())
                result.update(yaml.safe_load(finput_exp))
    logging.debug("Merged yaml maps")
    logging.debug("===================")
    logging.debug(pformat(result))
    return result
f7211765b08d783a5f129616815fe2035703ff38 | 25,215 | py | Python | neutron/agent/l3/router_info.py | markmcclain/neutron | 3108d2dece0501dbb661e2f5a4bb530a199f9fde | [
"Apache-2.0"
] | 3 | 2016-08-07T01:25:54.000Z | 2021-03-01T10:19:14.000Z | neutron/agent/l3/router_info.py | cyysu/neutron_read | 07d1a526d7d44ad0207d27e0ee04f1582541ab89 | [
"Apache-2.0"
] | null | null | null | neutron/agent/l3/router_info.py | cyysu/neutron_read | 07d1a526d7d44ad0207d27e0ee04f1582541ab89 | [
"Apache-2.0"
] | 2 | 2016-09-10T13:21:10.000Z | 2016-12-23T01:44:53.000Z | # Copyright (c) 2014 Openstack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from oslo_log import log as logging
from neutron.agent.l3 import namespaces
from neutron.agent.linux import ip_lib
from neutron.agent.linux import iptables_manager
from neutron.agent.linux import ra
from neutron.common import constants as l3_constants
from neutron.common import exceptions as n_exc
from neutron.common import utils as common_utils
from neutron.i18n import _LW
LOG = logging.getLogger(__name__)
INTERNAL_DEV_PREFIX = namespaces.INTERNAL_DEV_PREFIX
EXTERNAL_DEV_PREFIX = namespaces.EXTERNAL_DEV_PREFIX
EXTERNAL_INGRESS_MARK_MASK = '0xffffffff'
class RouterInfo(object):
    """In-agent representation of a single neutron router.

    Holds the router dict received from the plugin together with the
    local resources (namespace, iptables manager, radvd monitor) that
    realize the router on this node.
    """

    def __init__(self,
                 router_id,
                 router,
                 agent_conf,
                 interface_driver,
                 use_ipv6=False):
        self.router_id = router_id
        self.ex_gw_port = None
        self._snat_enabled = None
        self._snat_action = None
        self.internal_ports = []
        self.floating_ips = set()
        # Invoke the setter for establishing initial SNAT action
        self.router = router
        self.use_ipv6 = use_ipv6
        self.ns_name = None
        self.router_namespace = None
        if agent_conf.use_namespaces:
            ns = namespaces.RouterNamespace(
                router_id, agent_conf, interface_driver, use_ipv6)
            self.router_namespace = ns
            self.ns_name = ns.name
        self.iptables_manager = iptables_manager.IptablesManager(
            use_ipv6=use_ipv6,
            namespace=self.ns_name)
        self.routes = []
        self.agent_conf = agent_conf
        self.driver = interface_driver
        # radvd is a neutron.agent.linux.ra.DaemonMonitor
        self.radvd = None

    def initialize(self, process_monitor):
        """Initialize the router on the system.

        This differs from __init__ in that this method actually affects the
        system creating namespaces, starting processes, etc. The other merely
        initializes the python object. This separates in-memory object
        initialization from methods that actually go do stuff to the system.

        :param process_monitor: The agent's process monitor instance.
        """
        self.process_monitor = process_monitor
        self.radvd = ra.DaemonMonitor(self.router_id,
                                      self.ns_name,
                                      process_monitor,
                                      self.get_internal_device_name)
        if self.router_namespace:
            self.router_namespace.create()

    @property
    def router(self):
        """The router dict most recently received from the plugin."""
        return self._router

    @router.setter
    def router(self, value):
        self._router = value
        if not self._router:
            return
        # enable_snat by default if it wasn't specified by plugin
        self._snat_enabled = self._router.get('enable_snat', True)
        # Set a SNAT action for the router
        if self._router.get('gw_port'):
            self._snat_action = ('add_rules' if self._snat_enabled
                                 else 'remove_rules')
        elif self.ex_gw_port:
            # Gateway port was removed, remove rules
            self._snat_action = 'remove_rules'

    @property
    def is_ha(self):
        # TODO(Carl) Refactoring should render this obsolete. Remove it.
        return False

    def get_internal_device_name(self, port_id):
        """Return the internal (qr-) device name for a port id."""
        return (INTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN]

    def get_external_device_name(self, port_id):
        """Return the external (qg-) device name for a port id."""
        return (EXTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN]

    def get_external_device_interface_name(self, ex_gw_port):
        """Return the device name used for the given gateway port."""
        return self.get_external_device_name(ex_gw_port['id'])

    def perform_snat_action(self, snat_callback, *args):
        """Invoke *snat_callback* with the pending SNAT action, then clear it."""
        # Process SNAT rules for attached subnets
        if self._snat_action:
            snat_callback(self._router.get('gw_port'),
                          *args,
                          action=self._snat_action)
        self._snat_action = None

    def _update_routing_table(self, operation, route):
        """Run ``ip route <operation>`` for *route* inside the namespace."""
        cmd = ['ip', 'route', operation, 'to', route['destination'],
               'via', route['nexthop']]
        ip_wrapper = ip_lib.IPWrapper(namespace=self.ns_name)
        # Exit codes are ignored (check_exit_code=False) — presumably so
        # deleting an already-absent route does not abort; confirm.
        ip_wrapper.netns.execute(cmd, check_exit_code=False)
def routes_updated(self):
new_routes = self.router['routes']
old_routes = self.routes
adds, removes = common_utils.diff_list_of_dict(old_routes,
new_routes)
for route in adds:
LOG.debug("Added route entry is '%s'", route)
# remove replaced route from deleted route
for del_route in removes:
if route['destination'] == del_route['destination']:
removes.remove(del_route)
#replace success even if there is no existing route
self._update_routing_table('replace', route)
for route in removes:
LOG.debug("Removed route entry is '%s'", route)
self._update_routing_table('delete', route)
self.routes = new_routes
    def get_ex_gw_port(self):
        """Return the router's external gateway port dict, or None."""
        return self.router.get('gw_port')

    def get_floating_ips(self):
        """Filter Floating IPs to be hosted on this agent."""
        return self.router.get(l3_constants.FLOATINGIP_KEY, [])

    def floating_forward_rules(self, floating_ip, fixed_ip):
        """Return the iptables NAT (chain, rule) pairs for one floating IP."""
        return [('PREROUTING', '-d %s -j DNAT --to %s' %
                 (floating_ip, fixed_ip)),
                ('OUTPUT', '-d %s -j DNAT --to %s' %
                 (floating_ip, fixed_ip)),
                ('float-snat', '-s %s -j SNAT --to %s' %
                 (fixed_ip, floating_ip))]

    def process_floating_ip_nat_rules(self):
        """Configure NAT rules for the router's floating IPs.

        Configures iptables rules for the floating ips of the given router
        """
        # Clear out all iptables rules for floating ips
        self.iptables_manager.ipv4['nat'].clear_rules_by_tag('floating_ip')
        floating_ips = self.get_floating_ips()
        # Loop once to ensure that floating ips are configured.
        for fip in floating_ips:
            # Rebuild iptables rules for the floating ip.
            fixed = fip['fixed_ip_address']
            fip_ip = fip['floating_ip_address']
            for chain, rule in self.floating_forward_rules(fip_ip, fixed):
                self.iptables_manager.ipv4['nat'].add_rule(chain, rule,
                                                           tag='floating_ip')
        self.iptables_manager.apply()

    def process_snat_dnat_for_fip(self):
        """Apply FIP NAT rules, normalizing any failure to
        FloatingIpSetupException."""
        try:
            self.process_floating_ip_nat_rules()
        except Exception:
            # TODO(salv-orlando): Less broad catching
            raise n_exc.FloatingIpSetupException(
                'L3 agent failure to setup NAT for floating IPs')

    def _add_fip_addr_to_device(self, fip, device):
        """Configures the floating ip address on the device.
        """
        try:
            ip_cidr = common_utils.ip_to_cidr(fip['floating_ip_address'])
            device.addr.add(ip_cidr)
            return True
        except RuntimeError:
            # any exception occurred here should cause the floating IP
            # to be set in error state
            LOG.warn(_LW("Unable to configure IP address for "
                         "floating IP: %s"), fip['id'])

    def add_floating_ip(self, fip, interface_name, device):
        # Subclass hook; its return value is stored as the FIP status by
        # process_floating_ip_addresses.
        raise NotImplementedError()

    def remove_floating_ip(self, device, ip_cidr):
        """Remove the address from the device and flush its conntrack state."""
        device.addr.delete(ip_cidr)
        self.driver.delete_conntrack_state(namespace=self.ns_name, ip=ip_cidr)

    def get_router_cidrs(self, device):
        """Return the set of CIDRs currently configured on *device*."""
        return set([addr['cidr'] for addr in device.addr.list()])
    def process_floating_ip_addresses(self, interface_name):
        """Configure IP addresses on router's external gateway interface.

        Ensures addresses for existing floating IPs and cleans up
        those that should not longer be configured.

        :returns: dict mapping FIP id -> FLOATINGIP_STATUS_* value
        """
        fip_statuses = {}
        if interface_name is None:
            LOG.debug('No Interface for floating IPs router: %s',
                      self.router['id'])
            return fip_statuses
        device = ip_lib.IPDevice(interface_name, namespace=self.ns_name)
        existing_cidrs = self.get_router_cidrs(device)
        new_cidrs = set()
        floating_ips = self.get_floating_ips()
        # Loop once to ensure that floating ips are configured.
        for fip in floating_ips:
            fip_ip = fip['floating_ip_address']
            ip_cidr = common_utils.ip_to_cidr(fip_ip)
            new_cidrs.add(ip_cidr)
            fip_statuses[fip['id']] = l3_constants.FLOATINGIP_STATUS_ACTIVE
            if ip_cidr not in existing_cidrs:
                fip_statuses[fip['id']] = self.add_floating_ip(
                    fip, interface_name, device)
                LOG.debug('Floating ip %(id)s added, status %(status)s',
                          {'id': fip['id'],
                           'status': fip_statuses.get(fip['id'])})
        # Only host-route (/32) addresses are candidates for removal;
        # subnet-sized addresses on the device are left alone.
        fips_to_remove = (
            ip_cidr for ip_cidr in existing_cidrs - new_cidrs
            if common_utils.is_cidr_host(ip_cidr))
        for ip_cidr in fips_to_remove:
            self.remove_floating_ip(device, ip_cidr)
        return fip_statuses

    def configure_fip_addresses(self, interface_name):
        """Wrapper normalizing FIP address failures to
        FloatingIpSetupException."""
        try:
            return self.process_floating_ip_addresses(interface_name)
        except Exception:
            # TODO(salv-orlando): Less broad catching
            raise n_exc.FloatingIpSetupException('L3 agent failure to setup '
                                                 'floating IPs')

    def put_fips_in_error_state(self):
        """Return a status map marking every configured FIP as ERROR."""
        fip_statuses = {}
        for fip in self.router.get(l3_constants.FLOATINGIP_KEY, []):
            fip_statuses[fip['id']] = l3_constants.FLOATINGIP_STATUS_ERROR
        return fip_statuses
    def delete(self, agent):
        """Tear the router down: strip its config, stop radvd, remove the ns."""
        self.router['gw_port'] = None
        self.router[l3_constants.INTERFACE_KEY] = []
        self.router[l3_constants.FLOATINGIP_KEY] = []
        # Re-run process() against the emptied router dict so interfaces,
        # NAT rules and floating IPs are cleaned up before namespace removal.
        self.process(agent)
        self.radvd.disable()
        if self.router_namespace:
            self.router_namespace.delete()

    def _internal_network_added(self, ns_name, network_id, port_id,
                                fixed_ips, mac_address,
                                interface_name, prefix):
        """Plug, address and announce an internal port inside *ns_name*."""
        if not ip_lib.device_exists(interface_name,
                                    namespace=ns_name):
            self.driver.plug(network_id, port_id, interface_name, mac_address,
                             namespace=ns_name,
                             prefix=prefix)
        ip_cidrs = common_utils.fixed_ip_cidrs(fixed_ips)
        self.driver.init_l3(interface_name, ip_cidrs, namespace=ns_name)
        for fixed_ip in fixed_ips:
            ip_lib.send_gratuitous_arp(ns_name,
                                       interface_name,
                                       fixed_ip['ip_address'],
                                       self.agent_conf.send_arp_for_ha)

    def internal_network_added(self, port):
        """Add the router interface for *port* in this router's namespace."""
        network_id = port['network_id']
        port_id = port['id']
        fixed_ips = port['fixed_ips']
        mac_address = port['mac_address']
        interface_name = self.get_internal_device_name(port_id)
        self._internal_network_added(self.ns_name,
                                     network_id,
                                     port_id,
                                     fixed_ips,
                                     mac_address,
                                     interface_name,
                                     INTERNAL_DEV_PREFIX)

    def internal_network_removed(self, port):
        """Unplug the device for a removed internal port, if present."""
        interface_name = self.get_internal_device_name(port['id'])
        if ip_lib.device_exists(interface_name, namespace=self.ns_name):
            self.driver.unplug(interface_name, namespace=self.ns_name,
                               prefix=INTERNAL_DEV_PREFIX)

    def _get_existing_devices(self):
        """List device names currently present in the router namespace."""
        ip_wrapper = ip_lib.IPWrapper(namespace=self.ns_name)
        ip_devs = ip_wrapper.get_devices(exclude_loopback=True)
        return [ip_dev.name for ip_dev in ip_devs]

    def _process_internal_ports(self):
        """Sync plugged interfaces with the router's current internal ports."""
        existing_port_ids = set(p['id'] for p in self.internal_ports)
        internal_ports = self.router.get(l3_constants.INTERFACE_KEY, [])
        current_port_ids = set(p['id'] for p in internal_ports
                               if p['admin_state_up'])
        new_port_ids = current_port_ids - existing_port_ids
        new_ports = [p for p in internal_ports if p['id'] in new_port_ids]
        old_ports = [p for p in self.internal_ports
                     if p['id'] not in current_port_ids]
        new_ipv6_port = False
        old_ipv6_port = False
        for p in new_ports:
            self.internal_network_added(p)
            self.internal_ports.append(p)
            # Track whether any added port carries an IPv6 subnet so radvd
            # can be (re)configured below.
            if not new_ipv6_port:
                for subnet in p['subnets']:
                    if netaddr.IPNetwork(subnet['cidr']).version == 6:
                        new_ipv6_port = True
                        break
        for p in old_ports:
            self.internal_network_removed(p)
            self.internal_ports.remove(p)
            if not old_ipv6_port:
                for subnet in p['subnets']:
                    if netaddr.IPNetwork(subnet['cidr']).version == 6:
                        old_ipv6_port = True
                        break
        # Enable RA
        if new_ipv6_port or old_ipv6_port:
            self.radvd.enable(internal_ports)
        # Remove qr- devices that no longer correspond to a current port.
        existing_devices = self._get_existing_devices()
        current_internal_devs = set(n for n in existing_devices
                                    if n.startswith(INTERNAL_DEV_PREFIX))
        current_port_devs = set(self.get_internal_device_name(port_id)
                                for port_id in current_port_ids)
        stale_devs = current_internal_devs - current_port_devs
        for stale_dev in stale_devs:
            LOG.debug('Deleting stale internal router device: %s',
                      stale_dev)
            self.driver.unplug(stale_dev,
                               namespace=self.ns_name,
                               prefix=INTERNAL_DEV_PREFIX)
    def _list_floating_ip_cidrs(self):
        """Return the CIDRs of all floating IPs assigned to this router."""
        # Compute a list of addresses this router is supposed to have.
        # This avoids unnecessarily removing those addresses and
        # causing a momentarily network outage.
        floating_ips = self.get_floating_ips()
        return [common_utils.ip_to_cidr(ip['floating_ip_address'])
                for ip in floating_ips]

    def _plug_external_gateway(self, ex_gw_port, interface_name, ns_name):
        """Plug the gateway port on the external bridge if not yet present."""
        if not ip_lib.device_exists(interface_name, namespace=ns_name):
            self.driver.plug(ex_gw_port['network_id'],
                             ex_gw_port['id'],
                             interface_name,
                             ex_gw_port['mac_address'],
                             bridge=self.agent_conf.external_network_bridge,
                             namespace=ns_name,
                             prefix=EXTERNAL_DEV_PREFIX)

    def _external_gateway_added(self, ex_gw_port, interface_name,
                                ns_name, preserve_ips):
        """Plug, address and announce the external gateway interface."""
        self._plug_external_gateway(ex_gw_port, interface_name, ns_name)
        # Build up the interface and gateway IP addresses that
        # will be added to the interface.
        ip_cidrs = common_utils.fixed_ip_cidrs(ex_gw_port['fixed_ips'])
        gateway_ips = []
        enable_ra_on_gw = False
        if 'subnets' in ex_gw_port:
            gateway_ips = [subnet['gateway_ip']
                           for subnet in ex_gw_port['subnets']
                           if subnet['gateway_ip']]
        if self.use_ipv6 and not self.is_v6_gateway_set(gateway_ips):
            # No IPv6 gateway is available, but IPv6 is enabled.
            if self.agent_conf.ipv6_gateway:
                # ipv6_gateway configured, use address for default route.
                gateway_ips.append(self.agent_conf.ipv6_gateway)
            else:
                # ipv6_gateway is also not configured.
                # Use RA for default route.
                enable_ra_on_gw = True
        self.driver.init_l3(interface_name,
                            ip_cidrs,
                            namespace=ns_name,
                            gateway_ips=gateway_ips,
                            extra_subnets=ex_gw_port.get('extra_subnets', []),
                            preserve_ips=preserve_ips,
                            enable_ra_on_gw=enable_ra_on_gw)
        for fixed_ip in ex_gw_port['fixed_ips']:
            ip_lib.send_gratuitous_arp(ns_name,
                                       interface_name,
                                       fixed_ip['ip_address'],
                                       self.agent_conf.send_arp_for_ha)

    def is_v6_gateway_set(self, gateway_ips):
        """Check to see if list of gateway_ips has an IPv6 gateway.
        """
        # Note - don't require a try-except here as all
        # gateway_ips elements are valid addresses, if they exist.
        return any(netaddr.IPAddress(gw_ip).version == 6
                   for gw_ip in gateway_ips)

    def external_gateway_added(self, ex_gw_port, interface_name):
        """Handle a newly attached external gateway port."""
        preserve_ips = self._list_floating_ip_cidrs()
        self._external_gateway_added(
            ex_gw_port, interface_name, self.ns_name, preserve_ips)

    def external_gateway_updated(self, ex_gw_port, interface_name):
        """Handle an update to the external gateway port."""
        preserve_ips = self._list_floating_ip_cidrs()
        self._external_gateway_added(
            ex_gw_port, interface_name, self.ns_name, preserve_ips)

    def external_gateway_removed(self, ex_gw_port, interface_name):
        """Unplug the external gateway device from the external bridge."""
        self.driver.unplug(interface_name,
                           bridge=self.agent_conf.external_network_bridge,
                           namespace=self.ns_name,
                           prefix=EXTERNAL_DEV_PREFIX)
    def _process_external_gateway(self, ex_gw_port):
        """Reconcile the gateway state with *ex_gw_port*.

        Adds, updates or removes the gateway interface, unplugs stale
        external devices, then applies the pending SNAT action.
        """
        # TODO(Carl) Refactor to clarify roles of ex_gw_port vs self.ex_gw_port
        ex_gw_port_id = (ex_gw_port and ex_gw_port['id'] or
                         self.ex_gw_port and self.ex_gw_port['id'])
        interface_name = None
        if ex_gw_port_id:
            interface_name = self.get_external_device_name(ex_gw_port_id)
        if ex_gw_port:
            def _gateway_ports_equal(port1, port2):
                def _get_filtered_dict(d, ignore):
                    return dict((k, v) for k, v in d.iteritems()
                                if k not in ignore)

                # The binding host is ignored when deciding whether the
                # port changed.
                keys_to_ignore = set(['binding:host_id'])
                port1_filtered = _get_filtered_dict(port1, keys_to_ignore)
                port2_filtered = _get_filtered_dict(port2, keys_to_ignore)
                return port1_filtered == port2_filtered

            if not self.ex_gw_port:
                self.external_gateway_added(ex_gw_port, interface_name)
            elif not _gateway_ports_equal(ex_gw_port, self.ex_gw_port):
                self.external_gateway_updated(ex_gw_port, interface_name)
        elif not ex_gw_port and self.ex_gw_port:
            self.external_gateway_removed(self.ex_gw_port, interface_name)
        # Clean up external devices that belong to no current gateway port.
        existing_devices = self._get_existing_devices()
        stale_devs = [dev for dev in existing_devices
                      if dev.startswith(EXTERNAL_DEV_PREFIX)
                      and dev != interface_name]
        for stale_dev in stale_devs:
            LOG.debug('Deleting stale external router device: %s', stale_dev)
            self.driver.unplug(stale_dev,
                               bridge=self.agent_conf.external_network_bridge,
                               namespace=self.ns_name,
                               prefix=EXTERNAL_DEV_PREFIX)
        # Process SNAT rules for external gateway
        self.perform_snat_action(self._handle_router_snat_rules,
                                 interface_name)
def external_gateway_nat_rules(self, ex_gw_ip, interface_name):
mark = self.agent_conf.external_ingress_mark
rules = [('POSTROUTING', '! -i %(interface_name)s '
'! -o %(interface_name)s -m conntrack ! '
'--ctstate DNAT -j ACCEPT' %
{'interface_name': interface_name}),
('snat', '-o %s -j SNAT --to-source %s' %
(interface_name, ex_gw_ip)),
('snat', '-m mark ! --mark %s '
'-m conntrack --ctstate DNAT '
'-j SNAT --to-source %s' % (mark, ex_gw_ip))]
return rules
def external_gateway_mangle_rules(self, interface_name):
mark = self.agent_conf.external_ingress_mark
rules = [('mark', '-i %s -j MARK --set-xmark %s/%s' %
(interface_name, mark, EXTERNAL_INGRESS_MARK_MASK))]
return rules
def _empty_snat_chains(self, iptables_manager):
iptables_manager.ipv4['nat'].empty_chain('POSTROUTING')
iptables_manager.ipv4['nat'].empty_chain('snat')
iptables_manager.ipv4['mangle'].empty_chain('mark')
def _add_snat_rules(self, ex_gw_port, iptables_manager,
interface_name, action):
if action == 'add_rules' and ex_gw_port:
# ex_gw_port should not be None in this case
# NAT rules are added only if ex_gw_port has an IPv4 address
for ip_addr in ex_gw_port['fixed_ips']:
ex_gw_ip = ip_addr['ip_address']
if netaddr.IPAddress(ex_gw_ip).version == 4:
rules = self.external_gateway_nat_rules(ex_gw_ip,
interface_name)
for rule in rules:
iptables_manager.ipv4['nat'].add_rule(*rule)
rules = self.external_gateway_mangle_rules(interface_name)
for rule in rules:
iptables_manager.ipv4['mangle'].add_rule(*rule)
break
def _handle_router_snat_rules(self, ex_gw_port,
interface_name, action):
self._empty_snat_chains(self.iptables_manager)
self.iptables_manager.ipv4['nat'].add_rule('snat', '-j $float-snat')
self._add_snat_rules(ex_gw_port,
self.iptables_manager,
interface_name,
action)
def process_external(self, agent):
existing_floating_ips = self.floating_ips
try:
with self.iptables_manager.defer_apply():
ex_gw_port = self.get_ex_gw_port()
self._process_external_gateway(ex_gw_port)
# TODO(Carl) Return after setting existing_floating_ips and
# still call update_fip_statuses?
if not ex_gw_port:
return
# Process SNAT/DNAT rules and addresses for floating IPs
self.process_snat_dnat_for_fip()
# Once NAT rules for floating IPs are safely in place
# configure their addresses on the external gateway port
interface_name = self.get_external_device_interface_name(
ex_gw_port)
fip_statuses = self.configure_fip_addresses(interface_name)
except (n_exc.FloatingIpSetupException,
n_exc.IpTablesApplyException) as e:
# All floating IPs must be put in error state
LOG.exception(e)
fip_statuses = self.put_fips_in_error_state()
agent.update_fip_statuses(self, existing_floating_ips, fip_statuses)
    @common_utils.exception_logger()
    def process(self, agent):
        """Process updates to this router
        This method is the point where the agent requests that updates be
        applied to this router.
        :param agent: Passes the agent in order to send RPC messages.
        """
        # Internal ports first, then external gateway / floating IPs.
        self._process_internal_ports()
        self.process_external(agent)
        # Process static routes for router
        self.routes_updated()
        # Update ex_gw_port and enable_snat on the router info cache
        self.ex_gw_port = self.get_ex_gw_port()
        self.snat_ports = self.router.get(
            l3_constants.SNAT_ROUTER_INTF_KEY, [])
        self.enable_snat = self.router.get('enable_snat')
| 42.592905 | 79 | 0.599683 |
import netaddr
from oslo_log import log as logging
from neutron.agent.l3 import namespaces
from neutron.agent.linux import ip_lib
from neutron.agent.linux import iptables_manager
from neutron.agent.linux import ra
from neutron.common import constants as l3_constants
from neutron.common import exceptions as n_exc
from neutron.common import utils as common_utils
from neutron.i18n import _LW
LOG = logging.getLogger(__name__)
INTERNAL_DEV_PREFIX = namespaces.INTERNAL_DEV_PREFIX
EXTERNAL_DEV_PREFIX = namespaces.EXTERNAL_DEV_PREFIX
EXTERNAL_INGRESS_MARK_MASK = '0xffffffff'
class RouterInfo(object):
def __init__(self,
router_id,
router,
agent_conf,
interface_driver,
use_ipv6=False):
self.router_id = router_id
self.ex_gw_port = None
self._snat_enabled = None
self._snat_action = None
self.internal_ports = []
self.floating_ips = set()
self.router = router
self.use_ipv6 = use_ipv6
self.ns_name = None
self.router_namespace = None
if agent_conf.use_namespaces:
ns = namespaces.RouterNamespace(
router_id, agent_conf, interface_driver, use_ipv6)
self.router_namespace = ns
self.ns_name = ns.name
self.iptables_manager = iptables_manager.IptablesManager(
use_ipv6=use_ipv6,
namespace=self.ns_name)
self.routes = []
self.agent_conf = agent_conf
self.driver = interface_driver
self.radvd = None
def initialize(self, process_monitor):
self.process_monitor = process_monitor
self.radvd = ra.DaemonMonitor(self.router_id,
self.ns_name,
process_monitor,
self.get_internal_device_name)
if self.router_namespace:
self.router_namespace.create()
@property
def router(self):
return self._router
@router.setter
def router(self, value):
self._router = value
if not self._router:
return
self._snat_enabled = self._router.get('enable_snat', True)
# Set a SNAT action for the router
if self._router.get('gw_port'):
self._snat_action = ('add_rules' if self._snat_enabled
else 'remove_rules')
elif self.ex_gw_port:
# Gateway port was removed, remove rules
self._snat_action = 'remove_rules'
@property
def is_ha(self):
# TODO(Carl) Refactoring should render this obsolete. Remove it.
return False
def get_internal_device_name(self, port_id):
return (INTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN]
def get_external_device_name(self, port_id):
return (EXTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN]
def get_external_device_interface_name(self, ex_gw_port):
return self.get_external_device_name(ex_gw_port['id'])
def perform_snat_action(self, snat_callback, *args):
# Process SNAT rules for attached subnets
if self._snat_action:
snat_callback(self._router.get('gw_port'),
*args,
action=self._snat_action)
self._snat_action = None
def _update_routing_table(self, operation, route):
cmd = ['ip', 'route', operation, 'to', route['destination'],
'via', route['nexthop']]
ip_wrapper = ip_lib.IPWrapper(namespace=self.ns_name)
ip_wrapper.netns.execute(cmd, check_exit_code=False)
def routes_updated(self):
new_routes = self.router['routes']
old_routes = self.routes
adds, removes = common_utils.diff_list_of_dict(old_routes,
new_routes)
for route in adds:
LOG.debug("Added route entry is '%s'", route)
# remove replaced route from deleted route
for del_route in removes:
if route['destination'] == del_route['destination']:
removes.remove(del_route)
#replace success even if there is no existing route
self._update_routing_table('replace', route)
for route in removes:
LOG.debug("Removed route entry is '%s'", route)
self._update_routing_table('delete', route)
self.routes = new_routes
def get_ex_gw_port(self):
return self.router.get('gw_port')
def get_floating_ips(self):
return self.router.get(l3_constants.FLOATINGIP_KEY, [])
def floating_forward_rules(self, floating_ip, fixed_ip):
return [('PREROUTING', '-d %s -j DNAT --to %s' %
(floating_ip, fixed_ip)),
('OUTPUT', '-d %s -j DNAT --to %s' %
(floating_ip, fixed_ip)),
('float-snat', '-s %s -j SNAT --to %s' %
(fixed_ip, floating_ip))]
def process_floating_ip_nat_rules(self):
# Clear out all iptables rules for floating ips
self.iptables_manager.ipv4['nat'].clear_rules_by_tag('floating_ip')
floating_ips = self.get_floating_ips()
# Loop once to ensure that floating ips are configured.
for fip in floating_ips:
# Rebuild iptables rules for the floating ip.
fixed = fip['fixed_ip_address']
fip_ip = fip['floating_ip_address']
for chain, rule in self.floating_forward_rules(fip_ip, fixed):
self.iptables_manager.ipv4['nat'].add_rule(chain, rule,
tag='floating_ip')
self.iptables_manager.apply()
def process_snat_dnat_for_fip(self):
try:
self.process_floating_ip_nat_rules()
except Exception:
# TODO(salv-orlando): Less broad catching
raise n_exc.FloatingIpSetupException(
'L3 agent failure to setup NAT for floating IPs')
def _add_fip_addr_to_device(self, fip, device):
try:
ip_cidr = common_utils.ip_to_cidr(fip['floating_ip_address'])
device.addr.add(ip_cidr)
return True
except RuntimeError:
# any exception occurred here should cause the floating IP
# to be set in error state
LOG.warn(_LW("Unable to configure IP address for "
"floating IP: %s"), fip['id'])
def add_floating_ip(self, fip, interface_name, device):
raise NotImplementedError()
def remove_floating_ip(self, device, ip_cidr):
device.addr.delete(ip_cidr)
self.driver.delete_conntrack_state(namespace=self.ns_name, ip=ip_cidr)
def get_router_cidrs(self, device):
return set([addr['cidr'] for addr in device.addr.list()])
def process_floating_ip_addresses(self, interface_name):
fip_statuses = {}
if interface_name is None:
LOG.debug('No Interface for floating IPs router: %s',
self.router['id'])
return fip_statuses
device = ip_lib.IPDevice(interface_name, namespace=self.ns_name)
existing_cidrs = self.get_router_cidrs(device)
new_cidrs = set()
floating_ips = self.get_floating_ips()
# Loop once to ensure that floating ips are configured.
for fip in floating_ips:
fip_ip = fip['floating_ip_address']
ip_cidr = common_utils.ip_to_cidr(fip_ip)
new_cidrs.add(ip_cidr)
fip_statuses[fip['id']] = l3_constants.FLOATINGIP_STATUS_ACTIVE
if ip_cidr not in existing_cidrs:
fip_statuses[fip['id']] = self.add_floating_ip(
fip, interface_name, device)
LOG.debug('Floating ip %(id)s added, status %(status)s',
{'id': fip['id'],
'status': fip_statuses.get(fip['id'])})
fips_to_remove = (
ip_cidr for ip_cidr in existing_cidrs - new_cidrs
if common_utils.is_cidr_host(ip_cidr))
for ip_cidr in fips_to_remove:
self.remove_floating_ip(device, ip_cidr)
return fip_statuses
def configure_fip_addresses(self, interface_name):
try:
return self.process_floating_ip_addresses(interface_name)
except Exception:
# TODO(salv-orlando): Less broad catching
raise n_exc.FloatingIpSetupException('L3 agent failure to setup '
'floating IPs')
def put_fips_in_error_state(self):
fip_statuses = {}
for fip in self.router.get(l3_constants.FLOATINGIP_KEY, []):
fip_statuses[fip['id']] = l3_constants.FLOATINGIP_STATUS_ERROR
return fip_statuses
def delete(self, agent):
self.router['gw_port'] = None
self.router[l3_constants.INTERFACE_KEY] = []
self.router[l3_constants.FLOATINGIP_KEY] = []
self.process(agent)
self.radvd.disable()
if self.router_namespace:
self.router_namespace.delete()
def _internal_network_added(self, ns_name, network_id, port_id,
fixed_ips, mac_address,
interface_name, prefix):
if not ip_lib.device_exists(interface_name,
namespace=ns_name):
self.driver.plug(network_id, port_id, interface_name, mac_address,
namespace=ns_name,
prefix=prefix)
ip_cidrs = common_utils.fixed_ip_cidrs(fixed_ips)
self.driver.init_l3(interface_name, ip_cidrs, namespace=ns_name)
for fixed_ip in fixed_ips:
ip_lib.send_gratuitous_arp(ns_name,
interface_name,
fixed_ip['ip_address'],
self.agent_conf.send_arp_for_ha)
def internal_network_added(self, port):
network_id = port['network_id']
port_id = port['id']
fixed_ips = port['fixed_ips']
mac_address = port['mac_address']
interface_name = self.get_internal_device_name(port_id)
self._internal_network_added(self.ns_name,
network_id,
port_id,
fixed_ips,
mac_address,
interface_name,
INTERNAL_DEV_PREFIX)
def internal_network_removed(self, port):
interface_name = self.get_internal_device_name(port['id'])
if ip_lib.device_exists(interface_name, namespace=self.ns_name):
self.driver.unplug(interface_name, namespace=self.ns_name,
prefix=INTERNAL_DEV_PREFIX)
def _get_existing_devices(self):
ip_wrapper = ip_lib.IPWrapper(namespace=self.ns_name)
ip_devs = ip_wrapper.get_devices(exclude_loopback=True)
return [ip_dev.name for ip_dev in ip_devs]
def _process_internal_ports(self):
existing_port_ids = set(p['id'] for p in self.internal_ports)
internal_ports = self.router.get(l3_constants.INTERFACE_KEY, [])
current_port_ids = set(p['id'] for p in internal_ports
if p['admin_state_up'])
new_port_ids = current_port_ids - existing_port_ids
new_ports = [p for p in internal_ports if p['id'] in new_port_ids]
old_ports = [p for p in self.internal_ports
if p['id'] not in current_port_ids]
new_ipv6_port = False
old_ipv6_port = False
for p in new_ports:
self.internal_network_added(p)
self.internal_ports.append(p)
if not new_ipv6_port:
for subnet in p['subnets']:
if netaddr.IPNetwork(subnet['cidr']).version == 6:
new_ipv6_port = True
break
for p in old_ports:
self.internal_network_removed(p)
self.internal_ports.remove(p)
if not old_ipv6_port:
for subnet in p['subnets']:
if netaddr.IPNetwork(subnet['cidr']).version == 6:
old_ipv6_port = True
break
# Enable RA
if new_ipv6_port or old_ipv6_port:
self.radvd.enable(internal_ports)
existing_devices = self._get_existing_devices()
current_internal_devs = set(n for n in existing_devices
if n.startswith(INTERNAL_DEV_PREFIX))
current_port_devs = set(self.get_internal_device_name(port_id)
for port_id in current_port_ids)
stale_devs = current_internal_devs - current_port_devs
for stale_dev in stale_devs:
LOG.debug('Deleting stale internal router device: %s',
stale_dev)
self.driver.unplug(stale_dev,
namespace=self.ns_name,
prefix=INTERNAL_DEV_PREFIX)
def _list_floating_ip_cidrs(self):
# Compute a list of addresses this router is supposed to have.
# This avoids unnecessarily removing those addresses and
# causing a momentarily network outage.
floating_ips = self.get_floating_ips()
return [common_utils.ip_to_cidr(ip['floating_ip_address'])
for ip in floating_ips]
def _plug_external_gateway(self, ex_gw_port, interface_name, ns_name):
if not ip_lib.device_exists(interface_name, namespace=ns_name):
self.driver.plug(ex_gw_port['network_id'],
ex_gw_port['id'],
interface_name,
ex_gw_port['mac_address'],
bridge=self.agent_conf.external_network_bridge,
namespace=ns_name,
prefix=EXTERNAL_DEV_PREFIX)
def _external_gateway_added(self, ex_gw_port, interface_name,
ns_name, preserve_ips):
self._plug_external_gateway(ex_gw_port, interface_name, ns_name)
# Build up the interface and gateway IP addresses that
# will be added to the interface.
ip_cidrs = common_utils.fixed_ip_cidrs(ex_gw_port['fixed_ips'])
gateway_ips = []
enable_ra_on_gw = False
if 'subnets' in ex_gw_port:
gateway_ips = [subnet['gateway_ip']
for subnet in ex_gw_port['subnets']
if subnet['gateway_ip']]
if self.use_ipv6 and not self.is_v6_gateway_set(gateway_ips):
# No IPv6 gateway is available, but IPv6 is enabled.
if self.agent_conf.ipv6_gateway:
# ipv6_gateway configured, use address for default route.
gateway_ips.append(self.agent_conf.ipv6_gateway)
else:
# ipv6_gateway is also not configured.
# Use RA for default route.
enable_ra_on_gw = True
self.driver.init_l3(interface_name,
ip_cidrs,
namespace=ns_name,
gateway_ips=gateway_ips,
extra_subnets=ex_gw_port.get('extra_subnets', []),
preserve_ips=preserve_ips,
enable_ra_on_gw=enable_ra_on_gw)
for fixed_ip in ex_gw_port['fixed_ips']:
ip_lib.send_gratuitous_arp(ns_name,
interface_name,
fixed_ip['ip_address'],
self.agent_conf.send_arp_for_ha)
def is_v6_gateway_set(self, gateway_ips):
# Note - don't require a try-except here as all
return any(netaddr.IPAddress(gw_ip).version == 6
for gw_ip in gateway_ips)
def external_gateway_added(self, ex_gw_port, interface_name):
preserve_ips = self._list_floating_ip_cidrs()
self._external_gateway_added(
ex_gw_port, interface_name, self.ns_name, preserve_ips)
def external_gateway_updated(self, ex_gw_port, interface_name):
preserve_ips = self._list_floating_ip_cidrs()
self._external_gateway_added(
ex_gw_port, interface_name, self.ns_name, preserve_ips)
def external_gateway_removed(self, ex_gw_port, interface_name):
self.driver.unplug(interface_name,
bridge=self.agent_conf.external_network_bridge,
namespace=self.ns_name,
prefix=EXTERNAL_DEV_PREFIX)
def _process_external_gateway(self, ex_gw_port):
ex_gw_port_id = (ex_gw_port and ex_gw_port['id'] or
self.ex_gw_port and self.ex_gw_port['id'])
interface_name = None
if ex_gw_port_id:
interface_name = self.get_external_device_name(ex_gw_port_id)
if ex_gw_port:
def _gateway_ports_equal(port1, port2):
def _get_filtered_dict(d, ignore):
return dict((k, v) for k, v in d.iteritems()
if k not in ignore)
keys_to_ignore = set(['binding:host_id'])
port1_filtered = _get_filtered_dict(port1, keys_to_ignore)
port2_filtered = _get_filtered_dict(port2, keys_to_ignore)
return port1_filtered == port2_filtered
if not self.ex_gw_port:
self.external_gateway_added(ex_gw_port, interface_name)
elif not _gateway_ports_equal(ex_gw_port, self.ex_gw_port):
self.external_gateway_updated(ex_gw_port, interface_name)
elif not ex_gw_port and self.ex_gw_port:
self.external_gateway_removed(self.ex_gw_port, interface_name)
existing_devices = self._get_existing_devices()
stale_devs = [dev for dev in existing_devices
if dev.startswith(EXTERNAL_DEV_PREFIX)
and dev != interface_name]
for stale_dev in stale_devs:
LOG.debug('Deleting stale external router device: %s', stale_dev)
self.driver.unplug(stale_dev,
bridge=self.agent_conf.external_network_bridge,
namespace=self.ns_name,
prefix=EXTERNAL_DEV_PREFIX)
self.perform_snat_action(self._handle_router_snat_rules,
interface_name)
def external_gateway_nat_rules(self, ex_gw_ip, interface_name):
mark = self.agent_conf.external_ingress_mark
rules = [('POSTROUTING', '! -i %(interface_name)s '
'! -o %(interface_name)s -m conntrack ! '
'--ctstate DNAT -j ACCEPT' %
{'interface_name': interface_name}),
('snat', '-o %s -j SNAT --to-source %s' %
(interface_name, ex_gw_ip)),
('snat', '-m mark ! --mark %s '
'-m conntrack --ctstate DNAT '
'-j SNAT --to-source %s' % (mark, ex_gw_ip))]
return rules
def external_gateway_mangle_rules(self, interface_name):
mark = self.agent_conf.external_ingress_mark
rules = [('mark', '-i %s -j MARK --set-xmark %s/%s' %
(interface_name, mark, EXTERNAL_INGRESS_MARK_MASK))]
return rules
def _empty_snat_chains(self, iptables_manager):
iptables_manager.ipv4['nat'].empty_chain('POSTROUTING')
iptables_manager.ipv4['nat'].empty_chain('snat')
iptables_manager.ipv4['mangle'].empty_chain('mark')
def _add_snat_rules(self, ex_gw_port, iptables_manager,
interface_name, action):
if action == 'add_rules' and ex_gw_port:
for ip_addr in ex_gw_port['fixed_ips']:
ex_gw_ip = ip_addr['ip_address']
if netaddr.IPAddress(ex_gw_ip).version == 4:
rules = self.external_gateway_nat_rules(ex_gw_ip,
interface_name)
for rule in rules:
iptables_manager.ipv4['nat'].add_rule(*rule)
rules = self.external_gateway_mangle_rules(interface_name)
for rule in rules:
iptables_manager.ipv4['mangle'].add_rule(*rule)
break
def _handle_router_snat_rules(self, ex_gw_port,
interface_name, action):
self._empty_snat_chains(self.iptables_manager)
self.iptables_manager.ipv4['nat'].add_rule('snat', '-j $float-snat')
self._add_snat_rules(ex_gw_port,
self.iptables_manager,
interface_name,
action)
def process_external(self, agent):
existing_floating_ips = self.floating_ips
try:
with self.iptables_manager.defer_apply():
ex_gw_port = self.get_ex_gw_port()
self._process_external_gateway(ex_gw_port)
if not ex_gw_port:
return
self.process_snat_dnat_for_fip()
interface_name = self.get_external_device_interface_name(
ex_gw_port)
fip_statuses = self.configure_fip_addresses(interface_name)
except (n_exc.FloatingIpSetupException,
n_exc.IpTablesApplyException) as e:
LOG.exception(e)
fip_statuses = self.put_fips_in_error_state()
agent.update_fip_statuses(self, existing_floating_ips, fip_statuses)
@common_utils.exception_logger()
def process(self, agent):
self._process_internal_ports()
self.process_external(agent)
self.routes_updated()
self.ex_gw_port = self.get_ex_gw_port()
self.snat_ports = self.router.get(
l3_constants.SNAT_ROUTER_INTF_KEY, [])
self.enable_snat = self.router.get('enable_snat')
| true | true |
f72117735968d8ce8ce83ea74bae1b18b3eb310b | 284 | py | Python | app/user/urls.py | redoCehT/recipe-app-api | c529f641adf1a7d5af39bf9dc832b68af3348176 | [
"MIT"
] | null | null | null | app/user/urls.py | redoCehT/recipe-app-api | c529f641adf1a7d5af39bf9dc832b68af3348176 | [
"MIT"
] | null | null | null | app/user/urls.py | redoCehT/recipe-app-api | c529f641adf1a7d5af39bf9dc832b68af3348176 | [
"MIT"
] | null | null | null | from django.urls import path
from user import views
# URL namespace: routes reverse as "user:create", "user:token", "user:me".
app_name = "user"
urlpatterns = [
    path("create", views.CreateUserView.as_view(), name="create"),
    path("token", views.CreateTokenView.as_view(), name="token"),
    path("me", views.ManageUserView.as_view(), name="me"),
]
| 21.846154 | 66 | 0.68662 | from django.urls import path
from user import views
app_name = "user"
urlpatterns = [
path("create", views.CreateUserView.as_view(), name="create"),
path("token", views.CreateTokenView.as_view(), name="token"),
path("me", views.ManageUserView.as_view(), name="me"),
]
| true | true |
f721193ca67842d2930d048034dca9a2d38b368b | 7,370 | py | Python | elodie/media/media.py | phifogg/elodie | 6ca24c10b2b3fa28169976e04a9fd2f524250a44 | [
"Apache-2.0"
] | null | null | null | elodie/media/media.py | phifogg/elodie | 6ca24c10b2b3fa28169976e04a9fd2f524250a44 | [
"Apache-2.0"
] | 1 | 2017-01-07T06:30:43.000Z | 2017-01-19T12:47:07.000Z | elodie/media/media.py | phifogg/elodie | 6ca24c10b2b3fa28169976e04a9fd2f524250a44 | [
"Apache-2.0"
] | null | null | null | """
The media module provides a base :class:`Media` class for media objects that
are tracked by Elodie. The Media class provides some base functionality used
by all the media types, but isn't itself used to represent anything. Its
sub-classes (:class:`~elodie.media.audio.Audio`,
:class:`~elodie.media.photo.Photo`, and :class:`~elodie.media.video.Video`)
are used to represent the actual files.
.. moduleauthor:: Jaisen Mathai <jaisen@jmathai.com>
"""
from __future__ import print_function
# load modules
from elodie import constants
from elodie.dependencies import get_exiftool
from elodie.external.pyexiftool import ExifTool
from elodie.media.base import Base
class Media(Base):
    """The base class for all media objects.
    :param str source: The fully qualified path to the video file.
    """
    __name__ = 'Media'
    # Maps coordinate type to the attribute naming its hemisphere reference.
    d_coordinates = {
        'latitude': 'latitude_ref',
        'longitude': 'longitude_ref'
    }
    def __init__(self, source=None):
        super(Media, self).__init__(source)
        # Ordered EXIF keys consulted when reading the date a media was taken.
        self.exif_map = {
            'date_taken': [
                'EXIF:DateTimeOriginal',
                'EXIF:CreateDate',
                'EXIF:ModifyDate'
            ]
        }
        self.album_keys = ['XMP-xmpDM:Album', 'XMP:Album']
        self.title_key = 'XMP:Title'
        self.latitude_keys = ['EXIF:GPSLatitude']
        self.longitude_keys = ['EXIF:GPSLongitude']
        self.latitude_ref_key = 'EXIF:GPSLatitudeRef'
        self.longitude_ref_key = 'EXIF:GPSLongitudeRef'
        self.set_gps_ref = True
        # Extra arguments passed to every exiftool invocation.
        self.exiftool_addedargs = [
            '-overwrite_original',
            u'-config',
            u'"{}"'.format(constants.exiftool_config)
        ]
    def get_album(self):
        """Get album from EXIF
        :returns: None or string
        """
        if(not self.is_valid()):
            return None
        exiftool_attributes = self.get_exiftool_attributes()
        if exiftool_attributes is None:
            return None
        # First matching key in album_keys order wins.
        for album_key in self.album_keys:
            if album_key in exiftool_attributes:
                return exiftool_attributes[album_key]
        return None
    def get_coordinate(self, type='latitude'):
        """Get latitude or longitude of media from EXIF
        :param str type: Type of coordinate to get. Either "latitude" or
            "longitude".
        :returns: float or None if not present in EXIF or a non-photo file
        """
        exif = self.get_exiftool_attributes()
        if not exif:
            return None
        # The lat/lon _keys array has an order of precedence.
        # The first key is writable and we will give the writable
        # key precence when reading.
        direction_multiplier = 1.0
        for key in self.latitude_keys + self.longitude_keys:
            if key not in exif:
                continue
            # Cast coordinate to a float due to a bug in exiftool's
            # -json output format.
            # https://github.com/jmathai/elodie/issues/171
            # http://u88.n24.queensu.ca/exiftool/forum/index.php/topic,7952.0.html #noqa
            this_coordinate = float(exif[key])
            # TODO: verify that we need to check ref key
            # when self.set_gps_ref != True
            if type == 'latitude' and key in self.latitude_keys:
                # Southern hemisphere values are stored positive with an 'S' ref.
                if self.latitude_ref_key in exif and \
                        exif[self.latitude_ref_key] == 'S':
                    direction_multiplier = -1.0
                return this_coordinate * direction_multiplier
            elif type == 'longitude' and key in self.longitude_keys:
                # Western hemisphere values are stored positive with a 'W' ref.
                if self.longitude_ref_key in exif and \
                        exif[self.longitude_ref_key] == 'W':
                    direction_multiplier = -1.0
                return this_coordinate * direction_multiplier
        return None
    def get_exiftool_attributes(self):
        """Get attributes for the media object from exiftool.
        :returns: dict, or False if exiftool was not available.
        """
        source = self.source
        exiftool = get_exiftool()
        if(exiftool is None):
            return False
        with ExifTool(addedargs=self.exiftool_addedargs) as et:
            metadata = et.get_metadata(source)
            if not metadata:
                return False
        return metadata
    def get_title(self):
        """Get the title for a photo of video
        :returns: str or None if no title is set or not a valid media type
        """
        if(not self.is_valid()):
            return None
        exiftool_attributes = self.get_exiftool_attributes()
        if exiftool_attributes is None:
            return None
        if(self.title_key not in exiftool_attributes):
            return None
        return exiftool_attributes[self.title_key]
    def reset_cache(self):
        """Resets any internal cache
        """
        self.exiftool_attributes = None
        super(Media, self).reset_cache()
    def set_album(self, album):
        """Set album for a photo
        :param str album: Name of album
        :returns: bool
        """
        if(not self.is_valid()):
            return None
        tags = {self.album_keys[0]: album}
        status = self.__set_tags(tags)
        self.reset_cache()
        return status
    def set_date_taken(self, time):
        """Set the date/time a photo was taken.
        :param datetime time: datetime object of when the photo was taken
        :returns: bool
        """
        if(time is None):
            return False
        tags = {}
        formatted_time = time.strftime('%Y:%m:%d %H:%M:%S')
        # Write the same timestamp to every date-related EXIF key.
        for key in self.exif_map['date_taken']:
            tags[key] = formatted_time
        status = self.__set_tags(tags)
        self.reset_cache()
        return status
    def set_location(self, latitude, longitude):
        """Write GPS coordinates to the media's EXIF tags.

        :param float latitude: Signed latitude (negative = south).
        :param float longitude: Signed longitude (negative = west).
        :returns: bool, or None for an invalid media file
        """
        if(not self.is_valid()):
            return None
        # The lat/lon _keys array has an order of precedence.
        # The first key is writable and we will give the writable
        # key precence when reading.
        tags = {
            self.latitude_keys[0]: latitude,
            self.longitude_keys[0]: longitude,
        }
        # If self.set_gps_ref == True then it means we are writing an EXIF
        # GPS tag which requires us to set the reference key.
        # That's because the lat/lon are absolute values.
        if self.set_gps_ref:
            if latitude < 0:
                tags[self.latitude_ref_key] = 'S'
            if longitude < 0:
                tags[self.longitude_ref_key] = 'W'
        status = self.__set_tags(tags)
        self.reset_cache()
        return status
    def set_title(self, title):
        """Set title for a photo.
        :param str title: Title of the photo.
        :returns: bool
        """
        if(not self.is_valid()):
            return None
        if(title is None):
            return None
        tags = {self.title_key: title}
        status = self.__set_tags(tags)
        self.reset_cache()
        return status
    def __set_tags(self, tags):
        # Write the given tag dict via exiftool; an empty status string
        # from exiftool indicates failure.
        if(not self.is_valid()):
            return None
        source = self.source
        status = ''
        with ExifTool(addedargs=self.exiftool_addedargs) as et:
            status = et.set_tags(tags, source)
        return status != ''
| 30.081633 | 88 | 0.589281 | from __future__ import print_function
from elodie import constants
from elodie.dependencies import get_exiftool
from elodie.external.pyexiftool import ExifTool
from elodie.media.base import Base
class Media(Base):
__name__ = 'Media'
d_coordinates = {
'latitude': 'latitude_ref',
'longitude': 'longitude_ref'
}
def __init__(self, source=None):
super(Media, self).__init__(source)
self.exif_map = {
'date_taken': [
'EXIF:DateTimeOriginal',
'EXIF:CreateDate',
'EXIF:ModifyDate'
]
}
self.album_keys = ['XMP-xmpDM:Album', 'XMP:Album']
self.title_key = 'XMP:Title'
self.latitude_keys = ['EXIF:GPSLatitude']
self.longitude_keys = ['EXIF:GPSLongitude']
self.latitude_ref_key = 'EXIF:GPSLatitudeRef'
self.longitude_ref_key = 'EXIF:GPSLongitudeRef'
self.set_gps_ref = True
self.exiftool_addedargs = [
'-overwrite_original',
u'-config',
u'"{}"'.format(constants.exiftool_config)
]
def get_album(self):
if(not self.is_valid()):
return None
exiftool_attributes = self.get_exiftool_attributes()
if exiftool_attributes is None:
return None
for album_key in self.album_keys:
if album_key in exiftool_attributes:
return exiftool_attributes[album_key]
return None
def get_coordinate(self, type='latitude'):
exif = self.get_exiftool_attributes()
if not exif:
return None
direction_multiplier = 1.0
for key in self.latitude_keys + self.longitude_keys:
if key not in exif:
continue
# -json output format.
# https://github.com/jmathai/elodie/issues/171
# http://u88.n24.queensu.ca/exiftool/forum/index.php/topic,7952.0.html #noqa
this_coordinate = float(exif[key])
# TODO: verify that we need to check ref key
# when self.set_gps_ref != True
if type == 'latitude' and key in self.latitude_keys:
if self.latitude_ref_key in exif and \
exif[self.latitude_ref_key] == 'S':
direction_multiplier = -1.0
return this_coordinate * direction_multiplier
elif type == 'longitude' and key in self.longitude_keys:
if self.longitude_ref_key in exif and \
exif[self.longitude_ref_key] == 'W':
direction_multiplier = -1.0
return this_coordinate * direction_multiplier
return None
def get_exiftool_attributes(self):
source = self.source
exiftool = get_exiftool()
if(exiftool is None):
return False
with ExifTool(addedargs=self.exiftool_addedargs) as et:
metadata = et.get_metadata(source)
if not metadata:
return False
return metadata
def get_title(self):
if(not self.is_valid()):
return None
exiftool_attributes = self.get_exiftool_attributes()
if exiftool_attributes is None:
return None
if(self.title_key not in exiftool_attributes):
return None
return exiftool_attributes[self.title_key]
def reset_cache(self):
self.exiftool_attributes = None
super(Media, self).reset_cache()
def set_album(self, album):
if(not self.is_valid()):
return None
tags = {self.album_keys[0]: album}
status = self.__set_tags(tags)
self.reset_cache()
return status
def set_date_taken(self, time):
if(time is None):
return False
tags = {}
formatted_time = time.strftime('%Y:%m:%d %H:%M:%S')
for key in self.exif_map['date_taken']:
tags[key] = formatted_time
status = self.__set_tags(tags)
self.reset_cache()
return status
def set_location(self, latitude, longitude):
if(not self.is_valid()):
return None
# The lat/lon _keys array has an order of precedence.
# The first key is writable and we will give the writable
# key precence when reading.
tags = {
self.latitude_keys[0]: latitude,
self.longitude_keys[0]: longitude,
}
# If self.set_gps_ref == True then it means we are writing an EXIF
# GPS tag which requires us to set the reference key.
# That's because the lat/lon are absolute values.
if self.set_gps_ref:
if latitude < 0:
tags[self.latitude_ref_key] = 'S'
if longitude < 0:
tags[self.longitude_ref_key] = 'W'
status = self.__set_tags(tags)
self.reset_cache()
return status
def set_title(self, title):
if(not self.is_valid()):
return None
if(title is None):
return None
tags = {self.title_key: title}
status = self.__set_tags(tags)
self.reset_cache()
return status
def __set_tags(self, tags):
if(not self.is_valid()):
return None
source = self.source
status = ''
with ExifTool(addedargs=self.exiftool_addedargs) as et:
status = et.set_tags(tags, source)
return status != ''
| true | true |
f72119fc9448d568049ba81365d0643f5fc6eaa0 | 7,330 | py | Python | src/satlas2/models/hfsModel.py | woutergins/satlas2 | 51afdc445c8c603372bb26abe19d1eb7bd3f3f24 | [
"MIT"
] | null | null | null | src/satlas2/models/hfsModel.py | woutergins/satlas2 | 51afdc445c8c603372bb26abe19d1eb7bd3f3f24 | [
"MIT"
] | null | null | null | src/satlas2/models/hfsModel.py | woutergins/satlas2 | 51afdc445c8c603372bb26abe19d1eb7bd3f3f24 | [
"MIT"
] | null | null | null | from satlas2.core import Model, Parameter
import numpy as np
from scipy.special import wofz
from sympy.physics.wigner import wigner_6j, wigner_3j
__all__ = ['HFS']
sqrt2 = 2 ** 0.5
sqrt2log2t2 = 2 * np.sqrt(2 * np.log(2))
log2 = np.log(2)
class HFS(Model):
    def __init__(self, I, J, A=[0, 0], B=[0, 0], C=[0, 0], df=0, fwhm=50, bkg=1, name=None, N=None, offset=0, poisson=0, scale=1.0, racah=True, prefunc=None):
        """Build a hyperfine-structure model for nuclear spin I and the
        electronic spins J = (J_lower, J_upper).

        A, B, C are the [lower, upper] hyperfine parameters, df the centroid,
        fwhm the Gaussian/Lorentzian widths, bkg a constant background.
        When N is given, N sidepeaks at spacing `offset` with Poisson-decaying
        amplitudes are added and fShifted is used; otherwise fUnshifted.
        With racah=True the relative line intensities are fixed to Racah
        values and only the overall scale is fitted.
        """
        super().__init__(name=name, prefunc=prefunc)
        J1, J2 = J
        # All allowed total angular momenta F = |I-J| .. I+J for each level.
        lower_F = np.arange(abs(I - J1), I+J1+1, 1)
        upper_F = np.arange(abs(I - J2), I+J2+1, 1)
        self.lines = []
        self.intensities = {}
        self.scaling_Al = {}
        self.scaling_Bl = {}
        self.scaling_Cl = {}
        self.scaling_Au = {}
        self.scaling_Bu = {}
        self.scaling_Cu = {}
        for i, F1 in enumerate(lower_F):
            for j, F2 in enumerate(upper_F):
                # Dipole selection rule: |dF| <= 1, excluding F=0 -> F=0.
                if abs(F2 - F1) <= 1 and not F2 == F1 == 0.0:
                    # Encode half-integer F as "2F_2" so line names stay valid
                    # parameter-name fragments.
                    if F1 % 1 == 0:
                        F1_str = '{:.0f}'.format(F1)
                    else:
                        F1_str = '{:.0f}_2'.format(2*F1)
                    if F2 % 1 == 0:
                        F2_str = '{:.0f}'.format(F2)
                    else:
                        F2_str = '{:.0f}_2'.format(2*F2)
                    line = '{}to{}'.format(F1_str, F2_str)
                    self.lines.append(line)
                    # Multipole scaling coefficients for lower and upper level.
                    C1, D1, E1 = self.calcShift(I, J1, F1)
                    C2, D2, E2 = self.calcShift(I, J2, F2)
                    self.scaling_Al[line] = C1
                    self.scaling_Bl[line] = D1
                    self.scaling_Cl[line] = E1
                    self.scaling_Au[line] = C2
                    self.scaling_Bu[line] = D2
                    self.scaling_Cu[line] = E2
                    intens = float((2 * F1 + 1) * (2 * F2 + 1) * \
                            wigner_6j(J2, F2, I, F1, J1, 1.0) ** 2) # DO NOT REMOVE CAST TO FLOAT!!!
                    self.intensities['Amp'+line] = Parameter(value=intens, min=0, vary=not racah)
        # Normalise intensities so the strongest line has amplitude 1.
        norm = max([p.value for p in self.intensities.values()])
        for n, v in self.intensities.items():
            v.value /= norm
        pars = {'centroid': Parameter(value=df),
                'Al': Parameter(value=A[0]),
                'Au': Parameter(value=A[1]),
                'Bl': Parameter(value=B[0]),
                'Bu': Parameter(value=B[1]),
                'Cl': Parameter(value=C[0], vary=False),
                'Cu': Parameter(value=C[1], vary=False),
                'bkg': Parameter(value=bkg),
                'FWHMG': Parameter(value=fwhm, min=0.01),
                'FWHML': Parameter(value=fwhm, min=0.01),
                'scale': Parameter(value=scale, min=0, vary=racah)}
        if N is not None:
            # Sidepeak description: N extra peaks, Poisson-weighted amplitudes.
            pars['N'] = Parameter(value=N, vary=False)
            pars['Offset'] = Parameter(value=offset)
            pars['Poisson'] = Parameter(value=poisson, min=0, max=1)
            self.f = self.fShifted
        else:
            self.f = self.fUnshifted
        pars = {**pars, **self.intensities}
        self.params = pars
        # Freeze hyperfine parameters that cannot contribute for small I or J.
        if I < 2 or J1 < 2:
            self.params['Cl'].vary = False
        if I < 2 or J2 < 2:
            self.params['Cu'].vary = False
        if I < 1 or J1 < 1:
            self.params['Bl'].vary = False
        if I < 1 or J2 < 1:
            self.params['Bu'].vary = False
        if I == 0 or J1 == 0:
            self.params['Al'].vary = False
        if I == 0 or J2 == 0:
            self.params['Au'].vary = False
        self.xtransformed = None
        self.xhashed = None
def fUnshifted(self, x):
centroid = self.params['centroid'].value
Al = self.params['Al'].value
Au = self.params['Au'].value
Bl = self.params['Bl'].value
Bu = self.params['Bu'].value
Cl = self.params['Cl'].value
Cu = self.params['Cu'].value
FWHMG = self.params['FWHMG'].value
FWHML = self.params['FWHML'].value
scale = self.params['scale'].value
bkg = self.params['bkg'].value
result = np.zeros(len(x))
x = self.transform(x)
for line in self.lines:
pos = centroid + Au * self.scaling_Au[line] + Bu * self.scaling_Bu[line] + Cu * self.scaling_Cu[line] - Al * self.scaling_Al[line] - Bl * self.scaling_Bl[line] - Cl * self.scaling_Cl[line]
result += self.params['Amp' + line].value * self.peak(x - pos, FWHMG, FWHML)
return scale * result + bkg
def fShifted(self, x):
centroid = self.params['centroid'].value
Al = self.params['Al'].value
Au = self.params['Au'].value
Bl = self.params['Bl'].value
Bu = self.params['Bu'].value
FWHMG = self.params['FWHMG'].value
FWHML = self.params['FWHML'].value
scale = self.params['scale'].value
N = self.params['N'].value
offset = self.params['Offset'].value
poisson = self.params['Poisson'].value
bkg = self.params['bkg'].value
result = np.zeros(len(x))
for line in self.lines:
pos = centroid + Au * self.scaling_Au[line] + Bu * self.scaling_Bu[line] + Cu * self.scaling_Cu[line] - Al * self.scaling_Al[line] - Bl * self.scaling_Bl[line] - Cl * self.scaling_Cl[line]
for i in range(N + 1):
if self.prefunc:
result += self.params['Amp' + line].value * self.peak(self.prefunc(x - i * offset) - pos, FWHMG, FWHML) * (poisson**i)/np.math.factorial(i)
else:
result += self.params['Amp' + line].value * self.peak(x - pos - i * offset, FWHMG, FWHML) * (poisson**i)/np.math.factorial(i)
return scale * result + bkg
def peak(self, x, FWHMG, FWHML):
z = self.preparePeak(x, FWHMG, FWHML)
n = self.norm(FWHML, FWHMG)
ret = wofz(z).real
return ret/n
def norm(self, FWHML, FWHMG):
return wofz(1j * FWHML / (FWHMG * sqrt2)).real
def preparePeak(self, x, FWHMG, FWHML):
sigma, gamma = FWHMG / sqrt2log2t2, FWHML / 2
z = (x + 1j * gamma) / (sigma * sqrt2)
return z
def calcShift(self, I, J, F):
phase = (-1)**(I+J+F)
contrib = []
for k in range(1, 4):
n = float(wigner_6j(I, J, F, J, I, k))
d = float(wigner_3j(I, k, I, -I, 0, I) * wigner_3j(J, k, J, -J, 0, J))
shift = phase * n / d
if not np.isfinite(shift):
contrib.append(0)
else:
if k == 1:
shift = shift * (I*J)
elif k == 2:
shift = shift / 4
contrib.append(shift)
return contrib
def pos(self):
centroid = self.params['centroid'].value
Al = self.params['Al'].value
Au = self.params['Au'].value
Bl = self.params['Bl'].value
Bu = self.params['Bu'].value
Cl = self.params['Cl'].value
Cu = self.params['Cu'].value
pos = []
for line in self.lines:
pos.append(centroid + Au * self.scaling_Au[line] + Bu * self.scaling_Bu[line] + Cu * self.scaling_Cu[line] - Al * self.scaling_Al[line] - Bl * self.scaling_Bl[line] - Cl * self.scaling_Cl[line])
return pos
| 38.783069 | 206 | 0.502456 | from satlas2.core import Model, Parameter
import numpy as np
from scipy.special import wofz
from sympy.physics.wigner import wigner_6j, wigner_3j
__all__ = ['HFS']
sqrt2 = 2 ** 0.5
sqrt2log2t2 = 2 * np.sqrt(2 * np.log(2))
log2 = np.log(2)
class HFS(Model):
def __init__(self, I, J, A=[0, 0], B=[0, 0], C=[0, 0], df=0, fwhm=50, bkg=1, name=None, N=None, offset=0, poisson=0, scale=1.0, racah=True, prefunc=None):
super().__init__(name=name, prefunc=prefunc)
J1, J2 = J
lower_F = np.arange(abs(I - J1), I+J1+1, 1)
upper_F = np.arange(abs(I - J2), I+J2+1, 1)
self.lines = []
self.intensities = {}
self.scaling_Al = {}
self.scaling_Bl = {}
self.scaling_Cl = {}
self.scaling_Au = {}
self.scaling_Bu = {}
self.scaling_Cu = {}
for i, F1 in enumerate(lower_F):
for j, F2 in enumerate(upper_F):
if abs(F2 - F1) <= 1 and not F2 == F1 == 0.0:
if F1 % 1 == 0:
F1_str = '{:.0f}'.format(F1)
else:
F1_str = '{:.0f}_2'.format(2*F1)
if F2 % 1 == 0:
F2_str = '{:.0f}'.format(F2)
else:
F2_str = '{:.0f}_2'.format(2*F2)
line = '{}to{}'.format(F1_str, F2_str)
self.lines.append(line)
C1, D1, E1 = self.calcShift(I, J1, F1)
C2, D2, E2 = self.calcShift(I, J2, F2)
self.scaling_Al[line] = C1
self.scaling_Bl[line] = D1
self.scaling_Cl[line] = E1
self.scaling_Au[line] = C2
self.scaling_Bu[line] = D2
self.scaling_Cu[line] = E2
intens = float((2 * F1 + 1) * (2 * F2 + 1) * \
wigner_6j(J2, F2, I, F1, J1, 1.0) ** 2)
self.intensities['Amp'+line] = Parameter(value=intens, min=0, vary=not racah)
norm = max([p.value for p in self.intensities.values()])
for n, v in self.intensities.items():
v.value /= norm
pars = {'centroid': Parameter(value=df),
'Al': Parameter(value=A[0]),
'Au': Parameter(value=A[1]),
'Bl': Parameter(value=B[0]),
'Bu': Parameter(value=B[1]),
'Cl': Parameter(value=C[0], vary=False),
'Cu': Parameter(value=C[1], vary=False),
'bkg': Parameter(value=bkg),
'FWHMG': Parameter(value=fwhm, min=0.01),
'FWHML': Parameter(value=fwhm, min=0.01),
'scale': Parameter(value=scale, min=0, vary=racah)}
if N is not None:
pars['N'] = Parameter(value=N, vary=False)
pars['Offset'] = Parameter(value=offset)
pars['Poisson'] = Parameter(value=poisson, min=0, max=1)
self.f = self.fShifted
else:
self.f = self.fUnshifted
pars = {**pars, **self.intensities}
self.params = pars
if I < 2 or J1 < 2:
self.params['Cl'].vary = False
if I < 2 or J2 < 2:
self.params['Cu'].vary = False
if I < 1 or J1 < 1:
self.params['Bl'].vary = False
if I < 1 or J2 < 1:
self.params['Bu'].vary = False
if I == 0 or J1 == 0:
self.params['Al'].vary = False
if I == 0 or J2 == 0:
self.params['Au'].vary = False
self.xtransformed = None
self.xhashed = None
def fUnshifted(self, x):
centroid = self.params['centroid'].value
Al = self.params['Al'].value
Au = self.params['Au'].value
Bl = self.params['Bl'].value
Bu = self.params['Bu'].value
Cl = self.params['Cl'].value
Cu = self.params['Cu'].value
FWHMG = self.params['FWHMG'].value
FWHML = self.params['FWHML'].value
scale = self.params['scale'].value
bkg = self.params['bkg'].value
result = np.zeros(len(x))
x = self.transform(x)
for line in self.lines:
pos = centroid + Au * self.scaling_Au[line] + Bu * self.scaling_Bu[line] + Cu * self.scaling_Cu[line] - Al * self.scaling_Al[line] - Bl * self.scaling_Bl[line] - Cl * self.scaling_Cl[line]
result += self.params['Amp' + line].value * self.peak(x - pos, FWHMG, FWHML)
return scale * result + bkg
def fShifted(self, x):
centroid = self.params['centroid'].value
Al = self.params['Al'].value
Au = self.params['Au'].value
Bl = self.params['Bl'].value
Bu = self.params['Bu'].value
FWHMG = self.params['FWHMG'].value
FWHML = self.params['FWHML'].value
scale = self.params['scale'].value
N = self.params['N'].value
offset = self.params['Offset'].value
poisson = self.params['Poisson'].value
bkg = self.params['bkg'].value
result = np.zeros(len(x))
for line in self.lines:
pos = centroid + Au * self.scaling_Au[line] + Bu * self.scaling_Bu[line] + Cu * self.scaling_Cu[line] - Al * self.scaling_Al[line] - Bl * self.scaling_Bl[line] - Cl * self.scaling_Cl[line]
for i in range(N + 1):
if self.prefunc:
result += self.params['Amp' + line].value * self.peak(self.prefunc(x - i * offset) - pos, FWHMG, FWHML) * (poisson**i)/np.math.factorial(i)
else:
result += self.params['Amp' + line].value * self.peak(x - pos - i * offset, FWHMG, FWHML) * (poisson**i)/np.math.factorial(i)
return scale * result + bkg
def peak(self, x, FWHMG, FWHML):
z = self.preparePeak(x, FWHMG, FWHML)
n = self.norm(FWHML, FWHMG)
ret = wofz(z).real
return ret/n
def norm(self, FWHML, FWHMG):
return wofz(1j * FWHML / (FWHMG * sqrt2)).real
def preparePeak(self, x, FWHMG, FWHML):
sigma, gamma = FWHMG / sqrt2log2t2, FWHML / 2
z = (x + 1j * gamma) / (sigma * sqrt2)
return z
def calcShift(self, I, J, F):
phase = (-1)**(I+J+F)
contrib = []
for k in range(1, 4):
n = float(wigner_6j(I, J, F, J, I, k))
d = float(wigner_3j(I, k, I, -I, 0, I) * wigner_3j(J, k, J, -J, 0, J))
shift = phase * n / d
if not np.isfinite(shift):
contrib.append(0)
else:
if k == 1:
shift = shift * (I*J)
elif k == 2:
shift = shift / 4
contrib.append(shift)
return contrib
def pos(self):
centroid = self.params['centroid'].value
Al = self.params['Al'].value
Au = self.params['Au'].value
Bl = self.params['Bl'].value
Bu = self.params['Bu'].value
Cl = self.params['Cl'].value
Cu = self.params['Cu'].value
pos = []
for line in self.lines:
pos.append(centroid + Au * self.scaling_Au[line] + Bu * self.scaling_Bu[line] + Cu * self.scaling_Cu[line] - Al * self.scaling_Al[line] - Bl * self.scaling_Bl[line] - Cl * self.scaling_Cl[line])
return pos
| true | true |
f7211a6c4ae21fd092ed3210d9ed20271e7afe65 | 19,979 | py | Python | collect/TwHistory.py | mcuiteallen/stock | 06c56db6c712ab88fabdc67a8812869ad4180f6f | [
"MIT"
] | null | null | null | collect/TwHistory.py | mcuiteallen/stock | 06c56db6c712ab88fabdc67a8812869ad4180f6f | [
"MIT"
] | null | null | null | collect/TwHistory.py | mcuiteallen/stock | 06c56db6c712ab88fabdc67a8812869ad4180f6f | [
"MIT"
] | null | null | null | import calendar
import math
import pandas as pd
import time
import twstock
import requests
from datetime import datetime, timedelta
from dateutil import relativedelta
from db.Connection import session
from enum import Enum
from model.StockHistory import StockHistory
from sys import float_info
from talib import abstract
class HistoryType(Enum):
    """History granularity; value = (DB type code, human label, trading-horizon label)."""
    DAY = ("0", "日", "短線")
    WEEK = ("1", "週", "中短線")
    MONTH = ("2", "月", "中長線")
class HistoryTypeTo(Enum):
    """Index into a HistoryType.value tuple, selecting one representation."""
    DB = 0
    HUMAN = 1
    EXPLAIN = 2
class TwHistory:
    """Collect Taiwan stock day/week/month price history into the DB and
    compute technical indicators (RSI, Williams %R, MACD, Bollinger Bands),
    then report RSI/Williams divergences.
    """

    # strptime/strftime pattern used by twstock start dates, e.g. "2021/01/02"
    dateFormatForTwStock = None
    # strptime/strftime pattern used for DB date strings, e.g. "2021-01-02"
    dateFormat = None
    # date-string -> indicator caches, filled by the calculate* methods
    rsiDict = None
    williamsDict = None
    macdDict = None
    bbandDict = None
def __init__(self):
self.dateFormatForTwStock = "%Y/%m/%d"
self.dateFormat = "%Y-%m-%d"
def transformStrToDateTimeForTwStock(self, targetStr):
return datetime.strptime(targetStr, self.dateFormatForTwStock)
def transformStrToDateTime(self, targetStr):
return datetime.strptime(targetStr, self.dateFormat)
def transformDateTimeToStr(self, date):
return date.strftime(self.dateFormat)
def retIfNaN(self, num):
if math.isnan(num):
return None
else:
return num
def createDataFrame(self, history):
df = pd.DataFrame([h.as_simple_dict() for h in history])
df['date'] = pd.to_datetime(df['date'])
df.set_index('date', inplace=True)
return df
    def deleteHistory(self, code, type, startDate, endDate):
        """Delete StockHistory rows for one stock and history type within
        [startDate, endDate] (inclusive), then commit.

        `type` is the DB history-type code ("0"/"1"/"2"); the datetime bounds
        are converted to "YYYY-MM-DD" strings to match how dates are stored.
        """
        session.query(StockHistory).\
            filter(StockHistory.code == code).\
            filter(StockHistory.type == type).\
            filter(StockHistory.date >= self.transformDateTimeToStr(startDate)).\
            filter(StockHistory.date <= self.transformDateTimeToStr(endDate)).\
            delete()
        session.commit()
def calculateRSI(self, df):
rsi = abstract.RSI(df, timeperiod=5)
self.rsiDict = {}
for index, number in rsi.iteritems():
self.rsiDict[self.transformDateTimeToStr(index)] = number
def calculateWilliams(self, df):
williams = abstract.WILLR(df, timeperiod=5)
self.williamsDict = {}
for index, number in williams.iteritems():
self.williamsDict[self.transformDateTimeToStr(index)] = number
def calculateMACD(self, df):
macd = abstract.MACD(df)
self.macdDict = {}
for index, row in macd.iterrows():
self.macdDict[self.transformDateTimeToStr(index)] = row
def calculateBBAND(self, df):
bband = abstract.BBANDS(df, timeperiod=22)
self.bbandDict = {}
for index, row in bband.iterrows():
self.bbandDict[self.transformDateTimeToStr(index)] = row
def updateHistoryTechnicalIndicator(self, history):
date = history.date
updateFlag = False
if history.rsi is None:
history.rsi = self.retIfNaN(self.rsiDict[date])
updateFlag = updateFlag or history.rsi is not None
if history.williams is None:
history.williams = self.retIfNaN(self.williamsDict[date])
updateFlag = updateFlag or history.williams is not None
if history.macd is None:
history.macd = self.retIfNaN(self.macdDict[date].macd)
updateFlag = updateFlag or history.macd is not None
if history.macdsignal is None:
history.macdsignal = self.retIfNaN(self.macdDict[date].macdsignal)
updateFlag = updateFlag or history.macdsignal is not None
if history.macdhist is None:
history.macdhist = self.retIfNaN(self.macdDict[date].macdhist)
updateFlag = updateFlag or history.macdhist is not None
if history.upperband is None:
history.upperband = self.retIfNaN(self.bbandDict[date].upperband)
updateFlag = updateFlag or history.upperband is not None
if history.middleband is None:
history.middleband = self.retIfNaN(self.bbandDict[date].middleband)
updateFlag = updateFlag or history.middleband is not None
if history.lowerband is None:
history.lowerband = self.retIfNaN(self.bbandDict[date].lowerband)
updateFlag = updateFlag or history.lowerband is not None
if updateFlag:
session.merge(history)
    def dayHistory(self):
        """Download daily OHLCV rows from FinMind into the DB for each stock.

        NOTE(review): hard-gated to code '3707' (same as isHistoryExist) —
        looks like a temporary debugging restriction; confirm before relying
        on it for a full-market run.
        """
        for k, v in twstock.codes.items():
            if self.isStockOrETF(v.type) and k == '3707':
                print("dayHistory code: " + k)
                dayType = self.translate(HistoryType.DAY, HistoryTypeTo.DB) # DB code for day-type history
                history = session.query(StockHistory).\
                    filter(StockHistory.code == k).\
                    filter(StockHistory.type == dayType).\
                    order_by(StockHistory.date.desc()).\
                    first()
                nowDate = datetime.now()
                endDateStr = self.transformDateTimeToStr(nowDate)
                startDateStr = self.transformDateTimeToStr(self.transformStrToDateTimeForTwStock(v.start)) if history is None else history.date # resume from the latest stored date when rows already exist
                self.finmindtrade(k, startDateStr, endDateStr, dayType)
    def weekHistory(self):
        """Aggregate day-type rows into week-type StockHistory rows.

        For each stock with today's day data, rebuilds every Monday–Sunday
        bucket from the last stored week (or the twstock start date) up to
        the current week: the bucket is deleted, then capacity/turnover are
        summed and open/high/low/close derived from the daily rows.
        """
        today = self.transformStrToDateTime(self.transformDateTimeToStr(datetime.now()))
        weekStart = today - timedelta(days=today.weekday())
        for k, v in twstock.codes.items():
            if self.isStockOrETF(v.type) and self.isHistoryExist(k):
                print("weekHistory code: " + k)
                latestHistoryWeek = session.query(StockHistory).\
                    filter(StockHistory.code == k).\
                    filter(StockHistory.type == self.translate(HistoryType.WEEK, HistoryTypeTo.DB)).\
                    order_by(StockHistory.date.desc()).\
                    first()
                startdate = self.transformStrToDateTimeForTwStock(v.start) if latestHistoryWeek is None else self.transformStrToDateTime(latestHistoryWeek.date)
                weekStartPast = startdate - timedelta(days=startdate.weekday())
                weekEndPast = weekStartPast + timedelta(days=6)
                while weekStartPast <= weekStart:
                    # rebuild this week's bucket from scratch
                    self.deleteHistory(k, self.translate(HistoryType.WEEK, HistoryTypeTo.DB), weekStartPast, weekEndPast)
                    historyWeek = StockHistory(code=k, type=self.translate(HistoryType.WEEK, HistoryTypeTo.DB),
                                               capacity=0, turnover=0, high=0, low=float_info.max, close=0)
                    firstFlag = True
                    for historyDay in session.query(StockHistory).\
                        filter(StockHistory.code == k).\
                        filter(StockHistory.type == self.translate(HistoryType.DAY, HistoryTypeTo.DB)).\
                        filter(StockHistory.date >= self.transformDateTimeToStr(weekStartPast)).\
                        filter(StockHistory.date <= self.transformDateTimeToStr(weekEndPast)).\
                        order_by(StockHistory.date.asc()).\
                        all():
                        historyWeek.date = self.transformDateTimeToStr(weekStartPast)
                        historyWeek.close = historyDay.close
                        historyWeek.capacity += historyDay.capacity
                        historyWeek.turnover += historyDay.turnover
                        if firstFlag:
                            historyWeek.open = historyDay.open
                            firstFlag = False
                        historyWeek.high = max(historyWeek.high, historyDay.high)
                        historyWeek.low = min(historyWeek.low, historyDay.low)
                    if not firstFlag:
                        # at least one daily row fed this bucket
                        session.merge(historyWeek)
                    weekStartPast += timedelta(days=7)
                    weekEndPast += timedelta(days=7)
        session.commit()
    def monthHistory(self):
        """Aggregate day-type rows into month-type StockHistory rows.

        Same rebuild strategy as weekHistory, but bucketed per calendar
        month (first day through calendar.monthrange's last day).
        """
        today = self.transformStrToDateTime(self.transformDateTimeToStr(datetime.now()))
        monthStart = today.replace(day=1)
        for k, v in twstock.codes.items():
            if self.isStockOrETF(v.type) and self.isHistoryExist(k):
                print("monthHistory code: " + k)
                latestHistoryMonth = session.query(StockHistory).\
                    filter(StockHistory.code == k).\
                    filter(StockHistory.type == self.translate(HistoryType.MONTH, HistoryTypeTo.DB)).\
                    order_by(StockHistory.date.desc()).\
                    first()
                startdate = self.transformStrToDateTimeForTwStock(v.start) if latestHistoryMonth is None else self.transformStrToDateTime(latestHistoryMonth.date)
                monthStartPast = startdate.replace(day=1)
                monthEndPast = monthStartPast.replace(day=calendar.monthrange(monthStartPast.year, monthStartPast.month)[1])
                while monthStartPast <= monthStart:
                    # rebuild this month's bucket from scratch
                    self.deleteHistory(k, self.translate(HistoryType.MONTH, HistoryTypeTo.DB), monthStartPast, monthEndPast)
                    historyMonth = StockHistory(code=k, type=self.translate(HistoryType.MONTH, HistoryTypeTo.DB),
                                                capacity=0, turnover=0, high=0, low=float_info.max, close=0)
                    firstFlag = True
                    for historyDay in session.query(StockHistory).\
                        filter(StockHistory.code == k).\
                        filter(StockHistory.type == self.translate(HistoryType.DAY, HistoryTypeTo.DB)).\
                        filter(StockHistory.date >= self.transformDateTimeToStr(monthStartPast)).\
                        filter(StockHistory.date <= self.transformDateTimeToStr(monthEndPast)).\
                        order_by(StockHistory.date.asc()).\
                        all():
                        historyMonth.date = self.transformDateTimeToStr(monthStartPast)
                        historyMonth.close = historyDay.close
                        historyMonth.capacity += historyDay.capacity
                        historyMonth.turnover += historyDay.turnover
                        if firstFlag:
                            historyMonth.open = historyDay.open
                            firstFlag = False
                        historyMonth.high = max(historyMonth.high, historyDay.high)
                        historyMonth.low = min(historyMonth.low, historyDay.low)
                    if not firstFlag:
                        # at least one daily row fed this bucket
                        session.merge(historyMonth)
                    monthStartPast = monthStartPast + relativedelta.relativedelta(months=1)
                    monthEndPast = monthStartPast.replace(day=calendar.monthrange(monthStartPast.year, monthStartPast.month)[1])
        session.commit()
    def technicalIndicator(self):
        """Recompute RSI/Williams/MACD/Bollinger for every stock and history
        granularity, then backfill missing indicator columns row by row."""
        for k, v in twstock.codes.items():
            if self.isStockOrETF(v.type) and self.isHistoryExist(k):
                for historyType in HistoryType:
                    print("technicalIndicator code: " + k + ", type: " + self.translate(historyType, HistoryTypeTo.HUMAN))
                    historyList = session.query(StockHistory).\
                        filter(StockHistory.code == k).\
                        filter(StockHistory.type == self.translate(historyType, HistoryTypeTo.DB)).\
                        order_by(StockHistory.date.asc()).\
                        all()
                    if len(historyList) == 0:
                        continue
                    df = self.createDataFrame(historyList)
                    # fill the per-date caches consumed by updateHistoryTechnicalIndicator
                    self.calculateRSI(df)
                    self.calculateWilliams(df)
                    self.calculateMACD(df)
                    self.calculateBBAND(df)
                    for history in historyList:
                        self.updateHistoryTechnicalIndicator(history)
        session.commit()
    def diverge(self, highRsi, lowRsi, highWilliams, lowWilliams):
        """Print RSI / Williams %R divergence signals, ranked by day turnover.

        highRsi/lowRsi gate which stocks count as overbought/oversold (see
        isHighRsi/isLowRsi); highWilliams/lowWilliams qualify the Williams
        extremes used by the peak/valley comparisons.  Output only — nothing
        is written to the DB.
        """
        turnoverDict = {}
        nameDict = {}
        for k, v in twstock.codes.items():
            if self.isStockOrETF(v.type) and self.isHistoryExist(k):
                history = session.query(StockHistory).\
                    filter(StockHistory.code == k).\
                    filter(StockHistory.type == self.translate(HistoryType.DAY, HistoryTypeTo.DB)).\
                    order_by(StockHistory.date.desc()).\
                    first()
                turnoverDict[k] = history.turnover
                nameDict[k] = v.name
        # rank by latest daily turnover, descending
        rankDict = {k: v for k, v in sorted(turnoverDict.items(), key=lambda item: item[1], reverse=True)}
        print("按當日成交值由大至小排名,背離條件: rsi > " + str(highRsi) + " or rsi < " + str(lowRsi))
        for rankIdx, code in enumerate(rankDict.keys()):
            closePrice = None
            divergeDict = {}
            for historyType in HistoryType:
                historyTypeHuman = self.translate(historyType, HistoryTypeTo.HUMAN)
                historyTypeExplain = self.translate(historyType, HistoryTypeTo.EXPLAIN)
                # newest-first rows that already have indicators
                historyList = session.query(StockHistory).\
                    filter(StockHistory.code == code).\
                    filter(StockHistory.type == self.translate(historyType, HistoryTypeTo.DB)).\
                    filter(StockHistory.rsi.isnot(None)).\
                    order_by(StockHistory.date.desc()).\
                    limit(self.recentHistoryLimit(historyType)).\
                    all()
                historyListLength = len(historyList)
                if historyListLength > 0:
                    closePrice = historyList[0].close
                if historyListLength > 1:
                    # adjacent-bar divergence: RSI and Williams move in opposite directions
                    if self.isHighRsi(highRsi, historyList) and historyList[0].rsi > historyList[1].rsi and historyList[0].williams < historyList[1].williams:
                        divergeDict[historyTypeHuman + " 相鄰背離 " + historyTypeExplain + "看空"] = "rsi up williams down"
                    elif self.isLowRsi(lowRsi, historyList) and historyList[0].rsi < historyList[1].rsi and historyList[0].williams > historyList[1].williams:
                        divergeDict[historyTypeHuman + " 相鄰背離 " + historyTypeExplain + "看多"] = "rsi down williams up"
                if historyListLength > 2:
                    highPeak = []
                    lowPeak = []
                    # collect the two most recent local RSI maxima and minima
                    for i, history in enumerate(historyList):
                        if i == 0 or i == historyListLength - 1:
                            continue
                        if len(highPeak) < 2 and historyList[i-1].rsi < history.rsi and history.rsi > historyList[i+1].rsi:
                            highPeak.append(history)
                        if len(lowPeak) < 2 and historyList[i-1].rsi > history.rsi and history.rsi < historyList[i+1].rsi:
                            lowPeak.append(history)
                        if len(highPeak) == 2 and len(lowPeak) == 2:
                            break
                    if len(highPeak) == 2 and self.isHighRsi(highRsi, highPeak):
                        if highPeak[0].rsi > highPeak[1].rsi and highPeak[0].williams < highPeak[1].williams:
                            divergeDict[historyTypeHuman + " 波峰背離 " + historyTypeExplain + "看空: " + highPeak[1].date + " and " + highPeak[0].date] = "rsi up williams down"
                        elif highPeak[0].rsi < highPeak[1].rsi and highPeak[0].williams > highPeak[1].williams and highPeak[0].williams >= highWilliams:
                            for low in lowPeak:
                                if highPeak[0].date > low.date and highPeak[1].date < low.date and low.williams <= lowWilliams:
                                    divergeDict[historyTypeHuman + " 波峰背離 反彈不過前高 " + historyTypeExplain + "看空: " + highPeak[1].date + " and " + highPeak[0].date] = "rsi down williams fast up"
                                    break
                    if len(lowPeak) == 2 and self.isLowRsi(lowRsi, lowPeak):
                        if lowPeak[0].rsi < lowPeak[1].rsi and lowPeak[0].williams > lowPeak[1].williams:
                            divergeDict[historyTypeHuman + " 波谷背離 " + historyTypeExplain + "看多: " + lowPeak[1].date + " and " + lowPeak[0].date] = "rsi down williams up"
                        elif lowPeak[0].rsi > lowPeak[1].rsi and lowPeak[0].williams < lowPeak[1].williams and lowPeak[0].williams <= lowWilliams:
                            for high in highPeak:
                                if lowPeak[0].date > high.date and lowPeak[1].date < high.date and high.williams >= highWilliams:
                                    divergeDict[historyTypeHuman + " 波谷背離 回測不過前低 " + historyTypeExplain + "看多: " + lowPeak[1].date + " and " + lowPeak[0].date] = "rsi up williams fast down"
                                    break
            if len(divergeDict) > 0:
                print("code: " + code + ", name: " + nameDict[code] + ", rank: " + str(rankIdx+1) + "/" + str(len(rankDict)) + ", close price: " + str(closePrice))
                for k, v in divergeDict.items():
                    print(k + " => " + v)
                print("")
        print("========================================================================================")
def isStockOrETF(self, type):
return type == "股票" or type == "ETF"
    def isHistoryExist(self, code):
        """True when today's day-type StockHistory row exists for `code`.

        NOTE(review): hard-gated to code '3707' (everything else returns
        False) — looks like a temporary debugging restriction, mirroring
        dayHistory; confirm before a full-market run.
        """
        if code=='3707':
            return session.query(StockHistory).\
                filter(StockHistory.code == code).\
                filter(StockHistory.type == self.translate(HistoryType.DAY, HistoryTypeTo.DB)).\
                filter(StockHistory.date == self.transformDateTimeToStr(datetime.now())).\
                first() is not None
        return False
def isHighRsi(self, highRsi, historyList):
for i, history in enumerate(historyList):
if i < 2 and history.rsi < highRsi:
return False
elif i == 2:
break
return True
def isLowRsi(self, lowRsi, historyList):
for i, history in enumerate(historyList):
if i < 2 and history.rsi > lowRsi:
return False
elif i == 2:
break
return True
def recentHistoryLimit(self, historyType):
if historyType == HistoryType.DAY:
return 40
elif historyType == HistoryType.WEEK:
return 16
else:
return 6
def translate(self, historyType, historyTypeTo):
return historyType.value[historyTypeTo.value]
def finmindtrade(self, code, start, end, dayType):
url = "https://api.finmindtrade.com/api/v4/data"
parameter = {
"dataset": "TaiwanStockPrice",
"data_id": code,
"start_date": start,
"end_date": end,
"token": "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJkYXRlIjoiMjAyMS0xMC0wMSAxNjoyMzoyNSIsInVzZXJfaWQiOiJtY3VpdGVhbGxlbiIsImlwIjoiMTE4LjE2My4xNDcuMTgyIn0.vXMykagq4kOKGrKOusgfAR3yhgcri0N_Wpe1Nb4DOiA"
}
resp = requests.get(url, params=parameter)
json = resp.json()
if json is not None:
for data in resp.json()["data"]:
history = StockHistory(code=code, type=dayType, date=data["date"],
capacity=data["Trading_Volume"], turnover=data["Trading_money"],
open=data["open"], high=data["max"], low=data["min"], close=data["close"])
session.merge(history)
session.commit()
time.sleep(6.1)
# Script entry: refresh day/week/month history, recompute indicators, then
# report divergences (the active diverge call uses RSI 70/30 with Williams
# -20/-80 thresholds; the commented lines are stricter presets).
twHistory = TwHistory()
twHistory.dayHistory()
twHistory.weekHistory()
twHistory.monthHistory()
twHistory.technicalIndicator()
#twHistory.diverge(90, 10, -20, -80)
#twHistory.diverge(80, 20, -20, -80)
twHistory.diverge(70, 30, -20, -80) | 51.359897 | 207 | 0.569198 | import calendar
import math
import pandas as pd
import time
import twstock
import requests
from datetime import datetime, timedelta
from dateutil import relativedelta
from db.Connection import session
from enum import Enum
from model.StockHistory import StockHistory
from sys import float_info
from talib import abstract
class HistoryType(Enum):
DAY = ("0", "日", "短線")
WEEK = ("1", "週", "中短線")
MONTH = ("2", "月", "中長線")
class HistoryTypeTo(Enum):
DB = 0
HUMAN = 1
EXPLAIN = 2
class TwHistory:
dateFormatForTwStock = None
dateFormat = None
rsiDict = None
williamsDict = None
macdDict = None
bbandDict = None
def __init__(self):
self.dateFormatForTwStock = "%Y/%m/%d"
self.dateFormat = "%Y-%m-%d"
def transformStrToDateTimeForTwStock(self, targetStr):
return datetime.strptime(targetStr, self.dateFormatForTwStock)
def transformStrToDateTime(self, targetStr):
return datetime.strptime(targetStr, self.dateFormat)
def transformDateTimeToStr(self, date):
return date.strftime(self.dateFormat)
def retIfNaN(self, num):
if math.isnan(num):
return None
else:
return num
def createDataFrame(self, history):
df = pd.DataFrame([h.as_simple_dict() for h in history])
df['date'] = pd.to_datetime(df['date'])
df.set_index('date', inplace=True)
return df
def deleteHistory(self, code, type, startDate, endDate):
session.query(StockHistory).\
filter(StockHistory.code == code).\
filter(StockHistory.type == type).\
filter(StockHistory.date >= self.transformDateTimeToStr(startDate)).\
filter(StockHistory.date <= self.transformDateTimeToStr(endDate)).\
delete()
session.commit()
def calculateRSI(self, df):
rsi = abstract.RSI(df, timeperiod=5)
self.rsiDict = {}
for index, number in rsi.iteritems():
self.rsiDict[self.transformDateTimeToStr(index)] = number
def calculateWilliams(self, df):
williams = abstract.WILLR(df, timeperiod=5)
self.williamsDict = {}
for index, number in williams.iteritems():
self.williamsDict[self.transformDateTimeToStr(index)] = number
def calculateMACD(self, df):
macd = abstract.MACD(df)
self.macdDict = {}
for index, row in macd.iterrows():
self.macdDict[self.transformDateTimeToStr(index)] = row
def calculateBBAND(self, df):
bband = abstract.BBANDS(df, timeperiod=22)
self.bbandDict = {}
for index, row in bband.iterrows():
self.bbandDict[self.transformDateTimeToStr(index)] = row
def updateHistoryTechnicalIndicator(self, history):
date = history.date
updateFlag = False
if history.rsi is None:
history.rsi = self.retIfNaN(self.rsiDict[date])
updateFlag = updateFlag or history.rsi is not None
if history.williams is None:
history.williams = self.retIfNaN(self.williamsDict[date])
updateFlag = updateFlag or history.williams is not None
if history.macd is None:
history.macd = self.retIfNaN(self.macdDict[date].macd)
updateFlag = updateFlag or history.macd is not None
if history.macdsignal is None:
history.macdsignal = self.retIfNaN(self.macdDict[date].macdsignal)
updateFlag = updateFlag or history.macdsignal is not None
if history.macdhist is None:
history.macdhist = self.retIfNaN(self.macdDict[date].macdhist)
updateFlag = updateFlag or history.macdhist is not None
if history.upperband is None:
history.upperband = self.retIfNaN(self.bbandDict[date].upperband)
updateFlag = updateFlag or history.upperband is not None
if history.middleband is None:
history.middleband = self.retIfNaN(self.bbandDict[date].middleband)
updateFlag = updateFlag or history.middleband is not None
if history.lowerband is None:
history.lowerband = self.retIfNaN(self.bbandDict[date].lowerband)
updateFlag = updateFlag or history.lowerband is not None
if updateFlag:
session.merge(history)
def dayHistory(self):
for k, v in twstock.codes.items():
if self.isStockOrETF(v.type) and k == '3707':
print("dayHistory code: " + k)
dayType = self.translate(HistoryType.DAY, HistoryTypeTo.DB)
history = session.query(StockHistory).\
filter(StockHistory.code == k).\
filter(StockHistory.type == dayType).\
order_by(StockHistory.date.desc()).\
first()
nowDate = datetime.now()
endDateStr = self.transformDateTimeToStr(nowDate)
startDateStr = self.transformDateTimeToStr(self.transformStrToDateTimeForTwStock(v.start)) if history is None else history.date
self.finmindtrade(k, startDateStr, endDateStr, dayType)
def weekHistory(self):
today = self.transformStrToDateTime(self.transformDateTimeToStr(datetime.now()))
weekStart = today - timedelta(days=today.weekday())
for k, v in twstock.codes.items():
if self.isStockOrETF(v.type) and self.isHistoryExist(k):
print("weekHistory code: " + k)
latestHistoryWeek = session.query(StockHistory).\
filter(StockHistory.code == k).\
filter(StockHistory.type == self.translate(HistoryType.WEEK, HistoryTypeTo.DB)).\
order_by(StockHistory.date.desc()).\
first()
startdate = self.transformStrToDateTimeForTwStock(v.start) if latestHistoryWeek is None else self.transformStrToDateTime(latestHistoryWeek.date)
weekStartPast = startdate - timedelta(days=startdate.weekday())
weekEndPast = weekStartPast + timedelta(days=6)
while weekStartPast <= weekStart:
self.deleteHistory(k, self.translate(HistoryType.WEEK, HistoryTypeTo.DB), weekStartPast, weekEndPast)
historyWeek = StockHistory(code=k, type=self.translate(HistoryType.WEEK, HistoryTypeTo.DB),
capacity=0, turnover=0, high=0, low=float_info.max, close=0)
firstFlag = True
for historyDay in session.query(StockHistory).\
filter(StockHistory.code == k).\
filter(StockHistory.type == self.translate(HistoryType.DAY, HistoryTypeTo.DB)).\
filter(StockHistory.date >= self.transformDateTimeToStr(weekStartPast)).\
filter(StockHistory.date <= self.transformDateTimeToStr(weekEndPast)).\
order_by(StockHistory.date.asc()).\
all():
historyWeek.date = self.transformDateTimeToStr(weekStartPast)
historyWeek.close = historyDay.close
historyWeek.capacity += historyDay.capacity
historyWeek.turnover += historyDay.turnover
if firstFlag:
historyWeek.open = historyDay.open
firstFlag = False
historyWeek.high = max(historyWeek.high, historyDay.high)
historyWeek.low = min(historyWeek.low, historyDay.low)
if not firstFlag:
session.merge(historyWeek)
weekStartPast += timedelta(days=7)
weekEndPast += timedelta(days=7)
session.commit()
def monthHistory(self):
today = self.transformStrToDateTime(self.transformDateTimeToStr(datetime.now()))
monthStart = today.replace(day=1)
for k, v in twstock.codes.items():
if self.isStockOrETF(v.type) and self.isHistoryExist(k):
print("monthHistory code: " + k)
latestHistoryMonth = session.query(StockHistory).\
filter(StockHistory.code == k).\
filter(StockHistory.type == self.translate(HistoryType.MONTH, HistoryTypeTo.DB)).\
order_by(StockHistory.date.desc()).\
first()
startdate = self.transformStrToDateTimeForTwStock(v.start) if latestHistoryMonth is None else self.transformStrToDateTime(latestHistoryMonth.date)
monthStartPast = startdate.replace(day=1)
monthEndPast = monthStartPast.replace(day=calendar.monthrange(monthStartPast.year, monthStartPast.month)[1])
while monthStartPast <= monthStart:
self.deleteHistory(k, self.translate(HistoryType.MONTH, HistoryTypeTo.DB), monthStartPast, monthEndPast)
historyMonth = StockHistory(code=k, type=self.translate(HistoryType.MONTH, HistoryTypeTo.DB),
capacity=0, turnover=0, high=0, low=float_info.max, close=0)
firstFlag = True
for historyDay in session.query(StockHistory).\
filter(StockHistory.code == k).\
filter(StockHistory.type == self.translate(HistoryType.DAY, HistoryTypeTo.DB)).\
filter(StockHistory.date >= self.transformDateTimeToStr(monthStartPast)).\
filter(StockHistory.date <= self.transformDateTimeToStr(monthEndPast)).\
order_by(StockHistory.date.asc()).\
all():
historyMonth.date = self.transformDateTimeToStr(monthStartPast)
historyMonth.close = historyDay.close
historyMonth.capacity += historyDay.capacity
historyMonth.turnover += historyDay.turnover
if firstFlag:
historyMonth.open = historyDay.open
firstFlag = False
historyMonth.high = max(historyMonth.high, historyDay.high)
historyMonth.low = min(historyMonth.low, historyDay.low)
if not firstFlag:
session.merge(historyMonth)
monthStartPast = monthStartPast + relativedelta.relativedelta(months=1)
monthEndPast = monthStartPast.replace(day=calendar.monthrange(monthStartPast.year, monthStartPast.month)[1])
session.commit()
def technicalIndicator(self):
for k, v in twstock.codes.items():
if self.isStockOrETF(v.type) and self.isHistoryExist(k):
for historyType in HistoryType:
print("technicalIndicator code: " + k + ", type: " + self.translate(historyType, HistoryTypeTo.HUMAN))
historyList = session.query(StockHistory).\
filter(StockHistory.code == k).\
filter(StockHistory.type == self.translate(historyType, HistoryTypeTo.DB)).\
order_by(StockHistory.date.asc()).\
all()
if len(historyList) == 0:
continue
df = self.createDataFrame(historyList)
self.calculateRSI(df)
self.calculateWilliams(df)
self.calculateMACD(df)
self.calculateBBAND(df)
for history in historyList:
self.updateHistoryTechnicalIndicator(history)
session.commit()
    def diverge(self, highRsi, lowRsi, highWilliams, lowWilliams):
        """Print RSI / Williams %R divergence signals, ranked by daily turnover.

        Parameters
        ----------
        highRsi, lowRsi : overbought / oversold RSI thresholds (e.g. 70 / 30).
        highWilliams, lowWilliams : Williams %R thresholds (e.g. -20 / -80).

        Output is printed per stock, largest daily turnover first: for each
        granularity it reports adjacent-bar divergences, peak divergences
        (bearish) and trough divergences (bullish) between RSI and Williams %R.
        """
        turnoverDict = {}
        nameDict = {}
        # Collect the most recent daily turnover per eligible code for ranking.
        for k, v in twstock.codes.items():
            if self.isStockOrETF(v.type) and self.isHistoryExist(k):
                history = session.query(StockHistory).\
                    filter(StockHistory.code == k).\
                    filter(StockHistory.type == self.translate(HistoryType.DAY, HistoryTypeTo.DB)).\
                    order_by(StockHistory.date.desc()).\
                    first()
                turnoverDict[k] = history.turnover
                nameDict[k] = v.name
        # Insertion order of this dict is the turnover ranking (descending).
        rankDict = {k: v for k, v in sorted(turnoverDict.items(), key=lambda item: item[1], reverse=True)}
        print("按當日成交值由大至小排名,背離條件: rsi > " + str(highRsi) + " or rsi < " + str(lowRsi))
        for rankIdx, code in enumerate(rankDict.keys()):
            closePrice = None
            divergeDict = {}
            for historyType in HistoryType:
                historyTypeHuman = self.translate(historyType, HistoryTypeTo.HUMAN)
                historyTypeExplain = self.translate(historyType, HistoryTypeTo.EXPLAIN)
                # Most recent rows first; only rows that already have RSI computed.
                historyList = session.query(StockHistory).\
                    filter(StockHistory.code == code).\
                    filter(StockHistory.type == self.translate(historyType, HistoryTypeTo.DB)).\
                    filter(StockHistory.rsi.isnot(None)).\
                    order_by(StockHistory.date.desc()).\
                    limit(self.recentHistoryLimit(historyType)).\
                    all()
                historyListLength = len(historyList)
                if historyListLength > 0:
                    closePrice = historyList[0].close
                if historyListLength > 1:
                    # Adjacent-bar divergence: RSI and Williams %R moving in
                    # opposite directions between the two latest bars.
                    if self.isHighRsi(highRsi, historyList) and historyList[0].rsi > historyList[1].rsi and historyList[0].williams < historyList[1].williams:
                        divergeDict[historyTypeHuman + " 相鄰背離 " + historyTypeExplain + "看空"] = "rsi up williams down"
                    elif self.isLowRsi(lowRsi, historyList) and historyList[0].rsi < historyList[1].rsi and historyList[0].williams > historyList[1].williams:
                        divergeDict[historyTypeHuman + " 相鄰背離 " + historyTypeExplain + "看多"] = "rsi down williams up"
                if historyListLength > 2:
                    # Find the two most recent local RSI maxima (highPeak) and
                    # minima (lowPeak); index 0 is the newer peak of each pair.
                    highPeak = []
                    lowPeak = []
                    for i, history in enumerate(historyList):
                        if i == 0 or i == historyListLength - 1:
                            continue
                        if len(highPeak) < 2 and historyList[i-1].rsi < history.rsi and history.rsi > historyList[i+1].rsi:
                            highPeak.append(history)
                        if len(lowPeak) < 2 and historyList[i-1].rsi > history.rsi and history.rsi < historyList[i+1].rsi:
                            lowPeak.append(history)
                        if len(highPeak) == 2 and len(lowPeak) == 2:
                            break
                    if len(highPeak) == 2 and self.isHighRsi(highRsi, highPeak):
                        if highPeak[0].rsi > highPeak[1].rsi and highPeak[0].williams < highPeak[1].williams:
                            divergeDict[historyTypeHuman + " 波峰背離 " + historyTypeExplain + "看空: " + highPeak[1].date + " and " + highPeak[0].date] = "rsi up williams down"
                        elif highPeak[0].rsi < highPeak[1].rsi and highPeak[0].williams > highPeak[1].williams and highPeak[0].williams >= highWilliams:
                            # Require an intervening trough that touched lowWilliams
                            # (a bounce that failed to exceed the prior high).
                            for low in lowPeak:
                                if highPeak[0].date > low.date and highPeak[1].date < low.date and low.williams <= lowWilliams:
                                    divergeDict[historyTypeHuman + " 波峰背離 反彈不過前高 " + historyTypeExplain + "看空: " + highPeak[1].date + " and " + highPeak[0].date] = "rsi down williams fast up"
                                    break
                    if len(lowPeak) == 2 and self.isLowRsi(lowRsi, lowPeak):
                        if lowPeak[0].rsi < lowPeak[1].rsi and lowPeak[0].williams > lowPeak[1].williams:
                            divergeDict[historyTypeHuman + " 波谷背離 " + historyTypeExplain + "看多: " + lowPeak[1].date + " and " + lowPeak[0].date] = "rsi down williams up"
                        elif lowPeak[0].rsi > lowPeak[1].rsi and lowPeak[0].williams < lowPeak[1].williams and lowPeak[0].williams <= lowWilliams:
                            # Mirror case: an intervening peak that touched
                            # highWilliams (a pullback holding above the prior low).
                            for high in highPeak:
                                if lowPeak[0].date > high.date and lowPeak[1].date < high.date and high.williams >= highWilliams:
                                    divergeDict[historyTypeHuman + " 波谷背離 回測不過前低 " + historyTypeExplain + "看多: " + lowPeak[1].date + " and " + lowPeak[0].date] = "rsi up williams fast down"
                                    break
            if len(divergeDict) > 0:
                print("code: " + code + ", name: " + nameDict[code] + ", rank: " + str(rankIdx+1) + "/" + str(len(rankDict)) + ", close price: " + str(closePrice))
                for k, v in divergeDict.items():
                    print(k + " => " + v)
                print("")
        print("========================================================================================")
def isStockOrETF(self, type):
return type == "股票" or type == "ETF"
    def isHistoryExist(self, code):
        """Return True if today's daily history row exists for `code`.

        HACK(review): currently hard-coded to code '3707' — every other code
        returns False, which disables processing for the rest of the market.
        This looks like leftover debugging; confirm intent before removing
        the guard and restoring the general query.
        """
        if code=='3707':
            return session.query(StockHistory).\
                filter(StockHistory.code == code).\
                filter(StockHistory.type == self.translate(HistoryType.DAY, HistoryTypeTo.DB)).\
                filter(StockHistory.date == self.transformDateTimeToStr(datetime.now())).\
                first() is not None
        return False
def isHighRsi(self, highRsi, historyList):
for i, history in enumerate(historyList):
if i < 2 and history.rsi < highRsi:
return False
elif i == 2:
break
return True
def isLowRsi(self, lowRsi, historyList):
for i, history in enumerate(historyList):
if i < 2 and history.rsi > lowRsi:
return False
elif i == 2:
break
return True
def recentHistoryLimit(self, historyType):
if historyType == HistoryType.DAY:
return 40
elif historyType == HistoryType.WEEK:
return 16
else:
return 6
def translate(self, historyType, historyTypeTo):
return historyType.value[historyTypeTo.value]
def finmindtrade(self, code, start, end, dayType):
url = "https://api.finmindtrade.com/api/v4/data"
parameter = {
"dataset": "TaiwanStockPrice",
"data_id": code,
"start_date": start,
"end_date": end,
"token": "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJkYXRlIjoiMjAyMS0xMC0wMSAxNjoyMzoyNSIsInVzZXJfaWQiOiJtY3VpdGVhbGxlbiIsImlwIjoiMTE4LjE2My4xNDcuMTgyIn0.vXMykagq4kOKGrKOusgfAR3yhgcri0N_Wpe1Nb4DOiA"
}
resp = requests.get(url, params=parameter)
json = resp.json()
if json is not None:
for data in resp.json()["data"]:
history = StockHistory(code=code, type=dayType, date=data["date"],
capacity=data["Trading_Volume"], turnover=data["Trading_money"],
open=data["open"], high=data["max"], low=data["min"], close=data["close"])
session.merge(history)
session.commit()
time.sleep(6.1)
# Script entry point: rebuild day/week/month history tables, recompute the
# technical indicators, then print RSI / Williams %R divergence signals
# (RSI thresholds 70/30, Williams %R thresholds -20/-80).
twHistory = TwHistory()
twHistory.dayHistory()
twHistory.weekHistory()
twHistory.monthHistory()
twHistory.technicalIndicator()
twHistory.diverge(70, 30, -20, -80)
f7211ab5f9fd402c221ac94f5f39ef29a6d25331 | 88,960 | py | Python | pandas/tests/arithmetic/test_datetime64.py | naomi172839/pandas | c5f11ab79e5553a28a91fc7036c8dcbfc8cbc697 | [
"BSD-3-Clause"
] | 6 | 2020-09-10T15:03:25.000Z | 2021-04-01T22:48:33.000Z | pandas/tests/arithmetic/test_datetime64.py | naomi172839/pandas | c5f11ab79e5553a28a91fc7036c8dcbfc8cbc697 | [
"BSD-3-Clause"
] | null | null | null | pandas/tests/arithmetic/test_datetime64.py | naomi172839/pandas | c5f11ab79e5553a28a91fc7036c8dcbfc8cbc697 | [
"BSD-3-Clause"
] | 4 | 2020-02-07T05:05:32.000Z | 2020-05-11T06:06:17.000Z | # Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import datetime, timedelta
from itertools import product, starmap
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.compat.numpy import np_datetime64_compat
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import DatetimeArray, TimedeltaArray
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
    """Comparisons of datetime64 arrays against scalars and array-likes,
    parametrized over every box (DataFrame/Series/DatetimeIndex/DatetimeArray)."""

    # Comparison tests for datetime64 vectors fully parametrized over
    # DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison
    # tests will eventually end up here.
    def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
        # Test comparison with zero-dimensional array is unboxed
        tz = tz_naive_fixture
        box = box_with_array
        xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
        dti = date_range("20130101", periods=3, tz=tz)
        other = np.array(dti.to_numpy()[0])
        dtarr = tm.box_expected(dti, box)
        result = dtarr <= other
        expected = np.array([True, False, False])
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(result, expected)
    @pytest.mark.parametrize(
        "other",
        [
            "foo",
            -1,
            99,
            4.0,
            object(),
            timedelta(days=2),
            # GH#19800, GH#19301 datetime.date comparison raises to
            # match DatetimeIndex/Timestamp. This also matches the behavior
            # of stdlib datetime.datetime
            datetime(2001, 1, 1).date(),
            # GH#19301 None and NaN are *not* cast to NaT for comparisons
            None,
            np.nan,
        ],
    )
    def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
        """Comparing against a non-datetime scalar raises/returns per-op rules."""
        # GH#22074, GH#15966
        tz = tz_naive_fixture
        rng = date_range("1/1/2000", periods=10, tz=tz)
        dtarr = tm.box_expected(rng, box_with_array)
        assert_invalid_comparison(dtarr, other, box_with_array)
    @pytest.mark.parametrize(
        "other",
        [
            list(range(10)),
            np.arange(10),
            np.arange(10).astype(np.float32),
            np.arange(10).astype(object),
            pd.timedelta_range("1ns", periods=10).array,
            np.array(pd.timedelta_range("1ns", periods=10)),
            list(pd.timedelta_range("1ns", periods=10)),
            pd.timedelta_range("1 Day", periods=10).astype(object),
            pd.period_range("1971-01-01", freq="D", periods=10).array,
            pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
        ],
    )
    def test_dt64arr_cmp_arraylike_invalid(self, other, tz_naive_fixture):
        """Comparing against non-datetime array-likes is invalid."""
        # We don't parametrize this over box_with_array because listlike
        # other plays poorly with assert_invalid_comparison reversed checks
        tz = tz_naive_fixture
        dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data
        assert_invalid_comparison(dta, other, tm.to_array)
    def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):
        """Mixed object array: == / != work elementwise, ordering ops raise."""
        tz = tz_naive_fixture
        dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data
        other = np.array([0, 1, 2, dta[3], pd.Timedelta(days=1)])
        result = dta == other
        expected = np.array([False, False, False, True, False])
        tm.assert_numpy_array_equal(result, expected)
        result = dta != other
        tm.assert_numpy_array_equal(result, ~expected)
        msg = "Invalid comparison between|Cannot compare type|not supported between"
        with pytest.raises(TypeError, match=msg):
            dta < other
        with pytest.raises(TypeError, match=msg):
            dta > other
        with pytest.raises(TypeError, match=msg):
            dta <= other
        with pytest.raises(TypeError, match=msg):
            dta >= other
    def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
        # GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
        tz = tz_naive_fixture
        box = box_with_array
        xbox = box if box is not pd.Index else np.ndarray
        ts = pd.Timestamp.now(tz)
        ser = pd.Series([ts, pd.NaT])
        # FIXME: Can't transpose because that loses the tz dtype on
        # the NaT column
        obj = tm.box_expected(ser, box, transpose=False)
        expected = pd.Series([True, False], dtype=np.bool_)
        expected = tm.box_expected(expected, xbox, transpose=False)
        result = obj == ts
        tm.assert_equal(result, expected)
class TestDatetime64SeriesComparison:
    """Series-oriented comparison tests for datetime64 data, including NaT
    propagation and scalar Timestamp comparisons on both operand sides."""

    # TODO: moved from tests.series.test_operators; needs cleanup
    @pytest.mark.parametrize(
        "pair",
        [
            (
                [pd.Timestamp("2011-01-01"), NaT, pd.Timestamp("2011-01-03")],
                [NaT, NaT, pd.Timestamp("2011-01-03")],
            ),
            (
                [pd.Timedelta("1 days"), NaT, pd.Timedelta("3 days")],
                [NaT, NaT, pd.Timedelta("3 days")],
            ),
            (
                [pd.Period("2011-01", freq="M"), NaT, pd.Period("2011-03", freq="M")],
                [NaT, NaT, pd.Period("2011-03", freq="M")],
            ),
        ],
    )
    @pytest.mark.parametrize("reverse", [True, False])
    @pytest.mark.parametrize("dtype", [None, object])
    def test_nat_comparisons(self, dtype, index_or_series, reverse, pair):
        """NaT entries compare False for every op except !=, across dtypes."""
        box = index_or_series
        l, r = pair
        if reverse:
            # add lhs / rhs switched data
            l, r = r, l
        left = Series(l, dtype=dtype)
        right = box(r, dtype=dtype)
        # Series, Index
        expected = Series([False, False, True])
        tm.assert_series_equal(left == right, expected)
        expected = Series([True, True, False])
        tm.assert_series_equal(left != right, expected)
        expected = Series([False, False, False])
        tm.assert_series_equal(left < right, expected)
        expected = Series([False, False, False])
        tm.assert_series_equal(left > right, expected)
        expected = Series([False, False, True])
        tm.assert_series_equal(left >= right, expected)
        expected = Series([False, False, True])
        tm.assert_series_equal(left <= right, expected)
    def test_comparison_invalid(self, tz_naive_fixture, box_with_array):
        # GH#4968
        # invalid date/int comparisons
        tz = tz_naive_fixture
        ser = Series(range(5))
        ser2 = Series(pd.date_range("20010101", periods=5, tz=tz))
        ser = tm.box_expected(ser, box_with_array)
        ser2 = tm.box_expected(ser2, box_with_array)
        assert_invalid_comparison(ser, ser2, box_with_array)
    @pytest.mark.parametrize(
        "data",
        [
            [Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
            [Timedelta("1 days"), NaT, Timedelta("3 days")],
            [Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
        ],
    )
    @pytest.mark.parametrize("dtype", [None, object])
    def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
        """Comparing against the NaT scalar: only != is elementwise True."""
        if box_with_array is tm.to_array and dtype is object:
            # dont bother testing ndarray comparison methods as this fails
            # on older numpys (since they check object identity)
            return
        xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
        left = Series(data, dtype=dtype)
        left = tm.box_expected(left, box_with_array)
        expected = [False, False, False]
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(left == NaT, expected)
        tm.assert_equal(NaT == left, expected)
        expected = [True, True, True]
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(left != NaT, expected)
        tm.assert_equal(NaT != left, expected)
        expected = [False, False, False]
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(left < NaT, expected)
        tm.assert_equal(NaT > left, expected)
        tm.assert_equal(left <= NaT, expected)
        tm.assert_equal(NaT >= left, expected)
        tm.assert_equal(left > NaT, expected)
        tm.assert_equal(NaT < left, expected)
        tm.assert_equal(left >= NaT, expected)
        tm.assert_equal(NaT <= left, expected)
    @pytest.mark.parametrize("val", [datetime(2000, 1, 4), datetime(2000, 1, 5)])
    def test_series_comparison_scalars(self, val):
        """Vectorized scalar comparison matches the elementwise Python result."""
        series = Series(date_range("1/1/2000", periods=10))
        result = series > val
        expected = Series([x > val for x in series])
        tm.assert_series_equal(result, expected)
    @pytest.mark.parametrize(
        "left,right", [("lt", "gt"), ("le", "ge"), ("eq", "eq"), ("ne", "ne")]
    )
    def test_timestamp_compare_series(self, left, right):
        # see gh-4982
        # Make sure we can compare Timestamps on the right AND left hand side.
        ser = pd.Series(pd.date_range("20010101", periods=10), name="dates")
        s_nat = ser.copy(deep=True)
        ser[0] = pd.Timestamp("nat")
        ser[3] = pd.Timestamp("nat")
        left_f = getattr(operator, left)
        right_f = getattr(operator, right)
        # No NaT
        expected = left_f(ser, pd.Timestamp("20010109"))
        result = right_f(pd.Timestamp("20010109"), ser)
        tm.assert_series_equal(result, expected)
        # NaT
        expected = left_f(ser, pd.Timestamp("nat"))
        result = right_f(pd.Timestamp("nat"), ser)
        tm.assert_series_equal(result, expected)
        # Compare to Timestamp with series containing NaT
        expected = left_f(s_nat, pd.Timestamp("20010109"))
        result = right_f(pd.Timestamp("20010109"), s_nat)
        tm.assert_series_equal(result, expected)
        # Compare to NaT with series containing NaT
        expected = left_f(s_nat, pd.Timestamp("nat"))
        result = right_f(pd.Timestamp("nat"), s_nat)
        tm.assert_series_equal(result, expected)
    def test_dt64arr_timestamp_equality(self, box_with_array):
        """Equality with self and with NaT-containing scalar elements."""
        # GH#11034
        xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
        ser = pd.Series([pd.Timestamp("2000-01-29 01:59:00"), "NaT"])
        ser = tm.box_expected(ser, box_with_array)
        result = ser != ser
        expected = tm.box_expected([False, True], xbox)
        tm.assert_equal(result, expected)
        result = ser != ser[0]
        expected = tm.box_expected([False, True], xbox)
        tm.assert_equal(result, expected)
        result = ser != ser[1]
        expected = tm.box_expected([True, True], xbox)
        tm.assert_equal(result, expected)
        result = ser == ser
        expected = tm.box_expected([True, False], xbox)
        tm.assert_equal(result, expected)
        result = ser == ser[0]
        expected = tm.box_expected([True, False], xbox)
        tm.assert_equal(result, expected)
        result = ser == ser[1]
        expected = tm.box_expected([False, False], xbox)
        tm.assert_equal(result, expected)
class TestDatetimeIndexComparisons:
    """DatetimeIndex comparison semantics: NaT handling (mirrors float NaN),
    tz-aware vs tz-naive incompatibility, and comparisons against strings,
    lists, timedeltas, and object dtype."""

    # TODO: moved from tests.indexes.test_base; parametrize and de-duplicate
    @pytest.mark.parametrize(
        "op",
        [operator.eq, operator.ne, operator.gt, operator.lt, operator.ge, operator.le],
    )
    def test_comparators(self, op):
        """Index comparison matches the equivalent ndarray comparison."""
        index = tm.makeDateIndex(100)
        element = index[len(index) // 2]
        element = Timestamp(element).to_datetime64()
        arr = np.array(index)
        arr_result = op(arr, element)
        index_result = op(index, element)
        assert isinstance(index_result, np.ndarray)
        tm.assert_numpy_array_equal(arr_result, index_result)
    @pytest.mark.parametrize(
        "other",
        [datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
    )
    def test_dti_cmp_datetimelike(self, other, tz_naive_fixture):
        """All six comparison ops against a datetime-like scalar."""
        tz = tz_naive_fixture
        dti = pd.date_range("2016-01-01", periods=2, tz=tz)
        if tz is not None:
            if isinstance(other, np.datetime64):
                # no tzaware version available
                return
            other = localize_pydatetime(other, dti.tzinfo)
        result = dti == other
        expected = np.array([True, False])
        tm.assert_numpy_array_equal(result, expected)
        result = dti > other
        expected = np.array([False, True])
        tm.assert_numpy_array_equal(result, expected)
        result = dti >= other
        expected = np.array([True, True])
        tm.assert_numpy_array_equal(result, expected)
        result = dti < other
        expected = np.array([False, False])
        tm.assert_numpy_array_equal(result, expected)
        result = dti <= other
        expected = np.array([True, False])
        tm.assert_numpy_array_equal(result, expected)
    @pytest.mark.parametrize("dtype", [None, object])
    def test_dti_cmp_nat(self, dtype, box_with_array):
        """NaT entries compare False except under !=, for both dt64 and object."""
        if box_with_array is tm.to_array and dtype is object:
            # dont bother testing ndarray comparison methods as this fails
            # on older numpys (since they check object identity)
            return
        xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
        left = pd.DatetimeIndex(
            [pd.Timestamp("2011-01-01"), pd.NaT, pd.Timestamp("2011-01-03")]
        )
        right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp("2011-01-03")])
        left = tm.box_expected(left, box_with_array)
        right = tm.box_expected(right, box_with_array)
        lhs, rhs = left, right
        if dtype is object:
            lhs, rhs = left.astype(object), right.astype(object)
        result = rhs == lhs
        expected = np.array([False, False, True])
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(result, expected)
        result = lhs != rhs
        expected = np.array([True, True, False])
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(result, expected)
        expected = np.array([False, False, False])
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(lhs == pd.NaT, expected)
        tm.assert_equal(pd.NaT == rhs, expected)
        expected = np.array([True, True, True])
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(lhs != pd.NaT, expected)
        tm.assert_equal(pd.NaT != lhs, expected)
        expected = np.array([False, False, False])
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(lhs < pd.NaT, expected)
        tm.assert_equal(pd.NaT > lhs, expected)
    def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):
        """NaT in datetime comparisons behaves exactly like NaN in float ones."""
        fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
        fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
        didx1 = pd.DatetimeIndex(
            ["2014-01-01", pd.NaT, "2014-03-01", pd.NaT, "2014-05-01", "2014-07-01"]
        )
        didx2 = pd.DatetimeIndex(
            ["2014-02-01", "2014-03-01", pd.NaT, pd.NaT, "2014-06-01", "2014-07-01"]
        )
        darr = np.array(
            [
                np_datetime64_compat("2014-02-01 00:00Z"),
                np_datetime64_compat("2014-03-01 00:00Z"),
                np_datetime64_compat("nat"),
                np.datetime64("nat"),
                np_datetime64_compat("2014-06-01 00:00Z"),
                np_datetime64_compat("2014-07-01 00:00Z"),
            ]
        )
        cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
        # Check pd.NaT is handled the same as np.nan
        with tm.assert_produces_warning(None):
            for idx1, idx2 in cases:
                result = idx1 < idx2
                expected = np.array([True, False, False, False, True, False])
                tm.assert_numpy_array_equal(result, expected)
                result = idx2 > idx1
                expected = np.array([True, False, False, False, True, False])
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 <= idx2
                expected = np.array([True, False, False, False, True, True])
                tm.assert_numpy_array_equal(result, expected)
                result = idx2 >= idx1
                expected = np.array([True, False, False, False, True, True])
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 == idx2
                expected = np.array([False, False, False, False, False, True])
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 != idx2
                expected = np.array([True, True, True, True, True, False])
                tm.assert_numpy_array_equal(result, expected)
        with tm.assert_produces_warning(None):
            for idx1, val in [(fidx1, np.nan), (didx1, pd.NaT)]:
                result = idx1 < val
                expected = np.array([False, False, False, False, False, False])
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 > val
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 <= val
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 >= val
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 == val
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 != val
                expected = np.array([True, True, True, True, True, True])
                tm.assert_numpy_array_equal(result, expected)
        # Check pd.NaT is handled the same as np.nan
        with tm.assert_produces_warning(None):
            for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
                result = idx1 < val
                expected = np.array([True, False, False, False, False, False])
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 > val
                expected = np.array([False, False, False, False, True, True])
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 <= val
                expected = np.array([True, False, True, False, False, False])
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 >= val
                expected = np.array([False, False, True, False, True, True])
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 == val
                expected = np.array([False, False, True, False, False, False])
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 != val
                expected = np.array([True, True, False, True, True, True])
                tm.assert_numpy_array_equal(result, expected)
    @pytest.mark.parametrize(
        "op",
        [operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le],
    )
    def test_comparison_tzawareness_compat(self, op, box_df_fail):
        """tz-naive vs tz-aware comparison raises; like-vs-like does not."""
        # GH#18162
        box = box_df_fail
        dr = pd.date_range("2016-01-01", periods=6)
        dz = dr.tz_localize("US/Pacific")
        dr = tm.box_expected(dr, box)
        dz = tm.box_expected(dz, box)
        msg = "Cannot compare tz-naive and tz-aware"
        with pytest.raises(TypeError, match=msg):
            op(dr, dz)
        # FIXME: DataFrame case fails to raise for == and !=, wrong
        # message for inequalities
        with pytest.raises(TypeError, match=msg):
            op(dr, list(dz))
        with pytest.raises(TypeError, match=msg):
            op(dr, np.array(list(dz), dtype=object))
        with pytest.raises(TypeError, match=msg):
            op(dz, dr)
        # FIXME: DataFrame case fails to raise for == and !=, wrong
        # message for inequalities
        with pytest.raises(TypeError, match=msg):
            op(dz, list(dr))
        with pytest.raises(TypeError, match=msg):
            op(dz, np.array(list(dr), dtype=object))
        # The aware==aware and naive==naive comparisons should *not* raise
        assert np.all(dr == dr)
        assert np.all(dr == list(dr))
        assert np.all(list(dr) == dr)
        assert np.all(np.array(list(dr), dtype=object) == dr)
        assert np.all(dr == np.array(list(dr), dtype=object))
        assert np.all(dz == dz)
        assert np.all(dz == list(dz))
        assert np.all(list(dz) == dz)
        assert np.all(np.array(list(dz), dtype=object) == dz)
        assert np.all(dz == np.array(list(dz), dtype=object))
    @pytest.mark.parametrize(
        "op",
        [operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le],
    )
    def test_comparison_tzawareness_compat_scalars(self, op, box_with_array):
        """Mismatched tz-awareness against scalar Timestamps raises TypeError."""
        # GH#18162
        dr = pd.date_range("2016-01-01", periods=6)
        dz = dr.tz_localize("US/Pacific")
        dr = tm.box_expected(dr, box_with_array)
        dz = tm.box_expected(dz, box_with_array)
        # Check comparisons against scalar Timestamps
        ts = pd.Timestamp("2000-03-14 01:59")
        ts_tz = pd.Timestamp("2000-03-14 01:59", tz="Europe/Amsterdam")
        assert np.all(dr > ts)
        msg = "Cannot compare tz-naive and tz-aware"
        with pytest.raises(TypeError, match=msg):
            op(dr, ts_tz)
        assert np.all(dz > ts_tz)
        with pytest.raises(TypeError, match=msg):
            op(dz, ts)
        # GH#12601: Check comparison against Timestamps and DatetimeIndex
        with pytest.raises(TypeError, match=msg):
            op(ts, dz)
    @pytest.mark.parametrize(
        "op",
        [operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le],
    )
    @pytest.mark.parametrize(
        "other",
        [datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
    )
    # Bug in NumPy? https://github.com/numpy/numpy/issues/13841
    # Raising in __eq__ will fallback to NumPy, which warns, fails,
    # then re-raises the original exception. So we just need to ignore.
    @pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning")
    @pytest.mark.filterwarnings("ignore:Converting timezone-aware:FutureWarning")
    def test_scalar_comparison_tzawareness(
        self, op, other, tz_aware_fixture, box_with_array
    ):
        """tz-aware array vs tz-naive scalar raises in both orderings."""
        tz = tz_aware_fixture
        dti = pd.date_range("2016-01-01", periods=2, tz=tz)
        dtarr = tm.box_expected(dti, box_with_array)
        msg = "Cannot compare tz-naive and tz-aware"
        with pytest.raises(TypeError, match=msg):
            op(dtarr, other)
        with pytest.raises(TypeError, match=msg):
            op(other, dtarr)
    @pytest.mark.parametrize(
        "op",
        [operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le],
    )
    def test_nat_comparison_tzawareness(self, op):
        # GH#19276
        # tzaware DatetimeIndex should not raise when compared to NaT
        dti = pd.DatetimeIndex(
            ["2014-01-01", pd.NaT, "2014-03-01", pd.NaT, "2014-05-01", "2014-07-01"]
        )
        expected = np.array([op == operator.ne] * len(dti))
        result = op(dti, pd.NaT)
        tm.assert_numpy_array_equal(result, expected)
        result = op(dti.tz_localize("US/Pacific"), pd.NaT)
        tm.assert_numpy_array_equal(result, expected)
    def test_dti_cmp_str(self, tz_naive_fixture):
        # GH#22074
        # regardless of tz, we expect these comparisons are valid
        tz = tz_naive_fixture
        rng = date_range("1/1/2000", periods=10, tz=tz)
        other = "1/1/2000"
        result = rng == other
        expected = np.array([True] + [False] * 9)
        tm.assert_numpy_array_equal(result, expected)
        result = rng != other
        expected = np.array([False] + [True] * 9)
        tm.assert_numpy_array_equal(result, expected)
        result = rng < other
        expected = np.array([False] * 10)
        tm.assert_numpy_array_equal(result, expected)
        result = rng <= other
        expected = np.array([True] + [False] * 9)
        tm.assert_numpy_array_equal(result, expected)
        result = rng > other
        expected = np.array([False] + [True] * 9)
        tm.assert_numpy_array_equal(result, expected)
        result = rng >= other
        expected = np.array([True] * 10)
        tm.assert_numpy_array_equal(result, expected)
    def test_dti_cmp_list(self):
        """Comparison against a plain list matches index-vs-index comparison."""
        rng = date_range("1/1/2000", periods=10)
        result = rng == list(rng)
        expected = rng == rng
        tm.assert_numpy_array_equal(result, expected)
    @pytest.mark.parametrize(
        "other",
        [
            pd.timedelta_range("1D", periods=10),
            pd.timedelta_range("1D", periods=10).to_series(),
            pd.timedelta_range("1D", periods=10).asi8.view("m8[ns]"),
        ],
        ids=lambda x: type(x).__name__,
    )
    def test_dti_cmp_tdi_tzawareness(self, other):
        # GH#22074
        # reversion test that we _don't_ call _assert_tzawareness_compat
        # when comparing against TimedeltaIndex
        dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
        result = dti == other
        expected = np.array([False] * 10)
        tm.assert_numpy_array_equal(result, expected)
        result = dti != other
        expected = np.array([True] * 10)
        tm.assert_numpy_array_equal(result, expected)
        msg = "Invalid comparison between"
        with pytest.raises(TypeError, match=msg):
            dti < other
        with pytest.raises(TypeError, match=msg):
            dti <= other
        with pytest.raises(TypeError, match=msg):
            dti > other
        with pytest.raises(TypeError, match=msg):
            dti >= other
    def test_dti_cmp_object_dtype(self):
        # GH#22074
        dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
        other = dti.astype("O")
        result = dti == other
        expected = np.array([True] * 10)
        tm.assert_numpy_array_equal(result, expected)
        other = dti.tz_localize(None)
        msg = "Cannot compare tz-naive and tz-aware"
        with pytest.raises(TypeError, match=msg):
            # tzawareness failure
            dti != other
        other = np.array(list(dti[:5]) + [Timedelta(days=1)] * 5)
        result = dti == other
        expected = np.array([True] * 5 + [False] * 5)
        tm.assert_numpy_array_equal(result, expected)
        msg = "Cannot compare type"
        with pytest.raises(TypeError, match=msg):
            dti >= other
# ------------------------------------------------------------------
# Arithmetic
class TestDatetime64Arithmetic:
# This class is intended for "finished" tests that are fully parametrized
# over DataFrame/Series/Index/DatetimeArray
# -------------------------------------------------------------
# Addition/Subtraction of timedelta-like
    def test_dt64arr_add_timedeltalike_scalar(
        self, tz_naive_fixture, two_hours, box_with_array
    ):
        """Adding a timedelta-like scalar shifts every element forward."""
        # GH#22005, GH#22163 check DataFrame doesn't raise TypeError
        tz = tz_naive_fixture
        rng = pd.date_range("2000-01-01", "2000-02-01", tz=tz)
        expected = pd.date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz)
        rng = tm.box_expected(rng, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        result = rng + two_hours
        tm.assert_equal(result, expected)
    def test_dt64arr_iadd_timedeltalike_scalar(
        self, tz_naive_fixture, two_hours, box_with_array
    ):
        """In-place += with a timedelta-like scalar matches out-of-place add."""
        tz = tz_naive_fixture
        rng = pd.date_range("2000-01-01", "2000-02-01", tz=tz)
        expected = pd.date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz)
        rng = tm.box_expected(rng, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        rng += two_hours
        tm.assert_equal(rng, expected)
    def test_dt64arr_sub_timedeltalike_scalar(
        self, tz_naive_fixture, two_hours, box_with_array
    ):
        """Subtracting a timedelta-like scalar shifts every element backward."""
        tz = tz_naive_fixture
        rng = pd.date_range("2000-01-01", "2000-02-01", tz=tz)
        expected = pd.date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz)
        rng = tm.box_expected(rng, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        result = rng - two_hours
        tm.assert_equal(result, expected)
    def test_dt64arr_isub_timedeltalike_scalar(
        self, tz_naive_fixture, two_hours, box_with_array
    ):
        """In-place -= with a timedelta-like scalar matches out-of-place sub."""
        tz = tz_naive_fixture
        rng = pd.date_range("2000-01-01", "2000-02-01", tz=tz)
        expected = pd.date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz)
        rng = tm.box_expected(rng, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        rng -= two_hours
        tm.assert_equal(rng, expected)
    # TODO: redundant with test_dt64arr_add_timedeltalike_scalar
    def test_dt64arr_add_td64_scalar(self, box_with_array):
        """Addition with np.timedelta64 scalars (second and millisecond units),
        in both operand orders."""
        # scalar timedeltas/np.timedelta64 objects
        # operate with np.timedelta64 correctly
        ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
        expected = Series(
            [Timestamp("20130101 9:01:01"), Timestamp("20130101 9:02:01")]
        )
        dtarr = tm.box_expected(ser, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        result = dtarr + np.timedelta64(1, "s")
        tm.assert_equal(result, expected)
        result = np.timedelta64(1, "s") + dtarr
        tm.assert_equal(result, expected)
        expected = Series(
            [Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
        )
        expected = tm.box_expected(expected, box_with_array)
        result = dtarr + np.timedelta64(5, "ms")
        tm.assert_equal(result, expected)
        result = np.timedelta64(5, "ms") + dtarr
        tm.assert_equal(result, expected)
    def test_dt64arr_add_sub_td64_nat(self, box_with_array, tz_naive_fixture):
        """Adding/subtracting timedelta64("NaT") yields all-NaT; the reversed
        subtraction (td64 NaT minus datetimes) raises TypeError."""
        # GH#23320 special handling for timedelta64("NaT")
        tz = tz_naive_fixture
        dti = pd.date_range("1994-04-01", periods=9, tz=tz, freq="QS")
        other = np.timedelta64("NaT")
        expected = pd.DatetimeIndex(["NaT"] * 9, tz=tz)
        # FIXME: fails with transpose=True due to tz-aware DataFrame
        # transpose bug
        obj = tm.box_expected(dti, box_with_array, transpose=False)
        expected = tm.box_expected(expected, box_with_array, transpose=False)
        result = obj + other
        tm.assert_equal(result, expected)
        result = other + obj
        tm.assert_equal(result, expected)
        result = obj - other
        tm.assert_equal(result, expected)
        msg = "cannot subtract"
        with pytest.raises(TypeError, match=msg):
            other - obj
    def test_dt64arr_add_sub_td64ndarray(self, tz_naive_fixture, box_with_array):
        """dt64 box +/- a raw timedelta64 ndarray shifts elementwise."""
        tz = tz_naive_fixture
        dti = pd.date_range("2016-01-01", periods=3, tz=tz)
        tdi = pd.TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
        tdarr = tdi.values

        expected = pd.date_range("2015-12-31", periods=3, tz=tz)
        dtarr = tm.box_expected(dti, box_with_array)
        expected = tm.box_expected(expected, box_with_array)

        # addition commutes
        result = dtarr + tdarr
        tm.assert_equal(result, expected)
        result = tdarr + dtarr
        tm.assert_equal(result, expected)

        expected = pd.date_range("2016-01-02", periods=3, tz=tz)
        expected = tm.box_expected(expected, box_with_array)
        result = dtarr - tdarr
        tm.assert_equal(result, expected)

        # td64 array - dt64 box is not defined
        msg = "cannot subtract|(bad|unsupported) operand type for unary"
        with pytest.raises(TypeError, match=msg):
            tdarr - dtarr
    # -----------------------------------------------------------------
    # Subtraction of datetime-like scalars

    @pytest.mark.parametrize(
        "ts",
        [
            pd.Timestamp("2013-01-01"),
            pd.Timestamp("2013-01-01").to_pydatetime(),
            pd.Timestamp("2013-01-01").to_datetime64(),
        ],
    )
    def test_dt64arr_sub_dtscalar(self, box_with_array, ts):
        """Subtracting any datetime-like scalar yields timedelta64 dtype."""
        # GH#8554, GH#22163 DataFrame op should _not_ return dt64 dtype
        idx = pd.date_range("2013-01-01", periods=3)
        idx = tm.box_expected(idx, box_with_array)
        expected = pd.TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
        expected = tm.box_expected(expected, box_with_array)
        result = idx - ts
        tm.assert_equal(result, expected)
def test_dt64arr_sub_datetime64_not_ns(self, box_with_array):
# GH#7996, GH#22163 ensure non-nano datetime64 is converted to nano
# for DataFrame operation
dt64 = np.datetime64("2013-01-01")
assert dt64.dtype == "datetime64[D]"
dti = pd.date_range("20130101", periods=3)
dtarr = tm.box_expected(dti, box_with_array)
expected = pd.TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = dtarr - dt64
tm.assert_equal(result, expected)
result = dt64 - dtarr
tm.assert_equal(result, -expected)
def test_dt64arr_sub_timestamp(self, box_with_array):
ser = pd.date_range("2014-03-17", periods=2, freq="D", tz="US/Eastern")
ts = ser[0]
ser = tm.box_expected(ser, box_with_array)
delta_series = pd.Series([np.timedelta64(0, "D"), np.timedelta64(1, "D")])
expected = tm.box_expected(delta_series, box_with_array)
tm.assert_equal(ser - ts, expected)
tm.assert_equal(ts - ser, -expected)
def test_dt64arr_sub_NaT(self, box_with_array):
# GH#18808
dti = pd.DatetimeIndex([pd.NaT, pd.Timestamp("19900315")])
ser = tm.box_expected(dti, box_with_array)
result = ser - pd.NaT
expected = pd.Series([pd.NaT, pd.NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
dti_tz = dti.tz_localize("Asia/Tokyo")
ser_tz = tm.box_expected(dti_tz, box_with_array)
result = ser_tz - pd.NaT
expected = pd.Series([pd.NaT, pd.NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
    # -------------------------------------------------------------
    # Subtraction of datetime-like array-like

    def test_dt64arr_sub_dt64object_array(self, box_with_array, tz_naive_fixture):
        """Subtracting an object-dtype datetime array matches dt64 subtraction."""
        dti = pd.date_range("2016-01-01", periods=3, tz=tz_naive_fixture)
        expected = dti - dti

        obj = tm.box_expected(dti, box_with_array)
        expected = tm.box_expected(expected, box_with_array)

        # object-dtype arithmetic is elementwise, so non-DataFrame boxes warn
        warn = PerformanceWarning if box_with_array is not pd.DataFrame else None
        with tm.assert_produces_warning(warn):
            result = obj - obj.astype(object)
        tm.assert_equal(result, expected)
def test_dt64arr_naive_sub_dt64ndarray(self, box_with_array):
dti = pd.date_range("2016-01-01", periods=3, tz=None)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
expected = dtarr - dtarr
result = dtarr - dt64vals
tm.assert_equal(result, expected)
result = dt64vals - dtarr
tm.assert_equal(result, expected)
    def test_dt64arr_aware_sub_dt64ndarray_raises(
        self, tz_aware_fixture, box_with_array
    ):
        """tz-aware box minus tz-naive ndarray raises TypeError both ways."""
        tz = tz_aware_fixture
        dti = pd.date_range("2016-01-01", periods=3, tz=tz)
        dt64vals = dti.values
        dtarr = tm.box_expected(dti, box_with_array)
        msg = "subtraction must have the same timezones or"
        with pytest.raises(TypeError, match=msg):
            dtarr - dt64vals
        with pytest.raises(TypeError, match=msg):
            dt64vals - dtarr
    # -------------------------------------------------------------
    # Addition of datetime-like others (invalid)

    def test_dt64arr_add_dt64ndarray_raises(self, tz_naive_fixture, box_with_array):
        """Adding two datetime-like arrays is invalid and raises TypeError."""
        tz = tz_naive_fixture
        dti = pd.date_range("2016-01-01", periods=3, tz=tz)
        dt64vals = dti.values
        dtarr = tm.box_expected(dti, box_with_array)
        msg = "cannot add"
        with pytest.raises(TypeError, match=msg):
            dtarr + dt64vals
        with pytest.raises(TypeError, match=msg):
            dt64vals + dtarr
    def test_dt64arr_add_timestamp_raises(self, box_with_array):
        """Adding a Timestamp scalar to a dt64 box raises TypeError."""
        # GH#22163 ensure DataFrame doesn't cast Timestamp to i8
        idx = DatetimeIndex(["2011-01-01", "2011-01-02"])
        idx = tm.box_expected(idx, box_with_array)
        msg = "cannot add"
        with pytest.raises(TypeError, match=msg):
            idx + Timestamp("2011-01-01")
        with pytest.raises(TypeError, match=msg):
            Timestamp("2011-01-01") + idx
    # -------------------------------------------------------------
    # Other Invalid Addition/Subtraction

    @pytest.mark.parametrize(
        "other",
        [
            3.14,
            np.array([2.0, 3.0]),
            # GH#13078 datetime +/- Period is invalid
            pd.Period("2011-01-01", freq="D"),
        ],
    )
    @pytest.mark.parametrize("dti_freq", [None, "D"])
    def test_dt64arr_add_sub_invalid(self, dti_freq, other, box_with_array):
        """Floats, float arrays, and Periods cannot be added to/subtracted from dt64."""
        dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
        dtarr = tm.box_expected(dti, box_with_array)
        # message varies by box and numpy path, so match any of these
        msg = "|".join(
            [
                "unsupported operand type",
                "cannot (add|subtract)",
                "cannot use operands with types",
                "ufunc '?(add|subtract)'? cannot use operands with types",
            ]
        )
        assert_invalid_addsub_type(dtarr, other, msg)
    @pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "H"])
    @pytest.mark.parametrize("dti_freq", [None, "D"])
    def test_dt64arr_add_sub_parr(
        self, dti_freq, pi_freq, box_with_array, box_with_array2
    ):
        """dt64 box +/- a PeriodIndex box raises TypeError (GH#20049)."""
        # GH#20049 subtracting PeriodIndex should raise TypeError
        dti = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
        pi = dti.to_period(pi_freq)
        dtarr = tm.box_expected(dti, box_with_array)
        parr = tm.box_expected(pi, box_with_array2)
        # message varies by box combination, so match any of these
        msg = "|".join(
            [
                "cannot (add|subtract)",
                "unsupported operand",
                "descriptor.*requires",
                "ufunc.*cannot use operands",
            ]
        )
        assert_invalid_addsub_type(dtarr, parr, msg)
class TestDatetime64DateOffsetArithmetic:
    """Arithmetic between datetime64-dtype boxes and pd.offsets DateOffsets."""

    # -------------------------------------------------------------
    # Tick DateOffsets

    # TODO: parametrize over timezone?
    def test_dt64arr_series_add_tick_DateOffset(self, box_with_array):
        """Adding a Tick offset (Second) commutes (GH#4532)."""
        # GH#4532
        # operate with pd.offsets
        ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
        expected = Series(
            [Timestamp("20130101 9:01:05"), Timestamp("20130101 9:02:05")]
        )

        ser = tm.box_expected(ser, box_with_array)
        expected = tm.box_expected(expected, box_with_array)

        result = ser + pd.offsets.Second(5)
        tm.assert_equal(result, expected)

        result2 = pd.offsets.Second(5) + ser
        tm.assert_equal(result2, expected)

    def test_dt64arr_series_sub_tick_DateOffset(self, box_with_array):
        """Subtracting a Tick offset works; offset - dt64 raises (GH#4532)."""
        # GH#4532
        # operate with pd.offsets
        ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
        expected = Series(
            [Timestamp("20130101 9:00:55"), Timestamp("20130101 9:01:55")]
        )

        ser = tm.box_expected(ser, box_with_array)
        expected = tm.box_expected(expected, box_with_array)

        result = ser - pd.offsets.Second(5)
        tm.assert_equal(result, expected)

        result2 = -pd.offsets.Second(5) + ser
        tm.assert_equal(result2, expected)

        msg = "(bad|unsupported) operand type for unary"
        with pytest.raises(TypeError, match=msg):
            pd.offsets.Second(5) - ser

    @pytest.mark.parametrize(
        "cls_name", ["Day", "Hour", "Minute", "Second", "Milli", "Micro", "Nano"]
    )
    def test_dt64arr_add_sub_tick_DateOffset_smoke(self, cls_name, box_with_array):
        """Smoke test: every Tick subclass can be added/subtracted (GH#4532)."""
        # GH#4532
        # smoke tests for valid DateOffsets
        ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
        ser = tm.box_expected(ser, box_with_array)

        offset_cls = getattr(pd.offsets, cls_name)
        ser + offset_cls(5)
        offset_cls(5) + ser
        ser - offset_cls(5)

    def test_dti_add_tick_tzaware(self, tz_aware_fixture, box_with_array):
        """Tick addition respects timezones; DataFrame keeps dt64tz dtype."""
        # GH#21610, GH#22163 ensure DataFrame doesn't return object-dtype
        tz = tz_aware_fixture
        if tz == "US/Pacific":
            dates = date_range("2012-11-01", periods=3, tz=tz)
            offset = dates + pd.offsets.Hour(5)
            assert dates[0] + pd.offsets.Hour(5) == offset[0]

        dates = date_range("2010-11-01 00:00", periods=3, tz=tz, freq="H")
        expected = DatetimeIndex(
            ["2010-11-01 05:00", "2010-11-01 06:00", "2010-11-01 07:00"],
            freq="H",
            tz=tz,
        )

        dates = tm.box_expected(dates, box_with_array)
        expected = tm.box_expected(expected, box_with_array)

        # TODO: parametrize over the scalar being added? radd? sub?
        offset = dates + pd.offsets.Hour(5)
        tm.assert_equal(offset, expected)
        offset = dates + np.timedelta64(5, "h")
        tm.assert_equal(offset, expected)
        offset = dates + timedelta(hours=5)
        tm.assert_equal(offset, expected)

    # -------------------------------------------------------------
    # RelativeDelta DateOffsets

    def test_dt64arr_add_sub_relativedelta_offsets(self, box_with_array):
        """DateOffset relativedelta kwargs match pointwise scalar arithmetic."""
        # GH#10699
        vec = DatetimeIndex(
            [
                Timestamp("2000-01-05 00:15:00"),
                Timestamp("2000-01-31 00:23:00"),
                Timestamp("2000-01-01"),
                Timestamp("2000-03-31"),
                Timestamp("2000-02-29"),
                Timestamp("2000-12-31"),
                Timestamp("2000-05-15"),
                Timestamp("2001-06-15"),
            ]
        )
        vec = tm.box_expected(vec, box_with_array)
        # DataFrame has no scalar iteration; squeeze to a Series for pointwise ops
        vec_items = vec.squeeze() if box_with_array is pd.DataFrame else vec

        # DateOffset relativedelta fastpath
        relative_kwargs = [
            ("years", 2),
            ("months", 5),
            ("days", 3),
            ("hours", 5),
            ("minutes", 10),
            ("seconds", 2),
            ("microseconds", 5),
        ]
        for i, kwd in enumerate(relative_kwargs):
            off = pd.DateOffset(**dict([kwd]))

            expected = DatetimeIndex([x + off for x in vec_items])
            expected = tm.box_expected(expected, box_with_array)
            tm.assert_equal(expected, vec + off)

            expected = DatetimeIndex([x - off for x in vec_items])
            expected = tm.box_expected(expected, box_with_array)
            tm.assert_equal(expected, vec - off)

            # also exercise the cumulative offset built from all kwargs so far
            off = pd.DateOffset(**dict(relative_kwargs[: i + 1]))

            expected = DatetimeIndex([x + off for x in vec_items])
            expected = tm.box_expected(expected, box_with_array)
            tm.assert_equal(expected, vec + off)

            expected = DatetimeIndex([x - off for x in vec_items])
            expected = tm.box_expected(expected, box_with_array)
            tm.assert_equal(expected, vec - off)

            msg = "(bad|unsupported) operand type for unary"
            with pytest.raises(TypeError, match=msg):
                off - vec

    # -------------------------------------------------------------
    # Non-Tick, Non-RelativeDelta DateOffsets

    # TODO: redundant with test_dt64arr_add_sub_DateOffset? that includes
    # tz-aware cases which this does not
    @pytest.mark.parametrize(
        "cls_and_kwargs",
        [
            "YearBegin",
            ("YearBegin", {"month": 5}),
            "YearEnd",
            ("YearEnd", {"month": 5}),
            "MonthBegin",
            "MonthEnd",
            "SemiMonthEnd",
            "SemiMonthBegin",
            "Week",
            ("Week", {"weekday": 3}),
            "Week",
            ("Week", {"weekday": 6}),
            "BusinessDay",
            "BDay",
            "QuarterEnd",
            "QuarterBegin",
            "CustomBusinessDay",
            "CDay",
            "CBMonthEnd",
            "CBMonthBegin",
            "BMonthBegin",
            "BMonthEnd",
            "BusinessHour",
            "BYearBegin",
            "BYearEnd",
            "BQuarterBegin",
            ("LastWeekOfMonth", {"weekday": 2}),
            (
                "FY5253Quarter",
                {
                    "qtr_with_extra_week": 1,
                    "startingMonth": 1,
                    "weekday": 2,
                    "variation": "nearest",
                },
            ),
            ("FY5253", {"weekday": 0, "startingMonth": 2, "variation": "nearest"}),
            ("WeekOfMonth", {"weekday": 2, "week": 2}),
            "Easter",
            ("DateOffset", {"day": 4}),
            ("DateOffset", {"month": 5}),
        ],
    )
    @pytest.mark.parametrize("normalize", [True, False])
    @pytest.mark.parametrize("n", [0, 5])
    def test_dt64arr_add_sub_DateOffsets(
        self, box_with_array, n, normalize, cls_and_kwargs
    ):
        """Every non-Tick DateOffset matches pointwise scalar arithmetic."""
        # GH#10699
        # assert vectorized operation matches pointwise operations
        if isinstance(cls_and_kwargs, tuple):
            # If cls_name param is a tuple, then 2nd entry is kwargs for
            # the offset constructor
            cls_name, kwargs = cls_and_kwargs
        else:
            cls_name = cls_and_kwargs
            kwargs = {}

        if n == 0 and cls_name in [
            "WeekOfMonth",
            "LastWeekOfMonth",
            "FY5253Quarter",
            "FY5253",
        ]:
            # passing n = 0 is invalid for these offset classes
            return

        vec = DatetimeIndex(
            [
                Timestamp("2000-01-05 00:15:00"),
                Timestamp("2000-01-31 00:23:00"),
                Timestamp("2000-01-01"),
                Timestamp("2000-03-31"),
                Timestamp("2000-02-29"),
                Timestamp("2000-12-31"),
                Timestamp("2000-05-15"),
                Timestamp("2001-06-15"),
            ]
        )
        vec = tm.box_expected(vec, box_with_array)
        vec_items = vec.squeeze() if box_with_array is pd.DataFrame else vec

        offset_cls = getattr(pd.offsets, cls_name)

        with warnings.catch_warnings(record=True):
            # pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
            # applied to Series or DatetimeIndex
            # we aren't testing that here, so ignore.
            warnings.simplefilter("ignore", PerformanceWarning)

            offset = offset_cls(n, normalize=normalize, **kwargs)

            expected = DatetimeIndex([x + offset for x in vec_items])
            expected = tm.box_expected(expected, box_with_array)
            tm.assert_equal(expected, vec + offset)

            expected = DatetimeIndex([x - offset for x in vec_items])
            expected = tm.box_expected(expected, box_with_array)
            tm.assert_equal(expected, vec - offset)

            expected = DatetimeIndex([offset + x for x in vec_items])
            expected = tm.box_expected(expected, box_with_array)
            tm.assert_equal(expected, offset + vec)

            msg = "(bad|unsupported) operand type for unary"
            with pytest.raises(TypeError, match=msg):
                offset - vec

    def test_dt64arr_add_sub_DateOffset(self, box_with_array):
        """DateOffset and named offsets shift tz-naive and tz-aware boxes."""
        # GH#10699
        s = date_range("2000-01-01", "2000-01-31", name="a")
        s = tm.box_expected(s, box_with_array)
        result = s + pd.DateOffset(years=1)
        result2 = pd.DateOffset(years=1) + s
        exp = date_range("2001-01-01", "2001-01-31", name="a")
        exp = tm.box_expected(exp, box_with_array)
        tm.assert_equal(result, exp)
        tm.assert_equal(result2, exp)

        result = s - pd.DateOffset(years=1)
        exp = date_range("1999-01-01", "1999-01-31", name="a")
        exp = tm.box_expected(exp, box_with_array)
        tm.assert_equal(result, exp)

        s = DatetimeIndex(
            [
                Timestamp("2000-01-15 00:15:00", tz="US/Central"),
                Timestamp("2000-02-15", tz="US/Central"),
            ],
            name="a",
        )
        s = tm.box_expected(s, box_with_array)
        result = s + pd.offsets.Day()
        result2 = pd.offsets.Day() + s
        exp = DatetimeIndex(
            [
                Timestamp("2000-01-16 00:15:00", tz="US/Central"),
                Timestamp("2000-02-16", tz="US/Central"),
            ],
            name="a",
        )
        exp = tm.box_expected(exp, box_with_array)
        tm.assert_equal(result, exp)
        tm.assert_equal(result2, exp)

        s = DatetimeIndex(
            [
                Timestamp("2000-01-15 00:15:00", tz="US/Central"),
                Timestamp("2000-02-15", tz="US/Central"),
            ],
            name="a",
        )
        s = tm.box_expected(s, box_with_array)
        result = s + pd.offsets.MonthEnd()
        result2 = pd.offsets.MonthEnd() + s
        exp = DatetimeIndex(
            [
                Timestamp("2000-01-31 00:15:00", tz="US/Central"),
                Timestamp("2000-02-29", tz="US/Central"),
            ],
            name="a",
        )
        exp = tm.box_expected(exp, box_with_array)
        tm.assert_equal(result, exp)
        tm.assert_equal(result2, exp)

    # TODO: __sub__, __rsub__
    def test_dt64arr_add_mixed_offset_array(self, box_with_array):
        """Adding an array of (mixed) DateOffsets applies each elementwise."""
        # GH#10699
        # array of offsets
        s = DatetimeIndex([Timestamp("2000-1-1"), Timestamp("2000-2-1")])
        s = tm.box_expected(s, box_with_array)

        # elementwise offset application warns for non-DataFrame boxes
        warn = None if box_with_array is pd.DataFrame else PerformanceWarning
        with tm.assert_produces_warning(warn):
            other = pd.Index([pd.offsets.DateOffset(years=1), pd.offsets.MonthEnd()])
            other = tm.box_expected(other, box_with_array)
            result = s + other
            exp = DatetimeIndex([Timestamp("2001-1-1"), Timestamp("2000-2-29")])
            exp = tm.box_expected(exp, box_with_array)
            tm.assert_equal(result, exp)

            # same offset
            other = pd.Index(
                [pd.offsets.DateOffset(years=1), pd.offsets.DateOffset(years=1)]
            )
            other = tm.box_expected(other, box_with_array)
            result = s + other
            exp = DatetimeIndex([Timestamp("2001-1-1"), Timestamp("2001-2-1")])
            exp = tm.box_expected(exp, box_with_array)
            tm.assert_equal(result, exp)

    # TODO: overlap with test_dt64arr_add_mixed_offset_array?
    def test_dt64arr_add_sub_offset_ndarray(self, tz_naive_fixture, box_with_array):
        """An ndarray of DateOffsets applies elementwise (GH#18849)."""
        # GH#18849
        tz = tz_naive_fixture
        dti = pd.date_range("2017-01-01", periods=2, tz=tz)
        dtarr = tm.box_expected(dti, box_with_array)

        other = np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])

        # elementwise offset application warns for non-DataFrame boxes
        warn = None if box_with_array is pd.DataFrame else PerformanceWarning
        with tm.assert_produces_warning(warn):
            res = dtarr + other
        expected = DatetimeIndex(
            [dti[n] + other[n] for n in range(len(dti))], name=dti.name, freq="infer"
        )
        expected = tm.box_expected(expected, box_with_array)
        tm.assert_equal(res, expected)

        with tm.assert_produces_warning(warn):
            res2 = other + dtarr
        tm.assert_equal(res2, expected)

        with tm.assert_produces_warning(warn):
            res = dtarr - other
        expected = DatetimeIndex(
            [dti[n] - other[n] for n in range(len(dti))], name=dti.name, freq="infer"
        )
        expected = tm.box_expected(expected, box_with_array)
        tm.assert_equal(res, expected)

    @pytest.mark.parametrize(
        "op, offset, exp, exp_freq",
        [
            (
                "__add__",
                pd.DateOffset(months=3, days=10),
                [
                    Timestamp("2014-04-11"),
                    Timestamp("2015-04-11"),
                    Timestamp("2016-04-11"),
                    Timestamp("2017-04-11"),
                ],
                None,
            ),
            (
                "__add__",
                pd.DateOffset(months=3),
                [
                    Timestamp("2014-04-01"),
                    Timestamp("2015-04-01"),
                    Timestamp("2016-04-01"),
                    Timestamp("2017-04-01"),
                ],
                "AS-APR",
            ),
            (
                "__sub__",
                pd.DateOffset(months=3, days=10),
                [
                    Timestamp("2013-09-21"),
                    Timestamp("2014-09-21"),
                    Timestamp("2015-09-21"),
                    Timestamp("2016-09-21"),
                ],
                None,
            ),
            (
                "__sub__",
                pd.DateOffset(months=3),
                [
                    Timestamp("2013-10-01"),
                    Timestamp("2014-10-01"),
                    Timestamp("2015-10-01"),
                    Timestamp("2016-10-01"),
                ],
                "AS-OCT",
            ),
        ],
    )
    def test_dti_add_sub_nonzero_mth_offset(
        self, op, offset, exp, exp_freq, tz_aware_fixture, box_with_array
    ):
        """Multi-field DateOffset add/sub on annual-freq tz-aware data (GH#26258)."""
        # GH 26258
        tz = tz_aware_fixture
        date = date_range(start="01 Jan 2014", end="01 Jan 2017", freq="AS", tz=tz)
        date = tm.box_expected(date, box_with_array, False)
        mth = getattr(date, op)
        result = mth(offset)

        expected = pd.DatetimeIndex(exp, tz=tz, freq=exp_freq)
        expected = tm.box_expected(expected, box_with_array, False)
        tm.assert_equal(result, expected)
class TestDatetime64OverflowHandling:
    """Overflow behavior of datetime64 arithmetic near Timestamp.min/max."""

    # TODO: box + de-duplicate

    def test_dt64_overflow_masking(self, box_with_array):
        """NaT positions are masked so near-epoch subtraction does not overflow."""
        # GH#25317
        left = Series([Timestamp("1969-12-31")])
        right = Series([NaT])

        left = tm.box_expected(left, box_with_array)
        right = tm.box_expected(right, box_with_array)

        expected = TimedeltaIndex([NaT])
        expected = tm.box_expected(expected, box_with_array)

        result = left - right
        tm.assert_equal(result, expected)

    def test_dt64_series_arith_overflow(self):
        """Out-of-bounds results raise OverflowError; NaT entries are skipped."""
        # GH#12534, fixed by GH#19024
        dt = pd.Timestamp("1700-01-31")
        td = pd.Timedelta("20000 Days")
        dti = pd.date_range("1949-09-30", freq="100Y", periods=4)
        ser = pd.Series(dti)
        msg = "Overflow in int64 addition"
        with pytest.raises(OverflowError, match=msg):
            ser - dt
        with pytest.raises(OverflowError, match=msg):
            dt - ser
        with pytest.raises(OverflowError, match=msg):
            ser + td
        with pytest.raises(OverflowError, match=msg):
            td + ser

        # with the overflowing entry replaced by NaT, the op succeeds
        ser.iloc[-1] = pd.NaT
        expected = pd.Series(
            ["2004-10-03", "2104-10-04", "2204-10-04", "NaT"], dtype="datetime64[ns]"
        )
        res = ser + td
        tm.assert_series_equal(res, expected)
        res = td + ser
        tm.assert_series_equal(res, expected)

        ser.iloc[1:] = pd.NaT
        expected = pd.Series(
            ["91279 Days", "NaT", "NaT", "NaT"], dtype="timedelta64[ns]"
        )
        res = ser - dt
        tm.assert_series_equal(res, expected)
        res = dt - ser
        tm.assert_series_equal(res, -expected)

    def test_datetimeindex_sub_timestamp_overflow(self):
        """Subtracting scalar datetime variants overflows past Timestamp.min/max."""
        dtimax = pd.to_datetime(["now", pd.Timestamp.max])
        dtimin = pd.to_datetime(["now", pd.Timestamp.min])

        # all scalar representations of the same datetime behave alike
        tsneg = Timestamp("1950-01-01")
        ts_neg_variants = [
            tsneg,
            tsneg.to_pydatetime(),
            tsneg.to_datetime64().astype("datetime64[ns]"),
            tsneg.to_datetime64().astype("datetime64[D]"),
        ]

        tspos = Timestamp("1980-01-01")
        ts_pos_variants = [
            tspos,
            tspos.to_pydatetime(),
            tspos.to_datetime64().astype("datetime64[ns]"),
            tspos.to_datetime64().astype("datetime64[D]"),
        ]
        msg = "Overflow in int64 addition"
        for variant in ts_neg_variants:
            with pytest.raises(OverflowError, match=msg):
                dtimax - variant

        expected = pd.Timestamp.max.value - tspos.value
        for variant in ts_pos_variants:
            res = dtimax - variant
            assert res[1].value == expected

        expected = pd.Timestamp.min.value - tsneg.value
        for variant in ts_neg_variants:
            res = dtimin - variant
            assert res[1].value == expected

        for variant in ts_pos_variants:
            with pytest.raises(OverflowError, match=msg):
                dtimin - variant

    def test_datetimeindex_sub_datetimeindex_overflow(self):
        """DatetimeIndex - DatetimeIndex overflow checks (GH#22492, GH#22508)."""
        # GH#22492, GH#22508
        dtimax = pd.to_datetime(["now", pd.Timestamp.max])
        dtimin = pd.to_datetime(["now", pd.Timestamp.min])

        ts_neg = pd.to_datetime(["1950-01-01", "1950-01-01"])
        ts_pos = pd.to_datetime(["1980-01-01", "1980-01-01"])

        # General tests
        expected = pd.Timestamp.max.value - ts_pos[1].value
        result = dtimax - ts_pos
        assert result[1].value == expected

        expected = pd.Timestamp.min.value - ts_neg[1].value
        result = dtimin - ts_neg
        assert result[1].value == expected
        msg = "Overflow in int64 addition"
        with pytest.raises(OverflowError, match=msg):
            dtimax - ts_neg
        with pytest.raises(OverflowError, match=msg):
            dtimin - ts_pos

        # Edge cases
        tmin = pd.to_datetime([pd.Timestamp.min])
        t1 = tmin + pd.Timedelta.max + pd.Timedelta("1us")
        with pytest.raises(OverflowError, match=msg):
            t1 - tmin

        tmax = pd.to_datetime([pd.Timestamp.max])
        t2 = tmax + pd.Timedelta.min - pd.Timedelta("1us")
        with pytest.raises(OverflowError, match=msg):
            tmax - t2
class TestTimestampSeriesArithmetic:
    """Arithmetic on Series with datetime64 dtype."""

    def test_empty_series_add_sub(self):
        """Empty dt64/td64 Series add/sub works; td64 - dt64 raises (GH#13844)."""
        # GH#13844
        a = Series(dtype="M8[ns]")
        b = Series(dtype="m8[ns]")
        tm.assert_series_equal(a, a + b)
        tm.assert_series_equal(a, a - b)
        tm.assert_series_equal(a, b + a)
        msg = "cannot subtract"
        with pytest.raises(TypeError, match=msg):
            b - a

    def test_operators_datetimelike(self):
        """Smoke test: dt64 and td64 Series combine without raising."""
        # ## timedelta64 ###
        td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
        td1.iloc[2] = np.nan

        # ## datetime64 ###
        dt1 = Series(
            [
                pd.Timestamp("20111230"),
                pd.Timestamp("20120101"),
                pd.Timestamp("20120103"),
            ]
        )
        dt1.iloc[2] = np.nan
        dt2 = Series(
            [
                pd.Timestamp("20111231"),
                pd.Timestamp("20120102"),
                pd.Timestamp("20120104"),
            ]
        )
        dt1 - dt2
        dt2 - dt1

        # datetime64 with timedelta
        dt1 + td1
        td1 + dt1
        dt1 - td1

        # timedelta with datetime64
        td1 + dt1
        dt1 + td1

    def test_dt64ser_sub_datetime_dtype(self):
        """Series[dt64] minus stdlib datetime yields timedelta64[ns] dtype."""
        ts = Timestamp(datetime(1993, 1, 7, 13, 30, 00))
        dt = datetime(1993, 6, 22, 13, 30)
        ser = Series([ts])
        result = pd.to_timedelta(np.abs(ser - dt))
        assert result.dtype == "timedelta64[ns]"

    # -------------------------------------------------------------
    # TODO: This next block of tests came from tests.series.test_operators,
    # needs to be de-duplicated and parametrized over `box` classes

    def test_operators_datetimelike_invalid(self, all_arithmetic_operators):
        """Arithmetic ops other than add/sub on datetimelike Series raise."""
        # these are all TypeError ops
        op_str = all_arithmetic_operators

        def check(get_ser, test_ser):
            # check that we are getting a TypeError
            # with 'operate' (from core/ops.py) for the ops that are not
            # defined
            op = getattr(get_ser, op_str, None)
            # Previously, _validate_for_numeric_binop in core/indexes/base.py
            # did this for us.
            with pytest.raises(
                TypeError, match="operate|[cC]annot|unsupported operand"
            ):
                op(test_ser)

        # ## timedelta64 ###
        td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
        td1.iloc[2] = np.nan

        # ## datetime64 ###
        dt1 = Series(
            [Timestamp("20111230"), Timestamp("20120101"), Timestamp("20120103")]
        )
        dt1.iloc[2] = np.nan
        dt2 = Series(
            [Timestamp("20111231"), Timestamp("20120102"), Timestamp("20120104")]
        )
        if op_str not in ["__sub__", "__rsub__"]:
            check(dt1, dt2)

        # ## datetime64 with timedelta ###
        # TODO(jreback) __rsub__ should raise?
        if op_str not in ["__add__", "__radd__", "__sub__"]:
            check(dt1, td1)

        # 8260, 10763
        # datetime64 with tz
        tz = "US/Eastern"
        dt1 = Series(date_range("2000-01-01 09:00:00", periods=5, tz=tz), name="foo")
        dt2 = dt1.copy()
        dt2.iloc[2] = np.nan
        td1 = Series(pd.timedelta_range("1 days 1 min", periods=5, freq="H"))
        td2 = td1.copy()
        td2.iloc[1] = np.nan
        if op_str not in ["__add__", "__radd__", "__sub__", "__rsub__"]:
            check(dt2, td2)

    def test_sub_single_tz(self):
        """Subtraction of two tz-aware Series with the same tz (GH#12290)."""
        # GH#12290
        s1 = Series([pd.Timestamp("2016-02-10", tz="America/Sao_Paulo")])
        s2 = Series([pd.Timestamp("2016-02-08", tz="America/Sao_Paulo")])
        result = s1 - s2
        expected = Series([Timedelta("2days")])
        tm.assert_series_equal(result, expected)
        result = s2 - s1
        expected = Series([Timedelta("-2days")])
        tm.assert_series_equal(result, expected)

    def test_dt64tz_series_sub_dtitz(self):
        """tz-aware Series minus same-tz DatetimeIndex works (GH#19071)."""
        # GH#19071 subtracting tzaware DatetimeIndex from tzaware Series
        # (with same tz) raises, fixed by #19024
        dti = pd.date_range("1999-09-30", periods=10, tz="US/Pacific")
        ser = pd.Series(dti)
        expected = pd.Series(pd.TimedeltaIndex(["0days"] * 10))

        res = dti - ser
        tm.assert_series_equal(res, expected)
        res = ser - dti
        tm.assert_series_equal(res, expected)

    def test_sub_datetime_compat(self):
        """Subtracting stdlib datetime matches subtracting Timestamp (GH#14088)."""
        # see GH#14088
        s = Series([datetime(2016, 8, 23, 12, tzinfo=pytz.utc), pd.NaT])
        dt = datetime(2016, 8, 22, 12, tzinfo=pytz.utc)
        exp = Series([Timedelta("1 days"), pd.NaT])
        tm.assert_series_equal(s - dt, exp)
        tm.assert_series_equal(s - Timestamp(dt), exp)

    def test_dt64_series_add_mixed_tick_DateOffset(self):
        """Adding single and chained Tick offsets to a dt64 Series (GH#4532)."""
        # GH#4532
        # operate with pd.offsets
        s = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])

        result = s + pd.offsets.Milli(5)
        result2 = pd.offsets.Milli(5) + s
        expected = Series(
            [Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
        )
        tm.assert_series_equal(result, expected)
        tm.assert_series_equal(result2, expected)

        result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
        expected = Series(
            [Timestamp("20130101 9:06:00.005"), Timestamp("20130101 9:07:00.005")]
        )
        tm.assert_series_equal(result, expected)

    def test_datetime64_ops_nat(self):
        """NaT scalar propagates through add/sub with dt64 Series (GH#11349)."""
        # GH#11349
        datetime_series = Series([NaT, Timestamp("19900315")])
        nat_series_dtype_timestamp = Series([NaT, NaT], dtype="datetime64[ns]")
        single_nat_dtype_datetime = Series([NaT], dtype="datetime64[ns]")

        # subtraction
        tm.assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp)
        # negating a dt64 Series (even length-1 NaT) is not defined
        msg = "Unary negative expects"
        with pytest.raises(TypeError, match=msg):
            -single_nat_dtype_datetime + datetime_series

        tm.assert_series_equal(
            -NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
        )
        with pytest.raises(TypeError, match=msg):
            -single_nat_dtype_datetime + nat_series_dtype_timestamp

        # addition
        # NOTE(review): the two assertion pairs below are duplicated verbatim —
        # likely copy/paste; confirm before removing either pair.
        tm.assert_series_equal(
            nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp
        )
        tm.assert_series_equal(
            NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
        )

        tm.assert_series_equal(
            nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp
        )
        tm.assert_series_equal(
            NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
        )

    # -------------------------------------------------------------
    # Invalid Operations
    # TODO: this block also needs to be de-duplicated and parametrized

    @pytest.mark.parametrize(
        "dt64_series",
        [
            Series([Timestamp("19900315"), Timestamp("19900315")]),
            Series([pd.NaT, Timestamp("19900315")]),
            Series([pd.NaT, pd.NaT], dtype="datetime64[ns]"),
        ],
    )
    @pytest.mark.parametrize("one", [1, 1.0, np.array(1)])
    def test_dt64_mul_div_numeric_invalid(self, one, dt64_series):
        """Multiplying or dividing a dt64 Series by a numeric raises."""
        # multiplication
        msg = "cannot perform .* with this index type"
        with pytest.raises(TypeError, match=msg):
            dt64_series * one
        with pytest.raises(TypeError, match=msg):
            one * dt64_series

        # division
        with pytest.raises(TypeError, match=msg):
            dt64_series / one
        with pytest.raises(TypeError, match=msg):
            one / dt64_series

    # TODO: parametrize over box
    @pytest.mark.parametrize("op", ["__add__", "__radd__", "__sub__", "__rsub__"])
    @pytest.mark.parametrize("tz", [None, "Asia/Tokyo"])
    def test_dt64_series_add_intlike(self, tz, op):
        """Integers and integer arrays cannot be added/subtracted (GH#19123)."""
        # GH#19123
        dti = pd.DatetimeIndex(["2016-01-02", "2016-02-03", "NaT"], tz=tz)
        ser = Series(dti)

        other = Series([20, 30, 40], dtype="uint8")

        method = getattr(ser, op)
        msg = "|".join(
            [
                "Addition/subtraction of integers and integer-arrays",
                "cannot subtract .* from ndarray",
            ]
        )
        with pytest.raises(TypeError, match=msg):
            method(1)
        with pytest.raises(TypeError, match=msg):
            method(other)
        with pytest.raises(TypeError, match=msg):
            method(np.array(other))
        with pytest.raises(TypeError, match=msg):
            method(pd.Index(other))

    # -------------------------------------------------------------
    # Timezone-Centric Tests

    def test_operators_datetimelike_with_timezones(self):
        """tz-aware dt64 Series +/- timedeltas localizes results correctly."""
        tz = "US/Eastern"
        dt1 = Series(date_range("2000-01-01 09:00:00", periods=5, tz=tz), name="foo")
        dt2 = dt1.copy()
        dt2.iloc[2] = np.nan

        td1 = Series(pd.timedelta_range("1 days 1 min", periods=5, freq="H"))
        td2 = td1.copy()
        td2.iloc[1] = np.nan

        result = dt1 + td1[0]
        exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)

        result = dt2 + td2[0]
        exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)

        # odd numpy behavior with scalar timedeltas
        result = td1[0] + dt1
        exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)

        result = td2[0] + dt2
        exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)

        result = dt1 - td1[0]
        exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)
        msg = "(bad|unsupported) operand type for unary"
        with pytest.raises(TypeError, match=msg):
            td1[0] - dt1

        result = dt2 - td2[0]
        exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)
        with pytest.raises(TypeError, match=msg):
            td2[0] - dt2

        result = dt1 + td1
        exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)

        result = dt2 + td2
        exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)

        result = dt1 - td1
        exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)

        result = dt2 - td2
        exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)

        msg = "cannot (add|subtract)"
        with pytest.raises(TypeError, match=msg):
            td1 - dt1
        with pytest.raises(TypeError, match=msg):
            td2 - dt2
class TestDatetimeIndexArithmetic:
# -------------------------------------------------------------
# Binary operations DatetimeIndex and int
    def test_dti_addsub_int(self, tz_naive_fixture, one):
        """DatetimeIndex +/- a plain integer raises TypeError (GH#19012)."""
        # Variants of `one` for #19012
        tz = tz_naive_fixture
        rng = pd.date_range("2000-01-01 09:00", freq="H", periods=10, tz=tz)
        msg = "Addition/subtraction of integers"

        with pytest.raises(TypeError, match=msg):
            rng + one
        with pytest.raises(TypeError, match=msg):
            rng += one
        with pytest.raises(TypeError, match=msg):
            rng - one
        with pytest.raises(TypeError, match=msg):
            rng -= one
    # -------------------------------------------------------------
    # __add__/__sub__ with integer arrays

    @pytest.mark.parametrize("freq", ["H", "D"])
    @pytest.mark.parametrize("int_holder", [np.array, pd.Index])
    def test_dti_add_intarray_tick(self, int_holder, freq):
        """Tick-freq DatetimeIndex +/- an integer array raises (GH#19959)."""
        # GH#19959
        dti = pd.date_range("2016-01-01", periods=2, freq=freq)
        other = int_holder([4, -1])
        msg = "Addition/subtraction of integers|cannot subtract DatetimeArray from"
        assert_invalid_addsub_type(dti, other, msg)
    @pytest.mark.parametrize("freq", ["W", "M", "MS", "Q"])
    @pytest.mark.parametrize("int_holder", [np.array, pd.Index])
    def test_dti_add_intarray_non_tick(self, int_holder, freq):
        """Non-Tick-freq DatetimeIndex +/- an integer array raises (GH#19959)."""
        # GH#19959
        dti = pd.date_range("2016-01-01", periods=2, freq=freq)
        other = int_holder([4, -1])
        msg = "Addition/subtraction of integers|cannot subtract DatetimeArray from"
        assert_invalid_addsub_type(dti, other, msg)
    @pytest.mark.parametrize("int_holder", [np.array, pd.Index])
    def test_dti_add_intarray_no_freq(self, int_holder):
        """Freq-less DatetimeIndex +/- an integer array raises (GH#19959)."""
        # GH#19959
        dti = pd.DatetimeIndex(["2016-01-01", "NaT", "2017-04-05 06:07:08"])
        other = int_holder([9, 4, -1])
        msg = "|".join(
            ["cannot subtract DatetimeArray from", "Addition/subtraction of integers"]
        )
        assert_invalid_addsub_type(dti, other, msg)
# -------------------------------------------------------------
# Binary operations DatetimeIndex and TimedeltaIndex/array
def test_dti_add_tdi(self, tz_naive_fixture):
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = pd.date_range("2017-01-01", periods=10, tz=tz)
# add with TimdeltaIndex
result = dti + tdi
tm.assert_index_equal(result, expected)
result = tdi + dti
tm.assert_index_equal(result, expected)
# add with timedelta64 array
result = dti + tdi.values
tm.assert_index_equal(result, expected)
result = tdi.values + dti
tm.assert_index_equal(result, expected)
def test_dti_iadd_tdi(self, tz_naive_fixture):
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = pd.date_range("2017-01-01", periods=10, tz=tz)
# iadd with TimdeltaIndex
result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
result += tdi
tm.assert_index_equal(result, expected)
result = pd.timedelta_range("0 days", periods=10)
result += dti
tm.assert_index_equal(result, expected)
# iadd with timedelta64 array
result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
result += tdi.values
tm.assert_index_equal(result, expected)
result = pd.timedelta_range("0 days", periods=10)
result += dti
tm.assert_index_equal(result, expected)
def test_dti_sub_tdi(self, tz_naive_fixture):
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = pd.date_range("2017-01-01", periods=10, tz=tz, freq="-1D")
# sub with TimedeltaIndex
result = dti - tdi
tm.assert_index_equal(result, expected)
msg = "cannot subtract .*TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi - dti
# sub with timedelta64 array
result = dti - tdi.values
tm.assert_index_equal(result, expected)
msg = "cannot subtract DatetimeArray from"
with pytest.raises(TypeError, match=msg):
tdi.values - dti
def test_dti_isub_tdi(self, tz_naive_fixture):
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = pd.date_range("2017-01-01", periods=10, tz=tz, freq="-1D")
# isub with TimedeltaIndex
result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
result -= tdi
tm.assert_index_equal(result, expected)
msg = "cannot subtract .* from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi -= dti
# isub with timedelta64 array
result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
result -= tdi.values
tm.assert_index_equal(result, expected)
msg = "|".join(
[
"cannot perform __neg__ with this index type:",
"ufunc subtract cannot use operands with types",
"cannot subtract DatetimeArray from",
]
)
with pytest.raises(TypeError, match=msg):
tdi.values -= dti
# -------------------------------------------------------------
# Binary Operations DatetimeIndex and datetime-like
# TODO: A couple other tests belong in this section. Move them in
# A PR where there isn't already a giant diff.
@pytest.mark.parametrize(
"addend",
[
datetime(2011, 1, 1),
DatetimeIndex(["2011-01-01", "2011-01-02"]),
DatetimeIndex(["2011-01-01", "2011-01-02"]).tz_localize("US/Eastern"),
np.datetime64("2011-01-01"),
Timestamp("2011-01-01"),
],
ids=lambda x: type(x).__name__,
)
@pytest.mark.parametrize("tz", [None, "US/Eastern"])
def test_add_datetimelike_and_dtarr(self, box_with_array, addend, tz):
# GH#9631
dti = DatetimeIndex(["2011-01-01", "2011-01-02"]).tz_localize(tz)
dtarr = tm.box_expected(dti, box_with_array)
msg = "cannot add DatetimeArray and"
with pytest.raises(TypeError, match=msg):
dtarr + addend
with pytest.raises(TypeError, match=msg):
addend + dtarr
# -------------------------------------------------------------
def test_dta_add_sub_index(self, tz_naive_fixture):
# Check that DatetimeArray defers to Index classes
dti = date_range("20130101", periods=3, tz=tz_naive_fixture)
dta = dti.array
result = dta - dti
expected = dti - dti
tm.assert_index_equal(result, expected)
tdi = result
result = dta + tdi
expected = dti + tdi
tm.assert_index_equal(result, expected)
result = dta - tdi
expected = dti - tdi
tm.assert_index_equal(result, expected)
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimeDeltaIndex (GH ...)
dti = date_range("20130101", periods=3)
dti_tz = date_range("20130101", periods=3).tz_localize("US/Eastern")
dti_tz2 = date_range("20130101", periods=3).tz_localize("UTC")
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
msg = "DatetimeArray subtraction must have the same timezones or"
with pytest.raises(TypeError, match=msg):
dti_tz - dti
with pytest.raises(TypeError, match=msg):
dti - dti_tz
with pytest.raises(TypeError, match=msg):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range("20130101", periods=3)
dti2 = date_range("20130101", periods=4)
msg = "cannot add indices of unequal length"
with pytest.raises(ValueError, match=msg):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(["2012-01-01", np.nan, "2012-01-03"])
dti2 = DatetimeIndex(["2012-01-02", "2012-01-03", np.nan])
expected = TimedeltaIndex(["1 days", np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
# -------------------------------------------------------------------
# TODO: Most of this block is moved from series or frame tests, needs
# cleanup, box-parametrization, and de-duplication
@pytest.mark.parametrize("op", [operator.add, operator.sub])
def test_timedelta64_equal_timedelta_supported_ops(self, op):
ser = Series(
[
Timestamp("20130301"),
Timestamp("20130228 23:00:00"),
Timestamp("20130228 22:00:00"),
Timestamp("20130228 21:00:00"),
]
)
intervals = ["D", "h", "m", "s", "us"]
def timedelta64(*args):
# see casting notes in NumPy gh-12927
return np.sum(list(starmap(np.timedelta64, zip(args, intervals))))
for d, h, m, s, us in product(*([range(2)] * 5)):
nptd = timedelta64(d, h, m, s, us)
pytd = timedelta(days=d, hours=h, minutes=m, seconds=s, microseconds=us)
lhs = op(ser, nptd)
rhs = op(ser, pytd)
tm.assert_series_equal(lhs, rhs)
def test_ops_nat_mixed_datetime64_timedelta64(self):
# GH#11349
timedelta_series = Series([NaT, Timedelta("1s")])
datetime_series = Series([NaT, Timestamp("19900315")])
nat_series_dtype_timedelta = Series([NaT, NaT], dtype="timedelta64[ns]")
nat_series_dtype_timestamp = Series([NaT, NaT], dtype="datetime64[ns]")
single_nat_dtype_datetime = Series([NaT], dtype="datetime64[ns]")
single_nat_dtype_timedelta = Series([NaT], dtype="timedelta64[ns]")
# subtraction
tm.assert_series_equal(
datetime_series - single_nat_dtype_datetime, nat_series_dtype_timedelta
)
tm.assert_series_equal(
datetime_series - single_nat_dtype_timedelta, nat_series_dtype_timestamp
)
tm.assert_series_equal(
-single_nat_dtype_timedelta + datetime_series, nat_series_dtype_timestamp
)
# without a Series wrapping the NaT, it is ambiguous
# whether it is a datetime64 or timedelta64
# defaults to interpreting it as timedelta64
tm.assert_series_equal(
nat_series_dtype_timestamp - single_nat_dtype_datetime,
nat_series_dtype_timedelta,
)
tm.assert_series_equal(
nat_series_dtype_timestamp - single_nat_dtype_timedelta,
nat_series_dtype_timestamp,
)
tm.assert_series_equal(
-single_nat_dtype_timedelta + nat_series_dtype_timestamp,
nat_series_dtype_timestamp,
)
msg = "cannot subtract a datelike"
with pytest.raises(TypeError, match=msg):
timedelta_series - single_nat_dtype_datetime
# addition
tm.assert_series_equal(
nat_series_dtype_timestamp + single_nat_dtype_timedelta,
nat_series_dtype_timestamp,
)
tm.assert_series_equal(
single_nat_dtype_timedelta + nat_series_dtype_timestamp,
nat_series_dtype_timestamp,
)
tm.assert_series_equal(
nat_series_dtype_timestamp + single_nat_dtype_timedelta,
nat_series_dtype_timestamp,
)
tm.assert_series_equal(
single_nat_dtype_timedelta + nat_series_dtype_timestamp,
nat_series_dtype_timestamp,
)
tm.assert_series_equal(
nat_series_dtype_timedelta + single_nat_dtype_datetime,
nat_series_dtype_timestamp,
)
tm.assert_series_equal(
single_nat_dtype_datetime + nat_series_dtype_timedelta,
nat_series_dtype_timestamp,
)
def test_ufunc_coercions(self):
idx = date_range("2011-01-01", periods=3, freq="2D", name="x")
delta = np.timedelta64(1, "D")
exp = date_range("2011-01-02", periods=3, freq="2D", name="x")
for result in [idx + delta, np.add(idx, delta)]:
assert isinstance(result, DatetimeIndex)
tm.assert_index_equal(result, exp)
assert result.freq == "2D"
exp = date_range("2010-12-31", periods=3, freq="2D", name="x")
for result in [idx - delta, np.subtract(idx, delta)]:
assert isinstance(result, DatetimeIndex)
tm.assert_index_equal(result, exp)
assert result.freq == "2D"
delta = np.array(
[np.timedelta64(1, "D"), np.timedelta64(2, "D"), np.timedelta64(3, "D")]
)
exp = DatetimeIndex(
["2011-01-02", "2011-01-05", "2011-01-08"], freq="3D", name="x"
)
for result in [idx + delta, np.add(idx, delta)]:
assert isinstance(result, DatetimeIndex)
tm.assert_index_equal(result, exp)
assert result.freq == "3D"
exp = DatetimeIndex(
["2010-12-31", "2011-01-01", "2011-01-02"], freq="D", name="x"
)
for result in [idx - delta, np.subtract(idx, delta)]:
assert isinstance(result, DatetimeIndex)
tm.assert_index_equal(result, exp)
assert result.freq == "D"
@pytest.mark.parametrize(
"names", [("foo", None, None), ("baz", "bar", None), ("bar", "bar", "bar")]
)
@pytest.mark.parametrize("tz", [None, "America/Chicago"])
def test_dti_add_series(self, tz, names):
# GH#13905
index = DatetimeIndex(
["2016-06-28 05:30", "2016-06-28 05:31"], tz=tz, name=names[0]
)
ser = Series([Timedelta(seconds=5)] * 2, index=index, name=names[1])
expected = Series(index + Timedelta(seconds=5), index=index, name=names[2])
# passing name arg isn't enough when names[2] is None
expected.name = names[2]
assert expected.dtype == index.dtype
result = ser + index
tm.assert_series_equal(result, expected)
result2 = index + ser
tm.assert_series_equal(result2, expected)
expected = index + Timedelta(seconds=5)
result3 = ser.values + index
tm.assert_index_equal(result3, expected)
result4 = index + ser.values
tm.assert_index_equal(result4, expected)
@pytest.mark.parametrize("op", [operator.add, roperator.radd, operator.sub])
@pytest.mark.parametrize(
"names", [(None, None, None), ("foo", "bar", None), ("foo", "foo", "foo")]
)
def test_dti_addsub_offset_arraylike(
self, tz_naive_fixture, names, op, index_or_series
):
# GH#18849, GH#19744
box = pd.Index
other_box = index_or_series
tz = tz_naive_fixture
dti = pd.date_range("2017-01-01", periods=2, tz=tz, name=names[0])
other = other_box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)], name=names[1])
xbox = get_upcast_box(box, other)
with tm.assert_produces_warning(PerformanceWarning):
res = op(dti, other)
expected = DatetimeIndex(
[op(dti[n], other[n]) for n in range(len(dti))], name=names[2], freq="infer"
)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(res, expected)
@pytest.mark.parametrize("other_box", [pd.Index, np.array])
def test_dti_addsub_object_arraylike(
self, tz_naive_fixture, box_with_array, other_box
):
tz = tz_naive_fixture
dti = pd.date_range("2017-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
other = other_box([pd.offsets.MonthEnd(), pd.Timedelta(days=4)])
xbox = get_upcast_box(box_with_array, other)
expected = pd.DatetimeIndex(["2017-01-31", "2017-01-06"], tz=tz_naive_fixture)
expected = tm.box_expected(expected, xbox)
warn = None if box_with_array is pd.DataFrame else PerformanceWarning
with tm.assert_produces_warning(warn):
result = dtarr + other
tm.assert_equal(result, expected)
expected = pd.DatetimeIndex(["2016-12-31", "2016-12-29"], tz=tz_naive_fixture)
expected = tm.box_expected(expected, xbox)
with tm.assert_produces_warning(warn):
result = dtarr - other
tm.assert_equal(result, expected)
@pytest.mark.parametrize("years", [-1, 0, 1])
@pytest.mark.parametrize("months", [-2, 0, 2])
def test_shift_months(years, months):
    """Low-level shift_months on i8 values matches per-element DateOffset."""
    stamps = [
        "2000-01-05 00:15:00",
        "2000-01-31 00:23:00",
        "2000-01-01",
        "2000-02-29",
        "2000-12-31",
    ]
    dti = DatetimeIndex([Timestamp(s) for s in stamps])
    offset = pd.offsets.DateOffset(years=years, months=months)
    expected = DatetimeIndex([ts + offset for ts in dti])
    actual = DatetimeIndex(shift_months(dti.asi8, 12 * years + months))
    tm.assert_index_equal(actual, expected)
def test_dt64arr_addsub_object_dtype_2d():
# block-wise DataFrame operations will require operating on 2D
# DatetimeArray/TimedeltaArray, so check that specifically.
dti = pd.date_range("1994-02-13", freq="2W", periods=4)
dta = dti._data.reshape((4, 1))
other = np.array([[pd.offsets.Day(n)] for n in range(4)])
assert other.shape == dta.shape
with tm.assert_produces_warning(PerformanceWarning):
result = dta + other
with tm.assert_produces_warning(PerformanceWarning):
expected = (dta[:, 0] + other[:, 0]).reshape(-1, 1)
assert isinstance(result, DatetimeArray)
assert result.freq is None
tm.assert_numpy_array_equal(result._data, expected._data)
with tm.assert_produces_warning(PerformanceWarning):
# Case where we expect to get a TimedeltaArray back
result2 = dta - dta.astype(object)
assert isinstance(result2, TimedeltaArray)
assert result2.shape == (4, 1)
assert result2.freq is None
assert (result2.asi8 == 0).all()
| 36.473965 | 88 | 0.584364 |
from datetime import datetime, timedelta
from itertools import product, starmap
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.compat.numpy import np_datetime64_compat
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import DatetimeArray, TimedeltaArray
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
class TestDatetime64ArrayLikeComparisons:
def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
box = box_with_array
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
dti = date_range("20130101", periods=3, tz=tz)
other = np.array(dti.to_numpy()[0])
dtarr = tm.box_expected(dti, box)
result = dtarr <= other
expected = np.array([True, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
"foo",
-1,
99,
4.0,
object(),
timedelta(days=2),
dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
dtarr = tm.box_expected(rng, box_with_array)
assert_invalid_comparison(dtarr, other, box_with_array)
@pytest.mark.parametrize(
"other",
[
list(range(10)),
np.arange(10),
np.arange(10).astype(np.float32),
np.arange(10).astype(object),
pd.timedelta_range("1ns", periods=10).array,
np.array(pd.timedelta_range("1ns", periods=10)),
list(pd.timedelta_range("1ns", periods=10)),
pd.timedelta_range("1 Day", periods=10).astype(object),
pd.period_range("1971-01-01", freq="D", periods=10).array,
pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
],
)
def test_dt64arr_cmp_arraylike_invalid(self, other, tz_naive_fixture):
# other plays poorly with assert_invalid_comparison reversed checks
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data
assert_invalid_comparison(dta, other, tm.to_array)
def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data
other = np.array([0, 1, 2, dta[3], pd.Timedelta(days=1)])
result = dta == other
expected = np.array([False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = dta != other
tm.assert_numpy_array_equal(result, ~expected)
msg = "Invalid comparison between|Cannot compare type|not supported between"
with pytest.raises(TypeError, match=msg):
dta < other
with pytest.raises(TypeError, match=msg):
dta > other
with pytest.raises(TypeError, match=msg):
dta <= other
with pytest.raises(TypeError, match=msg):
dta >= other
def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
# GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
tz = tz_naive_fixture
box = box_with_array
xbox = box if box is not pd.Index else np.ndarray
ts = pd.Timestamp.now(tz)
ser = pd.Series([ts, pd.NaT])
# FIXME: Can't transpose because that loses the tz dtype on
obj = tm.box_expected(ser, box, transpose=False)
expected = pd.Series([True, False], dtype=np.bool_)
expected = tm.box_expected(expected, xbox, transpose=False)
result = obj == ts
tm.assert_equal(result, expected)
class TestDatetime64SeriesComparison:
@pytest.mark.parametrize(
"pair",
[
(
[pd.Timestamp("2011-01-01"), NaT, pd.Timestamp("2011-01-03")],
[NaT, NaT, pd.Timestamp("2011-01-03")],
),
(
[pd.Timedelta("1 days"), NaT, pd.Timedelta("3 days")],
[NaT, NaT, pd.Timedelta("3 days")],
),
(
[pd.Period("2011-01", freq="M"), NaT, pd.Period("2011-03", freq="M")],
[NaT, NaT, pd.Period("2011-03", freq="M")],
),
],
)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("dtype", [None, object])
def test_nat_comparisons(self, dtype, index_or_series, reverse, pair):
box = index_or_series
l, r = pair
if reverse:
l, r = r, l
left = Series(l, dtype=dtype)
right = box(r, dtype=dtype)
expected = Series([False, False, True])
tm.assert_series_equal(left == right, expected)
expected = Series([True, True, False])
tm.assert_series_equal(left != right, expected)
expected = Series([False, False, False])
tm.assert_series_equal(left < right, expected)
expected = Series([False, False, False])
tm.assert_series_equal(left > right, expected)
expected = Series([False, False, True])
tm.assert_series_equal(left >= right, expected)
expected = Series([False, False, True])
tm.assert_series_equal(left <= right, expected)
def test_comparison_invalid(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
ser = Series(range(5))
ser2 = Series(pd.date_range("20010101", periods=5, tz=tz))
ser = tm.box_expected(ser, box_with_array)
ser2 = tm.box_expected(ser2, box_with_array)
assert_invalid_comparison(ser, ser2, box_with_array)
@pytest.mark.parametrize(
"data",
[
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
],
)
@pytest.mark.parametrize("dtype", [None, object])
def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
if box_with_array is tm.to_array and dtype is object:
return
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
left = Series(data, dtype=dtype)
left = tm.box_expected(left, box_with_array)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
tm.assert_equal(left == NaT, expected)
tm.assert_equal(NaT == left, expected)
expected = [True, True, True]
expected = tm.box_expected(expected, xbox)
tm.assert_equal(left != NaT, expected)
tm.assert_equal(NaT != left, expected)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
tm.assert_equal(left < NaT, expected)
tm.assert_equal(NaT > left, expected)
tm.assert_equal(left <= NaT, expected)
tm.assert_equal(NaT >= left, expected)
tm.assert_equal(left > NaT, expected)
tm.assert_equal(NaT < left, expected)
tm.assert_equal(left >= NaT, expected)
tm.assert_equal(NaT <= left, expected)
@pytest.mark.parametrize("val", [datetime(2000, 1, 4), datetime(2000, 1, 5)])
def test_series_comparison_scalars(self, val):
series = Series(date_range("1/1/2000", periods=10))
result = series > val
expected = Series([x > val for x in series])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"left,right", [("lt", "gt"), ("le", "ge"), ("eq", "eq"), ("ne", "ne")]
)
def test_timestamp_compare_series(self, left, right):
ser = pd.Series(pd.date_range("20010101", periods=10), name="dates")
s_nat = ser.copy(deep=True)
ser[0] = pd.Timestamp("nat")
ser[3] = pd.Timestamp("nat")
left_f = getattr(operator, left)
right_f = getattr(operator, right)
expected = left_f(ser, pd.Timestamp("20010109"))
result = right_f(pd.Timestamp("20010109"), ser)
tm.assert_series_equal(result, expected)
expected = left_f(ser, pd.Timestamp("nat"))
result = right_f(pd.Timestamp("nat"), ser)
tm.assert_series_equal(result, expected)
expected = left_f(s_nat, pd.Timestamp("20010109"))
result = right_f(pd.Timestamp("20010109"), s_nat)
tm.assert_series_equal(result, expected)
expected = left_f(s_nat, pd.Timestamp("nat"))
result = right_f(pd.Timestamp("nat"), s_nat)
tm.assert_series_equal(result, expected)
def test_dt64arr_timestamp_equality(self, box_with_array):
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
ser = pd.Series([pd.Timestamp("2000-01-29 01:59:00"), "NaT"])
ser = tm.box_expected(ser, box_with_array)
result = ser != ser
expected = tm.box_expected([False, True], xbox)
tm.assert_equal(result, expected)
result = ser != ser[0]
expected = tm.box_expected([False, True], xbox)
tm.assert_equal(result, expected)
result = ser != ser[1]
expected = tm.box_expected([True, True], xbox)
tm.assert_equal(result, expected)
result = ser == ser
expected = tm.box_expected([True, False], xbox)
tm.assert_equal(result, expected)
result = ser == ser[0]
expected = tm.box_expected([True, False], xbox)
tm.assert_equal(result, expected)
result = ser == ser[1]
expected = tm.box_expected([False, False], xbox)
tm.assert_equal(result, expected)
class TestDatetimeIndexComparisons:
@pytest.mark.parametrize(
"op",
[operator.eq, operator.ne, operator.gt, operator.lt, operator.ge, operator.le],
)
def test_comparators(self, op):
index = tm.makeDateIndex(100)
element = index[len(index) // 2]
element = Timestamp(element).to_datetime64()
arr = np.array(index)
arr_result = op(arr, element)
index_result = op(index, element)
assert isinstance(index_result, np.ndarray)
tm.assert_numpy_array_equal(arr_result, index_result)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
def test_dti_cmp_datetimelike(self, other, tz_naive_fixture):
tz = tz_naive_fixture
dti = pd.date_range("2016-01-01", periods=2, tz=tz)
if tz is not None:
if isinstance(other, np.datetime64):
return
other = localize_pydatetime(other, dti.tzinfo)
result = dti == other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = dti > other
expected = np.array([False, True])
tm.assert_numpy_array_equal(result, expected)
result = dti >= other
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
result = dti < other
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
result = dti <= other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", [None, object])
def test_dti_cmp_nat(self, dtype, box_with_array):
if box_with_array is tm.to_array and dtype is object:
return
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
left = pd.DatetimeIndex(
[pd.Timestamp("2011-01-01"), pd.NaT, pd.Timestamp("2011-01-03")]
)
right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp("2011-01-03")])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = lhs != rhs
expected = np.array([True, True, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs == pd.NaT, expected)
tm.assert_equal(pd.NaT == rhs, expected)
expected = np.array([True, True, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs != pd.NaT, expected)
tm.assert_equal(pd.NaT != lhs, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs < pd.NaT, expected)
tm.assert_equal(pd.NaT > lhs, expected)
def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):
fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
didx1 = pd.DatetimeIndex(
["2014-01-01", pd.NaT, "2014-03-01", pd.NaT, "2014-05-01", "2014-07-01"]
)
didx2 = pd.DatetimeIndex(
["2014-02-01", "2014-03-01", pd.NaT, pd.NaT, "2014-06-01", "2014-07-01"]
)
darr = np.array(
[
np_datetime64_compat("2014-02-01 00:00Z"),
np_datetime64_compat("2014-03-01 00:00Z"),
np_datetime64_compat("nat"),
np.datetime64("nat"),
np_datetime64_compat("2014-06-01 00:00Z"),
np_datetime64_compat("2014-07-01 00:00Z"),
]
)
cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
with tm.assert_produces_warning(None):
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, np.nan), (didx1, pd.NaT)]:
result = idx1 < val
expected = np.array([False, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, True, True, True, True])
tm.assert_numpy_array_equal(result, expected)
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
result = idx1 < val
expected = np.array([True, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
expected = np.array([False, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
expected = np.array([False, False, True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
expected = np.array([False, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, False, True, True, True])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"op",
[operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le],
)
def test_comparison_tzawareness_compat(self, op, box_df_fail):
box = box_df_fail
dr = pd.date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box)
dz = tm.box_expected(dz, box)
msg = "Cannot compare tz-naive and tz-aware"
with pytest.raises(TypeError, match=msg):
op(dr, dz)
with pytest.raises(TypeError, match=msg):
op(dr, list(dz))
with pytest.raises(TypeError, match=msg):
op(dr, np.array(list(dz), dtype=object))
with pytest.raises(TypeError, match=msg):
op(dz, dr)
with pytest.raises(TypeError, match=msg):
op(dz, list(dr))
with pytest.raises(TypeError, match=msg):
op(dz, np.array(list(dr), dtype=object))
assert np.all(dr == dr)
assert np.all(dr == list(dr))
assert np.all(list(dr) == dr)
assert np.all(np.array(list(dr), dtype=object) == dr)
assert np.all(dr == np.array(list(dr), dtype=object))
assert np.all(dz == dz)
assert np.all(dz == list(dz))
assert np.all(list(dz) == dz)
assert np.all(np.array(list(dz), dtype=object) == dz)
assert np.all(dz == np.array(list(dz), dtype=object))
@pytest.mark.parametrize(
"op",
[operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le],
)
def test_comparison_tzawareness_compat_scalars(self, op, box_with_array):
dr = pd.date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box_with_array)
dz = tm.box_expected(dz, box_with_array)
ts = pd.Timestamp("2000-03-14 01:59")
ts_tz = pd.Timestamp("2000-03-14 01:59", tz="Europe/Amsterdam")
assert np.all(dr > ts)
msg = "Cannot compare tz-naive and tz-aware"
with pytest.raises(TypeError, match=msg):
op(dr, ts_tz)
assert np.all(dz > ts_tz)
with pytest.raises(TypeError, match=msg):
op(dz, ts)
op(ts, dz)
@pytest.mark.parametrize(
"op",
[operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le],
)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
@pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:Converting timezone-aware:FutureWarning")
def test_scalar_comparison_tzawareness(
self, op, other, tz_aware_fixture, box_with_array
):
tz = tz_aware_fixture
dti = pd.date_range("2016-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
msg = "Cannot compare tz-naive and tz-aware"
with pytest.raises(TypeError, match=msg):
op(dtarr, other)
with pytest.raises(TypeError, match=msg):
op(other, dtarr)
@pytest.mark.parametrize(
"op",
[operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le],
)
def test_nat_comparison_tzawareness(self, op):
dti = pd.DatetimeIndex(
["2014-01-01", pd.NaT, "2014-03-01", pd.NaT, "2014-05-01", "2014-07-01"]
)
expected = np.array([op == operator.ne] * len(dti))
result = op(dti, pd.NaT)
tm.assert_numpy_array_equal(result, expected)
result = op(dti.tz_localize("US/Pacific"), pd.NaT)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_str(self, tz_naive_fixture):
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
other = "1/1/2000"
result = rng == other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng != other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng < other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = rng <= other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng > other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng >= other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_list(self):
rng = date_range("1/1/2000", periods=10)
result = rng == list(rng)
expected = rng == rng
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
pd.timedelta_range("1D", periods=10),
pd.timedelta_range("1D", periods=10).to_series(),
pd.timedelta_range("1D", periods=10).asi8.view("m8[ns]"),
],
ids=lambda x: type(x).__name__,
)
def test_dti_cmp_tdi_tzawareness(self, other):
# when comparing against TimedeltaIndex
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
result = dti == other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = dti != other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
msg = "Invalid comparison between"
with pytest.raises(TypeError, match=msg):
dti < other
with pytest.raises(TypeError, match=msg):
dti <= other
with pytest.raises(TypeError, match=msg):
dti > other
with pytest.raises(TypeError, match=msg):
dti >= other
def test_dti_cmp_object_dtype(self):
# GH#22074
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
other = dti.astype("O")
result = dti == other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
other = dti.tz_localize(None)
msg = "Cannot compare tz-naive and tz-aware"
with pytest.raises(TypeError, match=msg):
# tzawareness failure
dti != other
other = np.array(list(dti[:5]) + [Timedelta(days=1)] * 5)
result = dti == other
expected = np.array([True] * 5 + [False] * 5)
tm.assert_numpy_array_equal(result, expected)
msg = "Cannot compare type"
with pytest.raises(TypeError, match=msg):
dti >= other
# ------------------------------------------------------------------
# Arithmetic
class TestDatetime64Arithmetic:
    # This class is intended for "finished" tests that are fully parametrized
    # over DataFrame/Series/Index/DatetimeArray

    # -------------------------------------------------------------
    # Addition/Subtraction of timedelta-like

    def test_dt64arr_add_timedeltalike_scalar(
        self, tz_naive_fixture, two_hours, box_with_array
    ):
        # GH#22005, GH#22163 check DataFrame doesn't raise TypeError
        tz = tz_naive_fixture

        rng = pd.date_range("2000-01-01", "2000-02-01", tz=tz)
        expected = pd.date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz)

        rng = tm.box_expected(rng, box_with_array)
        expected = tm.box_expected(expected, box_with_array)

        result = rng + two_hours
        tm.assert_equal(result, expected)

    def test_dt64arr_iadd_timedeltalike_scalar(
        self, tz_naive_fixture, two_hours, box_with_array
    ):
        tz = tz_naive_fixture

        rng = pd.date_range("2000-01-01", "2000-02-01", tz=tz)
        expected = pd.date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz)

        rng = tm.box_expected(rng, box_with_array)
        expected = tm.box_expected(expected, box_with_array)

        rng += two_hours
        tm.assert_equal(rng, expected)

    def test_dt64arr_sub_timedeltalike_scalar(
        self, tz_naive_fixture, two_hours, box_with_array
    ):
        tz = tz_naive_fixture

        rng = pd.date_range("2000-01-01", "2000-02-01", tz=tz)
        expected = pd.date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz)

        rng = tm.box_expected(rng, box_with_array)
        expected = tm.box_expected(expected, box_with_array)

        result = rng - two_hours
        tm.assert_equal(result, expected)

    def test_dt64arr_isub_timedeltalike_scalar(
        self, tz_naive_fixture, two_hours, box_with_array
    ):
        tz = tz_naive_fixture

        rng = pd.date_range("2000-01-01", "2000-02-01", tz=tz)
        expected = pd.date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz)

        rng = tm.box_expected(rng, box_with_array)
        expected = tm.box_expected(expected, box_with_array)

        rng -= two_hours
        tm.assert_equal(rng, expected)

    def test_dt64arr_add_td64_scalar(self, box_with_array):
        # scalar timedelta64s of different resolutions
        ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
        expected = Series(
            [Timestamp("20130101 9:01:01"), Timestamp("20130101 9:02:01")]
        )

        dtarr = tm.box_expected(ser, box_with_array)
        expected = tm.box_expected(expected, box_with_array)

        result = dtarr + np.timedelta64(1, "s")
        tm.assert_equal(result, expected)
        result = np.timedelta64(1, "s") + dtarr
        tm.assert_equal(result, expected)

        expected = Series(
            [Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
        )
        expected = tm.box_expected(expected, box_with_array)

        result = dtarr + np.timedelta64(5, "ms")
        tm.assert_equal(result, expected)
        result = np.timedelta64(5, "ms") + dtarr
        tm.assert_equal(result, expected)

    def test_dt64arr_add_sub_td64_nat(self, box_with_array, tz_naive_fixture):
        # GH#23320 special handling for timedelta64("NaT")
        # FIX: the `tz` and `dti` assignments had been truncated, leaving both
        # names undefined below; restored here.
        tz = tz_naive_fixture

        dti = pd.date_range("1994-04-01", periods=9, tz=tz, freq="QS")
        other = np.timedelta64("NaT")
        expected = pd.DatetimeIndex(["NaT"] * 9, tz=tz)

        obj = tm.box_expected(dti, box_with_array, transpose=False)
        expected = tm.box_expected(expected, box_with_array, transpose=False)

        result = obj + other
        tm.assert_equal(result, expected)
        result = other + obj
        tm.assert_equal(result, expected)
        result = obj - other
        tm.assert_equal(result, expected)
        msg = "cannot subtract"
        with pytest.raises(TypeError, match=msg):
            other - obj

    def test_dt64arr_add_sub_td64ndarray(self, tz_naive_fixture, box_with_array):
        tz = tz_naive_fixture
        dti = pd.date_range("2016-01-01", periods=3, tz=tz)
        tdi = pd.TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
        tdarr = tdi.values

        expected = pd.date_range("2015-12-31", periods=3, tz=tz)

        dtarr = tm.box_expected(dti, box_with_array)
        expected = tm.box_expected(expected, box_with_array)

        result = dtarr + tdarr
        tm.assert_equal(result, expected)
        result = tdarr + dtarr
        tm.assert_equal(result, expected)

        expected = pd.date_range("2016-01-02", periods=3, tz=tz)
        expected = tm.box_expected(expected, box_with_array)

        result = dtarr - tdarr
        tm.assert_equal(result, expected)
        msg = "cannot subtract|(bad|unsupported) operand type for unary"
        with pytest.raises(TypeError, match=msg):
            tdarr - dtarr

    # -------------------------------------------------------------
    # Subtraction of datetime-like scalars

    @pytest.mark.parametrize(
        "ts",
        [
            pd.Timestamp("2013-01-01"),
            pd.Timestamp("2013-01-01").to_pydatetime(),
            pd.Timestamp("2013-01-01").to_datetime64(),
        ],
    )
    def test_dt64arr_sub_dtscalar(self, box_with_array, ts):
        # GH#8554, GH#22163 DataFrame op should _not_ return dt64 dtype
        # FIX: the `idx` construction had been truncated; restored so the
        # subtraction below operates on a boxed date_range.
        idx = pd.date_range("2013-01-01", periods=3)
        idx = tm.box_expected(idx, box_with_array)

        expected = pd.TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
        expected = tm.box_expected(expected, box_with_array)

        result = idx - ts
        tm.assert_equal(result, expected)

    def test_dt64arr_sub_datetime64_not_ns(self, box_with_array):
        # GH#7996, GH#22163 ensure non-nano datetime64 is converted to nano
        #  for DataFrame operation
        # FIX: the `dt64` and `dti` assignments had been garbled
        # ("ate_range(...)"); restored here.
        dt64 = np.datetime64("2013-01-01")
        assert dt64.dtype == "datetime64[D]"

        dti = pd.date_range("20130101", periods=3)
        dtarr = tm.box_expected(dti, box_with_array)

        expected = pd.TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
        expected = tm.box_expected(expected, box_with_array)

        result = dtarr - dt64
        tm.assert_equal(result, expected)

        result = dt64 - dtarr
        tm.assert_equal(result, -expected)

    def test_dt64arr_sub_timestamp(self, box_with_array):
        ser = pd.date_range("2014-03-17", periods=2, freq="D", tz="US/Eastern")
        ts = ser[0]

        ser = tm.box_expected(ser, box_with_array)

        delta_series = pd.Series([np.timedelta64(0, "D"), np.timedelta64(1, "D")])
        expected = tm.box_expected(delta_series, box_with_array)

        tm.assert_equal(ser - ts, expected)
        tm.assert_equal(ts - ser, -expected)

    def test_dt64arr_sub_NaT(self, box_with_array):
        # subtracting NaT propagates NaT with timedelta64 dtype, with and
        # without a timezone
        dti = pd.DatetimeIndex([pd.NaT, pd.Timestamp("19900315")])
        ser = tm.box_expected(dti, box_with_array)

        result = ser - pd.NaT
        expected = pd.Series([pd.NaT, pd.NaT], dtype="timedelta64[ns]")
        expected = tm.box_expected(expected, box_with_array)
        tm.assert_equal(result, expected)

        dti_tz = dti.tz_localize("Asia/Tokyo")
        ser_tz = tm.box_expected(dti_tz, box_with_array)

        result = ser_tz - pd.NaT
        expected = pd.Series([pd.NaT, pd.NaT], dtype="timedelta64[ns]")
        expected = tm.box_expected(expected, box_with_array)
        tm.assert_equal(result, expected)

    # -------------------------------------------------------------
    # Subtraction of datetime-like array-like

    def test_dt64arr_sub_dt64object_array(self, box_with_array, tz_naive_fixture):
        dti = pd.date_range("2016-01-01", periods=3, tz=tz_naive_fixture)
        expected = dti - dti

        obj = tm.box_expected(dti, box_with_array)
        expected = tm.box_expected(expected, box_with_array)

        # object-dtype operation falls back to element-wise, which warns
        # (except for DataFrame)
        warn = PerformanceWarning if box_with_array is not pd.DataFrame else None
        with tm.assert_produces_warning(warn):
            result = obj - obj.astype(object)
        tm.assert_equal(result, expected)

    def test_dt64arr_naive_sub_dt64ndarray(self, box_with_array):
        dti = pd.date_range("2016-01-01", periods=3, tz=None)
        dt64vals = dti.values

        dtarr = tm.box_expected(dti, box_with_array)

        expected = dtarr - dtarr
        result = dtarr - dt64vals
        tm.assert_equal(result, expected)
        result = dt64vals - dtarr
        tm.assert_equal(result, expected)

    def test_dt64arr_aware_sub_dt64ndarray_raises(
        self, tz_aware_fixture, box_with_array
    ):
        tz = tz_aware_fixture
        dti = pd.date_range("2016-01-01", periods=3, tz=tz)
        dt64vals = dti.values

        dtarr = tm.box_expected(dti, box_with_array)
        msg = "subtraction must have the same timezones or"
        with pytest.raises(TypeError, match=msg):
            dtarr - dt64vals
        with pytest.raises(TypeError, match=msg):
            dt64vals - dtarr

    # -------------------------------------------------------------
    # Addition of datetime-like others (invalid)

    def test_dt64arr_add_dt64ndarray_raises(self, tz_naive_fixture, box_with_array):
        tz = tz_naive_fixture
        dti = pd.date_range("2016-01-01", periods=3, tz=tz)
        dt64vals = dti.values

        dtarr = tm.box_expected(dti, box_with_array)
        msg = "cannot add"
        with pytest.raises(TypeError, match=msg):
            dtarr + dt64vals
        with pytest.raises(TypeError, match=msg):
            dt64vals + dtarr

    def test_dt64arr_add_timestamp_raises(self, box_with_array):
        # adding a Timestamp to a datetime64 array is invalid
        # FIX: the `idx` construction had been garbled (only `-02"])`
        # survived, a syntax error); restored here.
        idx = DatetimeIndex(["2011-01-01", "2011-01-02"])
        idx = tm.box_expected(idx, box_with_array)
        msg = "cannot add"
        with pytest.raises(TypeError, match=msg):
            idx + Timestamp("2011-01-01")
        with pytest.raises(TypeError, match=msg):
            Timestamp("2011-01-01") + idx

    # -------------------------------------------------------------
    # Other Invalid Addition/Subtraction

    @pytest.mark.parametrize(
        "other",
        [
            3.14,
            np.array([2.0, 3.0]),
            # GH#13078 datetime +/- Period is invalid
            pd.Period("2011-01-01", freq="D"),
        ],
    )
    @pytest.mark.parametrize("dti_freq", [None, "D"])
    def test_dt64arr_add_sub_invalid(self, dti_freq, other, box_with_array):
        dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
        dtarr = tm.box_expected(dti, box_with_array)
        msg = "|".join(
            [
                "unsupported operand type",
                "cannot (add|subtract)",
                "cannot use operands with types",
                "ufunc '?(add|subtract)'? cannot use operands with types",
            ]
        )
        assert_invalid_addsub_type(dtarr, other, msg)

    @pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "H"])
    @pytest.mark.parametrize("dti_freq", [None, "D"])
    def test_dt64arr_add_sub_parr(
        self, dti_freq, pi_freq, box_with_array, box_with_array2
    ):
        # GH#20049 subtracting PeriodIndex should raise TypeError
        dti = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
        pi = dti.to_period(pi_freq)

        dtarr = tm.box_expected(dti, box_with_array)
        parr = tm.box_expected(pi, box_with_array2)
        msg = "|".join(
            [
                "cannot (add|subtract)",
                "unsupported operand",
                "descriptor.*requires",
                "ufunc.*cannot use operands",
            ]
        )
        assert_invalid_addsub_type(dtarr, parr, msg)
class TestDatetime64DateOffsetArithmetic:
    """Addition/subtraction of DateOffset objects against datetime64 boxes
    (Index/Series/DataFrame/array), covering Tick offsets, relativedelta-style
    offsets, and the general DateOffset subclasses."""

    # -------------------------------------------------------------
    # Tick DateOffsets

    # TODO: parametrize over timezone?
    def test_dt64arr_series_add_tick_DateOffset(self, box_with_array):
        # GH#4532
        # operate with pd.offsets
        ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
        expected = Series(
            [Timestamp("20130101 9:01:05"), Timestamp("20130101 9:02:05")]
        )

        ser = tm.box_expected(ser, box_with_array)
        expected = tm.box_expected(expected, box_with_array)

        # addition is commutative for Tick offsets
        result = ser + pd.offsets.Second(5)
        tm.assert_equal(result, expected)

        result2 = pd.offsets.Second(5) + ser
        tm.assert_equal(result2, expected)

    def test_dt64arr_series_sub_tick_DateOffset(self, box_with_array):
        # GH#4532
        # operate with pd.offsets
        ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
        expected = Series(
            [Timestamp("20130101 9:00:55"), Timestamp("20130101 9:01:55")]
        )

        ser = tm.box_expected(ser, box_with_array)
        expected = tm.box_expected(expected, box_with_array)

        result = ser - pd.offsets.Second(5)
        tm.assert_equal(result, expected)

        # subtracting is equivalent to adding the negated offset
        result2 = -pd.offsets.Second(5) + ser
        tm.assert_equal(result2, expected)

        # offset - box is not defined
        msg = "(bad|unsupported) operand type for unary"
        with pytest.raises(TypeError, match=msg):
            pd.offsets.Second(5) - ser

    @pytest.mark.parametrize(
        "cls_name", ["Day", "Hour", "Minute", "Second", "Milli", "Micro", "Nano"]
    )
    def test_dt64arr_add_sub_tick_DateOffset_smoke(self, cls_name, box_with_array):
        # GH#4532
        # smoke tests for valid DateOffsets
        ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
        ser = tm.box_expected(ser, box_with_array)

        # these must simply not raise; values are checked elsewhere
        offset_cls = getattr(pd.offsets, cls_name)
        ser + offset_cls(5)
        offset_cls(5) + ser
        ser - offset_cls(5)

    def test_dti_add_tick_tzaware(self, tz_aware_fixture, box_with_array):
        # GH#21610, GH#22163 ensure DataFrame doesn't return object-dtype
        tz = tz_aware_fixture
        if tz == "US/Pacific":
            dates = date_range("2012-11-01", periods=3, tz=tz)
            offset = dates + pd.offsets.Hour(5)
            assert dates[0] + pd.offsets.Hour(5) == offset[0]

        dates = date_range("2010-11-01 00:00", periods=3, tz=tz, freq="H")
        expected = DatetimeIndex(
            ["2010-11-01 05:00", "2010-11-01 06:00", "2010-11-01 07:00"],
            freq="H",
            tz=tz,
        )

        dates = tm.box_expected(dates, box_with_array)
        expected = tm.box_expected(expected, box_with_array)

        # Tick offset, np.timedelta64 and datetime.timedelta are equivalent
        offset = dates + pd.offsets.Hour(5)
        tm.assert_equal(offset, expected)
        offset = dates + np.timedelta64(5, "h")
        tm.assert_equal(offset, expected)
        offset = dates + timedelta(hours=5)
        tm.assert_equal(offset, expected)

    # -------------------------------------------------------------
    # RelativeDelta DateOffsets

    def test_dt64arr_add_sub_relativedelta_offsets(self, box_with_array):
        # GH#10699
        vec = DatetimeIndex(
            [
                Timestamp("2000-01-05 00:15:00"),
                Timestamp("2000-01-31 00:23:00"),
                Timestamp("2000-01-01"),
                Timestamp("2000-03-31"),
                Timestamp("2000-02-29"),
                Timestamp("2000-12-31"),
                Timestamp("2000-05-15"),
                Timestamp("2001-06-15"),
            ]
        )
        vec = tm.box_expected(vec, box_with_array)
        vec_items = vec.squeeze() if box_with_array is pd.DataFrame else vec

        # DateOffset relativedelta-style keywords, exercised singly and
        # cumulatively
        relative_kwargs = [
            ("years", 2),
            ("months", 5),
            ("days", 3),
            ("hours", 5),
            ("minutes", 10),
            ("seconds", 2),
            ("microseconds", 5),
        ]
        for i, kwd in enumerate(relative_kwargs):
            # offset built from the single keyword
            off = pd.DateOffset(**dict([kwd]))

            expected = DatetimeIndex([x + off for x in vec_items])
            expected = tm.box_expected(expected, box_with_array)
            tm.assert_equal(expected, vec + off)

            expected = DatetimeIndex([x - off for x in vec_items])
            expected = tm.box_expected(expected, box_with_array)
            tm.assert_equal(expected, vec - off)

            # offset built from all keywords seen so far
            off = pd.DateOffset(**dict(relative_kwargs[: i + 1]))

            expected = DatetimeIndex([x + off for x in vec_items])
            expected = tm.box_expected(expected, box_with_array)
            tm.assert_equal(expected, vec + off)

            expected = DatetimeIndex([x - off for x in vec_items])
            expected = tm.box_expected(expected, box_with_array)
            tm.assert_equal(expected, vec - off)

            # offset - box is not defined
            msg = "(bad|unsupported) operand type for unary"
            with pytest.raises(TypeError, match=msg):
                off - vec

    # -------------------------------------------------------------
    # Non-Tick, Non-RelativeDelta DateOffsets

    @pytest.mark.parametrize(
        "cls_and_kwargs",
        [
            "YearBegin",
            ("YearBegin", {"month": 5}),
            "YearEnd",
            ("YearEnd", {"month": 5}),
            "MonthBegin",
            "MonthEnd",
            "SemiMonthEnd",
            "SemiMonthBegin",
            "Week",
            ("Week", {"weekday": 3}),
            "Week",
            ("Week", {"weekday": 6}),
            "BusinessDay",
            "BDay",
            "QuarterEnd",
            "QuarterBegin",
            "CustomBusinessDay",
            "CDay",
            "CBMonthEnd",
            "CBMonthBegin",
            "BMonthBegin",
            "BMonthEnd",
            "BusinessHour",
            "BYearBegin",
            "BYearEnd",
            "BQuarterBegin",
            ("LastWeekOfMonth", {"weekday": 2}),
            (
                "FY5253Quarter",
                {
                    "qtr_with_extra_week": 1,
                    "startingMonth": 1,
                    "weekday": 2,
                    "variation": "nearest",
                },
            ),
            ("FY5253", {"weekday": 0, "startingMonth": 2, "variation": "nearest"}),
            ("WeekOfMonth", {"weekday": 2, "week": 2}),
            "Easter",
            ("DateOffset", {"day": 4}),
            ("DateOffset", {"month": 5}),
        ],
    )
    @pytest.mark.parametrize("normalize", [True, False])
    @pytest.mark.parametrize("n", [0, 5])
    def test_dt64arr_add_sub_DateOffsets(
        self, box_with_array, n, normalize, cls_and_kwargs
    ):
        # GH#10699
        # a tuple entry carries constructor kwargs for the offset class
        if isinstance(cls_and_kwargs, tuple):
            cls_name, kwargs = cls_and_kwargs
        else:
            cls_name = cls_and_kwargs
            kwargs = {}

        if n == 0 and cls_name in [
            "WeekOfMonth",
            "LastWeekOfMonth",
            "FY5253Quarter",
            "FY5253",
        ]:
            # passing n = 0 is invalid for these offset classes
            return

        vec = DatetimeIndex(
            [
                Timestamp("2000-01-05 00:15:00"),
                Timestamp("2000-01-31 00:23:00"),
                Timestamp("2000-01-01"),
                Timestamp("2000-03-31"),
                Timestamp("2000-02-29"),
                Timestamp("2000-12-31"),
                Timestamp("2000-05-15"),
                Timestamp("2001-06-15"),
            ]
        )
        vec = tm.box_expected(vec, box_with_array)
        vec_items = vec.squeeze() if box_with_array is pd.DataFrame else vec

        offset_cls = getattr(pd.offsets, cls_name)

        with warnings.catch_warnings(record=True):
            # non-vectorized DateOffset application warns; that is not what
            # is under test here, so ignore it
            warnings.simplefilter("ignore", PerformanceWarning)

            offset = offset_cls(n, normalize=normalize, **kwargs)

            # vectorized op must match the pointwise result
            expected = DatetimeIndex([x + offset for x in vec_items])
            expected = tm.box_expected(expected, box_with_array)
            tm.assert_equal(expected, vec + offset)

            expected = DatetimeIndex([x - offset for x in vec_items])
            expected = tm.box_expected(expected, box_with_array)
            tm.assert_equal(expected, vec - offset)

            expected = DatetimeIndex([offset + x for x in vec_items])
            expected = tm.box_expected(expected, box_with_array)
            tm.assert_equal(expected, offset + vec)
            msg = "(bad|unsupported) operand type for unary"
            with pytest.raises(TypeError, match=msg):
                offset - vec

    def test_dt64arr_add_sub_DateOffset(self, box_with_array):
        # GH#10699
        s = date_range("2000-01-01", "2000-01-31", name="a")
        s = tm.box_expected(s, box_with_array)
        result = s + pd.DateOffset(years=1)
        result2 = pd.DateOffset(years=1) + s
        exp = date_range("2001-01-01", "2001-01-31", name="a")
        exp = tm.box_expected(exp, box_with_array)
        tm.assert_equal(result, exp)
        tm.assert_equal(result2, exp)

        result = s - pd.DateOffset(years=1)
        exp = date_range("1999-01-01", "1999-01-31", name="a")
        exp = tm.box_expected(exp, box_with_array)
        tm.assert_equal(result, exp)

        s = DatetimeIndex(
            [
                Timestamp("2000-01-15 00:15:00", tz="US/Central"),
                Timestamp("2000-02-15", tz="US/Central"),
            ],
            name="a",
        )
        s = tm.box_expected(s, box_with_array)
        result = s + pd.offsets.Day()
        result2 = pd.offsets.Day() + s
        exp = DatetimeIndex(
            [
                Timestamp("2000-01-16 00:15:00", tz="US/Central"),
                Timestamp("2000-02-16", tz="US/Central"),
            ],
            name="a",
        )
        exp = tm.box_expected(exp, box_with_array)
        tm.assert_equal(result, exp)
        tm.assert_equal(result2, exp)

        s = DatetimeIndex(
            [
                Timestamp("2000-01-15 00:15:00", tz="US/Central"),
                Timestamp("2000-02-15", tz="US/Central"),
            ],
            name="a",
        )
        s = tm.box_expected(s, box_with_array)
        result = s + pd.offsets.MonthEnd()
        result2 = pd.offsets.MonthEnd() + s
        exp = DatetimeIndex(
            [
                Timestamp("2000-01-31 00:15:00", tz="US/Central"),
                Timestamp("2000-02-29", tz="US/Central"),
            ],
            name="a",
        )
        exp = tm.box_expected(exp, box_with_array)
        tm.assert_equal(result, exp)
        tm.assert_equal(result2, exp)

    # TODO: __sub__, __rsub__
    def test_dt64arr_add_mixed_offset_array(self, box_with_array):
        # GH#10699
        # array of offsets
        s = DatetimeIndex([Timestamp("2000-1-1"), Timestamp("2000-2-1")])
        s = tm.box_expected(s, box_with_array)

        # object-dtype offset arrays warn about non-vectorized application
        # (except for DataFrame)
        warn = None if box_with_array is pd.DataFrame else PerformanceWarning
        with tm.assert_produces_warning(warn):
            other = pd.Index([pd.offsets.DateOffset(years=1), pd.offsets.MonthEnd()])
            other = tm.box_expected(other, box_with_array)
            result = s + other
            exp = DatetimeIndex([Timestamp("2001-1-1"), Timestamp("2000-2-29")])
            exp = tm.box_expected(exp, box_with_array)
            tm.assert_equal(result, exp)

            # same offset
            other = pd.Index(
                [pd.offsets.DateOffset(years=1), pd.offsets.DateOffset(years=1)]
            )
            other = tm.box_expected(other, box_with_array)
            result = s + other
            exp = DatetimeIndex([Timestamp("2001-1-1"), Timestamp("2001-2-1")])
            exp = tm.box_expected(exp, box_with_array)
            tm.assert_equal(result, exp)

    # TODO: overlap with test_dt64arr_add_mixed_offset_array?
    def test_dt64arr_add_sub_offset_ndarray(self, tz_naive_fixture, box_with_array):
        # GH#18849
        tz = tz_naive_fixture
        dti = pd.date_range("2017-01-01", periods=2, tz=tz)
        dtarr = tm.box_expected(dti, box_with_array)

        other = np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])

        warn = None if box_with_array is pd.DataFrame else PerformanceWarning
        with tm.assert_produces_warning(warn):
            res = dtarr + other
        expected = DatetimeIndex(
            [dti[n] + other[n] for n in range(len(dti))], name=dti.name, freq="infer"
        )
        expected = tm.box_expected(expected, box_with_array)
        tm.assert_equal(res, expected)

        with tm.assert_produces_warning(warn):
            res2 = other + dtarr
        tm.assert_equal(res2, expected)

        with tm.assert_produces_warning(warn):
            res = dtarr - other
        expected = DatetimeIndex(
            [dti[n] - other[n] for n in range(len(dti))], name=dti.name, freq="infer"
        )
        expected = tm.box_expected(expected, box_with_array)
        tm.assert_equal(res, expected)

    @pytest.mark.parametrize(
        "op, offset, exp, exp_freq",
        [
            (
                "__add__",
                pd.DateOffset(months=3, days=10),
                [
                    Timestamp("2014-04-11"),
                    Timestamp("2015-04-11"),
                    Timestamp("2016-04-11"),
                    Timestamp("2017-04-11"),
                ],
                None,
            ),
            (
                "__add__",
                pd.DateOffset(months=3),
                [
                    Timestamp("2014-04-01"),
                    Timestamp("2015-04-01"),
                    Timestamp("2016-04-01"),
                    Timestamp("2017-04-01"),
                ],
                "AS-APR",
            ),
            (
                "__sub__",
                pd.DateOffset(months=3, days=10),
                [
                    Timestamp("2013-09-21"),
                    Timestamp("2014-09-21"),
                    Timestamp("2015-09-21"),
                    Timestamp("2016-09-21"),
                ],
                None,
            ),
            (
                "__sub__",
                pd.DateOffset(months=3),
                [
                    Timestamp("2013-10-01"),
                    Timestamp("2014-10-01"),
                    Timestamp("2015-10-01"),
                    Timestamp("2016-10-01"),
                ],
                "AS-OCT",
            ),
        ],
    )
    def test_dti_add_sub_nonzero_mth_offset(
        self, op, offset, exp, exp_freq, tz_aware_fixture, box_with_array
    ):
        # GH 26258
        tz = tz_aware_fixture
        date = date_range(start="01 Jan 2014", end="01 Jan 2017", freq="AS", tz=tz)
        date = tm.box_expected(date, box_with_array, False)
        mth = getattr(date, op)
        result = mth(offset)

        expected = pd.DatetimeIndex(exp, tz=tz, freq=exp_freq)
        expected = tm.box_expected(expected, box_with_array, False)
        tm.assert_equal(result, expected)
class TestDatetime64OverflowHandling:
    """Overflow behavior of datetime64 arithmetic near Timestamp.min/max:
    NaT masks out overflow; otherwise OverflowError is raised."""

    # TODO: box + de-duplicate

    def test_dt64_overflow_masking(self, box_with_array):
        # GH#25317
        left = Series([Timestamp("1969-12-31")])
        right = Series([NaT])

        left = tm.box_expected(left, box_with_array)
        right = tm.box_expected(right, box_with_array)

        # the NaT entry is masked rather than overflowing
        expected = TimedeltaIndex([NaT])
        expected = tm.box_expected(expected, box_with_array)

        result = left - right
        tm.assert_equal(result, expected)

    def test_dt64_series_arith_overflow(self):
        # GH#12534, fixed by GH#19024
        dt = pd.Timestamp("1700-01-31")
        td = pd.Timedelta("20000 Days")
        dti = pd.date_range("1949-09-30", freq="100Y", periods=4)
        ser = pd.Series(dti)
        msg = "Overflow in int64 addition"
        with pytest.raises(OverflowError, match=msg):
            ser - dt
        with pytest.raises(OverflowError, match=msg):
            dt - ser
        with pytest.raises(OverflowError, match=msg):
            ser + td
        with pytest.raises(OverflowError, match=msg):
            td + ser

        # with the overflowing entry replaced by NaT, the op succeeds
        ser.iloc[-1] = pd.NaT
        expected = pd.Series(
            ["2004-10-03", "2104-10-04", "2204-10-04", "NaT"], dtype="datetime64[ns]"
        )
        res = ser + td
        tm.assert_series_equal(res, expected)
        res = td + ser
        tm.assert_series_equal(res, expected)

        ser.iloc[1:] = pd.NaT
        expected = pd.Series(
            ["91279 Days", "NaT", "NaT", "NaT"], dtype="timedelta64[ns]"
        )
        res = ser - dt
        tm.assert_series_equal(res, expected)
        res = dt - ser
        tm.assert_series_equal(res, -expected)

    def test_datetimeindex_sub_timestamp_overflow(self):
        dtimax = pd.to_datetime(["now", pd.Timestamp.max])
        dtimin = pd.to_datetime(["now", pd.Timestamp.min])

        tsneg = Timestamp("1950-01-01")
        ts_neg_variants = [
            tsneg,
            tsneg.to_pydatetime(),
            tsneg.to_datetime64().astype("datetime64[ns]"),
            tsneg.to_datetime64().astype("datetime64[D]"),
        ]

        tspos = Timestamp("1980-01-01")
        ts_pos_variants = [
            tspos,
            tspos.to_pydatetime(),
            tspos.to_datetime64().astype("datetime64[ns]"),
            tspos.to_datetime64().astype("datetime64[D]"),
        ]
        msg = "Overflow in int64 addition"
        # max - (ts before epoch) overflows; max - (ts after epoch) is fine
        for variant in ts_neg_variants:
            with pytest.raises(OverflowError, match=msg):
                dtimax - variant

        expected = pd.Timestamp.max.value - tspos.value
        for variant in ts_pos_variants:
            res = dtimax - variant
            assert res[1].value == expected

        # min - (ts before epoch) is fine; min - (ts after epoch) overflows
        expected = pd.Timestamp.min.value - tsneg.value
        for variant in ts_neg_variants:
            res = dtimin - variant
            assert res[1].value == expected

        for variant in ts_pos_variants:
            with pytest.raises(OverflowError, match=msg):
                dtimin - variant

    def test_datetimeindex_sub_datetimeindex_overflow(self):
        # GH#22492, GH#22508
        dtimax = pd.to_datetime(["now", pd.Timestamp.max])
        dtimin = pd.to_datetime(["now", pd.Timestamp.min])

        ts_neg = pd.to_datetime(["1950-01-01", "1950-01-01"])
        ts_pos = pd.to_datetime(["1980-01-01", "1980-01-01"])

        # General tests
        expected = pd.Timestamp.max.value - ts_pos[1].value
        result = dtimax - ts_pos
        assert result[1].value == expected

        expected = pd.Timestamp.min.value - ts_neg[1].value
        result = dtimin - ts_neg
        assert result[1].value == expected
        msg = "Overflow in int64 addition"
        with pytest.raises(OverflowError, match=msg):
            dtimax - ts_neg
        with pytest.raises(OverflowError, match=msg):
            dtimin - ts_pos

        # Edge cases
        tmin = pd.to_datetime([pd.Timestamp.min])
        t1 = tmin + pd.Timedelta.max + pd.Timedelta("1us")
        with pytest.raises(OverflowError, match=msg):
            t1 - tmin

        tmax = pd.to_datetime([pd.Timestamp.max])
        t2 = tmax + pd.Timedelta.min - pd.Timedelta("1us")
        with pytest.raises(OverflowError, match=msg):
            tmax - t2
class TestTimestampSeriesArithmetic:
    """Arithmetic on Series of Timestamps, including empty, NaT-containing,
    and timezone-aware cases."""

    def test_empty_series_add_sub(self):
        # GH#13844
        a = Series(dtype="M8[ns]")
        b = Series(dtype="m8[ns]")
        tm.assert_series_equal(a, a + b)
        tm.assert_series_equal(a, a - b)
        tm.assert_series_equal(a, b + a)
        msg = "cannot subtract"
        with pytest.raises(TypeError, match=msg):
            b - a

    def test_operators_datetimelike(self):
        # smoke checks: these combinations should simply not raise

        # ## timedelta64 ###
        td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
        td1.iloc[2] = np.nan

        # ## datetime64 ###
        dt1 = Series(
            [
                pd.Timestamp("20111230"),
                pd.Timestamp("20120101"),
                pd.Timestamp("20120103"),
            ]
        )
        dt1.iloc[2] = np.nan
        dt2 = Series(
            [
                pd.Timestamp("20111231"),
                pd.Timestamp("20120102"),
                pd.Timestamp("20120104"),
            ]
        )
        dt1 - dt2
        dt2 - dt1

        # datetime64 with timedelta
        dt1 + td1
        td1 + dt1
        dt1 - td1
        # timedelta with datetime64
        td1 + dt1
        dt1 + td1

    def test_dt64ser_sub_datetime_dtype(self):
        # Timestamp minus stdlib datetime yields timedelta64[ns]
        ts = Timestamp(datetime(1993, 1, 7, 13, 30, 00))
        dt = datetime(1993, 6, 22, 13, 30)
        ser = Series([ts])
        result = pd.to_timedelta(np.abs(ser - dt))
        assert result.dtype == "timedelta64[ns]"

    # -------------------------------------------------------------
    # TODO: This next block of tests came from tests.series.test_operators,
    # needs to be de-duplicated and parametrized over `box` classes

    def test_operators_datetimelike_invalid(self, all_arithmetic_operators):
        # these are all TypeError ops
        op_str = all_arithmetic_operators

        def check(get_ser, test_ser):
            # check that we are getting a TypeError
            # with 'operate' (from core/ops.py) for the ops that are not
            # defined
            op = getattr(get_ser, op_str, None)
            # Previously, _validate_for_numeric_binop in core/indexes/base.py
            # did this for us.
            with pytest.raises(
                TypeError, match="operate|[cC]annot|unsupported operand"
            ):
                op(test_ser)

        # ## timedelta64 ###
        td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
        td1.iloc[2] = np.nan

        # ## datetime64 ###
        dt1 = Series(
            [Timestamp("20111230"), Timestamp("20120101"), Timestamp("20120103")]
        )
        dt1.iloc[2] = np.nan
        dt2 = Series(
            [Timestamp("20111231"), Timestamp("20120102"), Timestamp("20120104")]
        )
        if op_str not in ["__sub__", "__rsub__"]:
            check(dt1, dt2)

        # ## datetime64 with timedelta ###
        # TODO(jreback) __rsub__ should raise?
        if op_str not in ["__add__", "__radd__", "__sub__"]:
            check(dt1, td1)

        # 8260, 10763
        # datetime64 with tz
        tz = "US/Eastern"
        dt1 = Series(date_range("2000-01-01 09:00:00", periods=5, tz=tz), name="foo")
        dt2 = dt1.copy()
        dt2.iloc[2] = np.nan
        td1 = Series(pd.timedelta_range("1 days 1 min", periods=5, freq="H"))
        td2 = td1.copy()
        td2.iloc[1] = np.nan

        if op_str not in ["__add__", "__radd__", "__sub__", "__rsub__"]:
            check(dt2, td2)

    def test_sub_single_tz(self):
        # GH#12290
        s1 = Series([pd.Timestamp("2016-02-10", tz="America/Sao_Paulo")])
        s2 = Series([pd.Timestamp("2016-02-08", tz="America/Sao_Paulo")])
        result = s1 - s2
        expected = Series([Timedelta("2days")])
        tm.assert_series_equal(result, expected)
        result = s2 - s1
        expected = Series([Timedelta("-2days")])
        tm.assert_series_equal(result, expected)

    def test_dt64tz_series_sub_dtitz(self):
        # GH#19071 subtracting tzaware DatetimeIndex from tzaware Series
        # (with same tz) raises, fixed by #19024
        dti = pd.date_range("1999-09-30", periods=10, tz="US/Pacific")
        ser = pd.Series(dti)
        expected = pd.Series(pd.TimedeltaIndex(["0days"] * 10))

        res = dti - ser
        tm.assert_series_equal(res, expected)
        res = ser - dti
        tm.assert_series_equal(res, expected)

    def test_sub_datetime_compat(self):
        # see GH#14088
        s = Series([datetime(2016, 8, 23, 12, tzinfo=pytz.utc), pd.NaT])
        dt = datetime(2016, 8, 22, 12, tzinfo=pytz.utc)
        exp = Series([Timedelta("1 days"), pd.NaT])
        # stdlib datetime and Timestamp behave identically here
        tm.assert_series_equal(s - dt, exp)
        tm.assert_series_equal(s - Timestamp(dt), exp)

    def test_dt64_series_add_mixed_tick_DateOffset(self):
        # GH#4532
        # operate with pd.offsets
        s = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])

        result = s + pd.offsets.Milli(5)
        result2 = pd.offsets.Milli(5) + s
        expected = Series(
            [Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
        )
        tm.assert_series_equal(result, expected)
        tm.assert_series_equal(result2, expected)

        result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
        expected = Series(
            [Timestamp("20130101 9:06:00.005"), Timestamp("20130101 9:07:00.005")]
        )
        tm.assert_series_equal(result, expected)

    def test_datetime64_ops_nat(self):
        # GH#11349
        datetime_series = Series([NaT, Timestamp("19900315")])
        nat_series_dtype_timestamp = Series([NaT, NaT], dtype="datetime64[ns]")
        single_nat_dtype_datetime = Series([NaT], dtype="datetime64[ns]")

        # subtraction
        tm.assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp)
        # unary minus is not defined for datetime64 Series
        msg = "Unary negative expects"
        with pytest.raises(TypeError, match=msg):
            -single_nat_dtype_datetime + datetime_series

        tm.assert_series_equal(
            -NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
        )
        with pytest.raises(TypeError, match=msg):
            -single_nat_dtype_datetime + nat_series_dtype_timestamp

        # addition
        tm.assert_series_equal(
            nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp
        )
        tm.assert_series_equal(
            NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
        )

        tm.assert_series_equal(
            nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp
        )
        tm.assert_series_equal(
            NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
        )

    # -------------------------------------------------------------
    # Invalid Operations
    # TODO: this block also needs to be de-duplicated and parametrized

    @pytest.mark.parametrize(
        "dt64_series",
        [
            Series([Timestamp("19900315"), Timestamp("19900315")]),
            Series([pd.NaT, Timestamp("19900315")]),
            Series([pd.NaT, pd.NaT], dtype="datetime64[ns]"),
        ],
    )
    @pytest.mark.parametrize("one", [1, 1.0, np.array(1)])
    def test_dt64_mul_div_numeric_invalid(self, one, dt64_series):
        # multiplication
        msg = "cannot perform .* with this index type"
        with pytest.raises(TypeError, match=msg):
            dt64_series * one
        with pytest.raises(TypeError, match=msg):
            one * dt64_series

        # division
        with pytest.raises(TypeError, match=msg):
            dt64_series / one
        with pytest.raises(TypeError, match=msg):
            one / dt64_series

    # TODO: parametrize over box
    @pytest.mark.parametrize("op", ["__add__", "__radd__", "__sub__", "__rsub__"])
    @pytest.mark.parametrize("tz", [None, "Asia/Tokyo"])
    def test_dt64_series_add_intlike(self, tz, op):
        # GH#19123
        dti = pd.DatetimeIndex(["2016-01-02", "2016-02-03", "NaT"], tz=tz)
        ser = Series(dti)

        other = Series([20, 30, 40], dtype="uint8")

        method = getattr(ser, op)
        msg = "|".join(
            [
                "Addition/subtraction of integers and integer-arrays",
                "cannot subtract .* from ndarray",
            ]
        )
        with pytest.raises(TypeError, match=msg):
            method(1)
        with pytest.raises(TypeError, match=msg):
            method(other)
        with pytest.raises(TypeError, match=msg):
            method(np.array(other))
        with pytest.raises(TypeError, match=msg):
            method(pd.Index(other))

    # -------------------------------------------------------------
    # Timezone-Centric Tests

    def test_operators_datetimelike_with_timezones(self):
        tz = "US/Eastern"
        dt1 = Series(date_range("2000-01-01 09:00:00", periods=5, tz=tz), name="foo")
        dt2 = dt1.copy()
        dt2.iloc[2] = np.nan

        td1 = Series(pd.timedelta_range("1 days 1 min", periods=5, freq="H"))
        td2 = td1.copy()
        td2.iloc[1] = np.nan

        # expected values are computed by localizing to naive, operating,
        # then re-localizing
        result = dt1 + td1[0]
        exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)

        result = dt2 + td2[0]
        exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)

        # odd numpy behavior with scalar timedeltas
        result = td1[0] + dt1
        exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)

        result = td2[0] + dt2
        exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)

        result = dt1 - td1[0]
        exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)
        msg = "(bad|unsupported) operand type for unary"
        with pytest.raises(TypeError, match=msg):
            td1[0] - dt1

        result = dt2 - td2[0]
        exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)
        with pytest.raises(TypeError, match=msg):
            td2[0] - dt2

        result = dt1 + td1
        exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)

        result = dt2 + td2
        exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)

        result = dt1 - td1
        exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)

        result = dt2 - td2
        exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)

        msg = "cannot (add|subtract)"
        with pytest.raises(TypeError, match=msg):
            td1 - dt1
        with pytest.raises(TypeError, match=msg):
            td2 - dt2
class TestDatetimeIndexArithmetic:
    """Arithmetic behavior of DatetimeIndex: rejection of integer operands,
    interaction with TimedeltaIndex/timedelta64 arrays, datetime-like
    operands, ufunc coercions, Series alignment, and offset/object-dtype
    array operands."""

    # -------------------------------------------------------------
    # Binary operations DatetimeIndex and int
    def test_dti_addsub_int(self, tz_naive_fixture, one):
        """Adding/subtracting an integer-like scalar to a DatetimeIndex
        raises TypeError, both in normal and in-place form."""
        # Variants of `one` for #19012
        tz = tz_naive_fixture
        rng = pd.date_range("2000-01-01 09:00", freq="H", periods=10, tz=tz)
        msg = "Addition/subtraction of integers"
        with pytest.raises(TypeError, match=msg):
            rng + one
        with pytest.raises(TypeError, match=msg):
            rng += one
        with pytest.raises(TypeError, match=msg):
            rng - one
        with pytest.raises(TypeError, match=msg):
            rng -= one

    # -------------------------------------------------------------
    # __add__/__sub__ with integer arrays
    @pytest.mark.parametrize("freq", ["H", "D"])
    @pytest.mark.parametrize("int_holder", [np.array, pd.Index])
    def test_dti_add_intarray_tick(self, int_holder, freq):
        """Integer arrays cannot be added to a tick-frequency DatetimeIndex."""
        # GH#19959
        dti = pd.date_range("2016-01-01", periods=2, freq=freq)
        other = int_holder([4, -1])
        msg = "Addition/subtraction of integers|cannot subtract DatetimeArray from"
        assert_invalid_addsub_type(dti, other, msg)

    @pytest.mark.parametrize("freq", ["W", "M", "MS", "Q"])
    @pytest.mark.parametrize("int_holder", [np.array, pd.Index])
    def test_dti_add_intarray_non_tick(self, int_holder, freq):
        """Same as the tick case, for non-tick (calendar) frequencies."""
        # GH#19959
        dti = pd.date_range("2016-01-01", periods=2, freq=freq)
        other = int_holder([4, -1])
        msg = "Addition/subtraction of integers|cannot subtract DatetimeArray from"
        assert_invalid_addsub_type(dti, other, msg)

    @pytest.mark.parametrize("int_holder", [np.array, pd.Index])
    def test_dti_add_intarray_no_freq(self, int_holder):
        """Integer arrays are also rejected when the index has no freq."""
        # GH#19959
        dti = pd.DatetimeIndex(["2016-01-01", "NaT", "2017-04-05 06:07:08"])
        other = int_holder([9, 4, -1])
        msg = "|".join(
            ["cannot subtract DatetimeArray from", "Addition/subtraction of integers"]
        )
        assert_invalid_addsub_type(dti, other, msg)

    # -------------------------------------------------------------
    # Binary operations DatetimeIndex and TimedeltaIndex/array
    def test_dti_add_tdi(self, tz_naive_fixture):
        """dti + tdi works in both orders, with TimedeltaIndex or raw
        timedelta64 ndarray operands."""
        # GH#17558
        tz = tz_naive_fixture
        dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
        tdi = pd.timedelta_range("0 days", periods=10)
        expected = pd.date_range("2017-01-01", periods=10, tz=tz)
        # add with TimedeltaIndex
        result = dti + tdi
        tm.assert_index_equal(result, expected)
        result = tdi + dti
        tm.assert_index_equal(result, expected)
        # add with timedelta64 array
        result = dti + tdi.values
        tm.assert_index_equal(result, expected)
        result = tdi.values + dti
        tm.assert_index_equal(result, expected)

    def test_dti_iadd_tdi(self, tz_naive_fixture):
        """In-place += with TimedeltaIndex / timedelta64 arrays matches
        the out-of-place result."""
        # GH#17558
        tz = tz_naive_fixture
        dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
        tdi = pd.timedelta_range("0 days", periods=10)
        expected = pd.date_range("2017-01-01", periods=10, tz=tz)
        # iadd with TimedeltaIndex
        result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
        result += tdi
        tm.assert_index_equal(result, expected)
        result = pd.timedelta_range("0 days", periods=10)
        result += dti
        tm.assert_index_equal(result, expected)
        # iadd with timedelta64 array
        result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
        result += tdi.values
        tm.assert_index_equal(result, expected)
        result = pd.timedelta_range("0 days", periods=10)
        result += dti
        tm.assert_index_equal(result, expected)

    def test_dti_sub_tdi(self, tz_naive_fixture):
        """dti - tdi works; tdi - dti raises TypeError."""
        # GH#17558
        tz = tz_naive_fixture
        dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
        tdi = pd.timedelta_range("0 days", periods=10)
        expected = pd.date_range("2017-01-01", periods=10, tz=tz, freq="-1D")
        # sub with TimedeltaIndex
        result = dti - tdi
        tm.assert_index_equal(result, expected)
        msg = "cannot subtract .*TimedeltaArray"
        with pytest.raises(TypeError, match=msg):
            tdi - dti
        # sub with timedelta64 array
        result = dti - tdi.values
        tm.assert_index_equal(result, expected)
        msg = "cannot subtract DatetimeArray from"
        with pytest.raises(TypeError, match=msg):
            tdi.values - dti

    def test_dti_isub_tdi(self, tz_naive_fixture):
        """In-place -= mirrors test_dti_sub_tdi, including the error cases."""
        # GH#17558
        tz = tz_naive_fixture
        dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
        tdi = pd.timedelta_range("0 days", periods=10)
        expected = pd.date_range("2017-01-01", periods=10, tz=tz, freq="-1D")
        # isub with TimedeltaIndex
        result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
        result -= tdi
        tm.assert_index_equal(result, expected)
        msg = "cannot subtract .* from a TimedeltaArray"
        with pytest.raises(TypeError, match=msg):
            tdi -= dti
        # isub with timedelta64 array
        result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
        result -= tdi.values
        tm.assert_index_equal(result, expected)
        # message differs depending on which code path rejects the op
        msg = "|".join(
            [
                "cannot perform __neg__ with this index type:",
                "ufunc subtract cannot use operands with types",
                "cannot subtract DatetimeArray from",
            ]
        )
        with pytest.raises(TypeError, match=msg):
            tdi.values -= dti

    # -------------------------------------------------------------
    # Binary Operations DatetimeIndex and datetime-like
    # TODO: A couple other tests belong in this section. Move them in
    # A PR where there isn't already a giant diff.
    @pytest.mark.parametrize(
        "addend",
        [
            datetime(2011, 1, 1),
            DatetimeIndex(["2011-01-01", "2011-01-02"]),
            DatetimeIndex(["2011-01-01", "2011-01-02"]).tz_localize("US/Eastern"),
            np.datetime64("2011-01-01"),
            Timestamp("2011-01-01"),
        ],
        ids=lambda x: type(x).__name__,
    )
    @pytest.mark.parametrize("tz", [None, "US/Eastern"])
    def test_add_datetimelike_and_dtarr(self, box_with_array, addend, tz):
        """Adding any datetime-like object to a datetime array raises."""
        dti = DatetimeIndex(["2011-01-01", "2011-01-02"]).tz_localize(tz)
        dtarr = tm.box_expected(dti, box_with_array)
        msg = "cannot add DatetimeArray and"
        with pytest.raises(TypeError, match=msg):
            dtarr + addend
        with pytest.raises(TypeError, match=msg):
            addend + dtarr

    def test_dta_add_sub_index(self, tz_naive_fixture):
        """A DatetimeArray (dti.array) adds/subtracts like its Index."""
        dti = date_range("20130101", periods=3, tz=tz_naive_fixture)
        dta = dti.array
        result = dta - dti
        expected = dti - dti
        tm.assert_index_equal(result, expected)
        tdi = result
        result = dta + tdi
        expected = dti + tdi
        tm.assert_index_equal(result, expected)
        result = dta - tdi
        expected = dti - tdi
        tm.assert_index_equal(result, expected)

    def test_sub_dti_dti(self):
        """dti - dti yields TimedeltaIndex; mismatched timezones or lengths
        raise; NaT entries propagate."""
        dti = date_range("20130101", periods=3)
        dti_tz = date_range("20130101", periods=3).tz_localize("US/Eastern")
        dti_tz2 = date_range("20130101", periods=3).tz_localize("UTC")
        expected = TimedeltaIndex([0, 0, 0])
        result = dti - dti
        tm.assert_index_equal(result, expected)
        result = dti_tz - dti_tz
        tm.assert_index_equal(result, expected)
        # mixing naive/aware or two different timezones raises
        msg = "DatetimeArray subtraction must have the same timezones or"
        with pytest.raises(TypeError, match=msg):
            dti_tz - dti
        with pytest.raises(TypeError, match=msg):
            dti - dti_tz
        with pytest.raises(TypeError, match=msg):
            dti_tz - dti_tz2
        # in-place subtraction
        dti -= dti
        tm.assert_index_equal(dti, expected)
        # length mismatch raises ValueError
        dti1 = date_range("20130101", periods=3)
        dti2 = date_range("20130101", periods=4)
        msg = "cannot add indices of unequal length"
        with pytest.raises(ValueError, match=msg):
            dti1 - dti2
        # NaT propagates through subtraction
        dti1 = DatetimeIndex(["2012-01-01", np.nan, "2012-01-03"])
        dti2 = DatetimeIndex(["2012-01-02", "2012-01-03", np.nan])
        expected = TimedeltaIndex(["1 days", np.nan, np.nan])
        result = dti2 - dti1
        tm.assert_index_equal(result, expected)

    @pytest.mark.parametrize("op", [operator.add, operator.sub])
    def test_timedelta64_equal_timedelta_supported_ops(self, op):
        """np.timedelta64 and datetime.timedelta operands with equal values
        produce identical results when added to / subtracted from a
        Timestamp Series."""
        ser = Series(
            [
                Timestamp("20130301"),
                Timestamp("20130228 23:00:00"),
                Timestamp("20130228 22:00:00"),
                Timestamp("20130228 21:00:00"),
            ]
        )
        intervals = ["D", "h", "m", "s", "us"]

        def timedelta64(*args):
            # Build one np.timedelta64 from per-unit components.
            return np.sum(list(starmap(np.timedelta64, zip(args, intervals))))

        for d, h, m, s, us in product(*([range(2)] * 5)):
            nptd = timedelta64(d, h, m, s, us)
            pytd = timedelta(days=d, hours=h, minutes=m, seconds=s, microseconds=us)
            lhs = op(ser, nptd)
            rhs = op(ser, pytd)
            tm.assert_series_equal(lhs, rhs)

    def test_ops_nat_mixed_datetime64_timedelta64(self):
        """NaT-containing datetime64 and timedelta64 Series combine into the
        expected NaT results; timedelta - datetime raises."""
        timedelta_series = Series([NaT, Timedelta("1s")])
        datetime_series = Series([NaT, Timestamp("19900315")])
        nat_series_dtype_timedelta = Series([NaT, NaT], dtype="timedelta64[ns]")
        nat_series_dtype_timestamp = Series([NaT, NaT], dtype="datetime64[ns]")
        single_nat_dtype_datetime = Series([NaT], dtype="datetime64[ns]")
        single_nat_dtype_timedelta = Series([NaT], dtype="timedelta64[ns]")
        # subtraction
        tm.assert_series_equal(
            datetime_series - single_nat_dtype_datetime, nat_series_dtype_timedelta
        )
        tm.assert_series_equal(
            datetime_series - single_nat_dtype_timedelta, nat_series_dtype_timestamp
        )
        tm.assert_series_equal(
            -single_nat_dtype_timedelta + datetime_series, nat_series_dtype_timestamp
        )
        tm.assert_series_equal(
            nat_series_dtype_timestamp - single_nat_dtype_datetime,
            nat_series_dtype_timedelta,
        )
        tm.assert_series_equal(
            nat_series_dtype_timestamp - single_nat_dtype_timedelta,
            nat_series_dtype_timestamp,
        )
        tm.assert_series_equal(
            -single_nat_dtype_timedelta + nat_series_dtype_timestamp,
            nat_series_dtype_timestamp,
        )
        msg = "cannot subtract a datelike"
        with pytest.raises(TypeError, match=msg):
            timedelta_series - single_nat_dtype_datetime
        # addition
        tm.assert_series_equal(
            nat_series_dtype_timestamp + single_nat_dtype_timedelta,
            nat_series_dtype_timestamp,
        )
        tm.assert_series_equal(
            single_nat_dtype_timedelta + nat_series_dtype_timestamp,
            nat_series_dtype_timestamp,
        )
        tm.assert_series_equal(
            nat_series_dtype_timestamp + single_nat_dtype_timedelta,
            nat_series_dtype_timestamp,
        )
        tm.assert_series_equal(
            single_nat_dtype_timedelta + nat_series_dtype_timestamp,
            nat_series_dtype_timestamp,
        )
        tm.assert_series_equal(
            nat_series_dtype_timedelta + single_nat_dtype_datetime,
            nat_series_dtype_timestamp,
        )
        tm.assert_series_equal(
            single_nat_dtype_datetime + nat_series_dtype_timedelta,
            nat_series_dtype_timestamp,
        )

    def test_ufunc_coercions(self):
        """np.add / np.subtract with timedelta64 operands preserve
        DatetimeIndex type and infer the resulting freq."""
        idx = date_range("2011-01-01", periods=3, freq="2D", name="x")
        # scalar timedelta64
        delta = np.timedelta64(1, "D")
        exp = date_range("2011-01-02", periods=3, freq="2D", name="x")
        for result in [idx + delta, np.add(idx, delta)]:
            assert isinstance(result, DatetimeIndex)
            tm.assert_index_equal(result, exp)
            assert result.freq == "2D"
        exp = date_range("2010-12-31", periods=3, freq="2D", name="x")
        for result in [idx - delta, np.subtract(idx, delta)]:
            assert isinstance(result, DatetimeIndex)
            tm.assert_index_equal(result, exp)
            assert result.freq == "2D"
        # elementwise timedelta64 array; freq is inferred from the result
        delta = np.array(
            [np.timedelta64(1, "D"), np.timedelta64(2, "D"), np.timedelta64(3, "D")]
        )
        exp = DatetimeIndex(
            ["2011-01-02", "2011-01-05", "2011-01-08"], freq="3D", name="x"
        )
        for result in [idx + delta, np.add(idx, delta)]:
            assert isinstance(result, DatetimeIndex)
            tm.assert_index_equal(result, exp)
            assert result.freq == "3D"
        exp = DatetimeIndex(
            ["2010-12-31", "2011-01-01", "2011-01-02"], freq="D", name="x"
        )
        for result in [idx - delta, np.subtract(idx, delta)]:
            assert isinstance(result, DatetimeIndex)
            tm.assert_index_equal(result, exp)
            assert result.freq == "D"

    @pytest.mark.parametrize(
        "names", [("foo", None, None), ("baz", "bar", None), ("bar", "bar", "bar")]
    )
    @pytest.mark.parametrize("tz", [None, "America/Chicago"])
    def test_dti_add_series(self, tz, names):
        """DatetimeIndex + Series aligns and keeps the expected name/dtype;
        adding the raw ndarray instead yields a DatetimeIndex."""
        index = DatetimeIndex(
            ["2016-06-28 05:30", "2016-06-28 05:31"], tz=tz, name=names[0]
        )
        ser = Series([Timedelta(seconds=5)] * 2, index=index, name=names[1])
        expected = Series(index + Timedelta(seconds=5), index=index, name=names[2])
        expected.name = names[2]
        assert expected.dtype == index.dtype
        result = ser + index
        tm.assert_series_equal(result, expected)
        result2 = index + ser
        tm.assert_series_equal(result2, expected)
        expected = index + Timedelta(seconds=5)
        result3 = ser.values + index
        tm.assert_index_equal(result3, expected)
        result4 = index + ser.values
        tm.assert_index_equal(result4, expected)

    @pytest.mark.parametrize("op", [operator.add, roperator.radd, operator.sub])
    @pytest.mark.parametrize(
        "names", [(None, None, None), ("foo", "bar", None), ("foo", "foo", "foo")]
    )
    def test_dti_addsub_offset_arraylike(
        self, tz_naive_fixture, names, op, index_or_series
    ):
        """Offset-array operands apply elementwise (with a
        PerformanceWarning) and propagate names per pandas name rules."""
        # GH#18849, GH#19744
        box = pd.Index
        other_box = index_or_series
        tz = tz_naive_fixture
        dti = pd.date_range("2017-01-01", periods=2, tz=tz, name=names[0])
        other = other_box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)], name=names[1])
        xbox = get_upcast_box(box, other)
        with tm.assert_produces_warning(PerformanceWarning):
            res = op(dti, other)
        expected = DatetimeIndex(
            [op(dti[n], other[n]) for n in range(len(dti))], name=names[2], freq="infer"
        )
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(res, expected)

    @pytest.mark.parametrize("other_box", [pd.Index, np.array])
    def test_dti_addsub_object_arraylike(
        self, tz_naive_fixture, box_with_array, other_box
    ):
        """Mixed offset/Timedelta object arrays add and subtract
        elementwise (DataFrame path emits no PerformanceWarning)."""
        tz = tz_naive_fixture
        dti = pd.date_range("2017-01-01", periods=2, tz=tz)
        dtarr = tm.box_expected(dti, box_with_array)
        other = other_box([pd.offsets.MonthEnd(), pd.Timedelta(days=4)])
        xbox = get_upcast_box(box_with_array, other)
        expected = pd.DatetimeIndex(["2017-01-31", "2017-01-06"], tz=tz_naive_fixture)
        expected = tm.box_expected(expected, xbox)
        warn = None if box_with_array is pd.DataFrame else PerformanceWarning
        with tm.assert_produces_warning(warn):
            result = dtarr + other
        tm.assert_equal(result, expected)
        expected = pd.DatetimeIndex(["2016-12-31", "2016-12-29"], tz=tz_naive_fixture)
        expected = tm.box_expected(expected, xbox)
        with tm.assert_produces_warning(warn):
            result = dtarr - other
        tm.assert_equal(result, expected)
@pytest.mark.parametrize("years", [-1, 0, 1])
@pytest.mark.parametrize("months", [-2, 0, 2])
def test_shift_months(years, months):
    """The low-level shift_months helper (operating on i8 values) matches
    adding a DateOffset(years=..., months=...) per element."""
    dti = DatetimeIndex(
        [
            Timestamp("2000-01-05 00:15:00"),
            Timestamp("2000-01-31 00:23:00"),
            Timestamp("2000-01-01"),
            Timestamp("2000-02-29"),
            Timestamp("2000-12-31"),
        ]
    )
    actual = DatetimeIndex(shift_months(dti.asi8, years * 12 + months))
    raw = [x + pd.offsets.DateOffset(years=years, months=months) for x in dti]
    expected = DatetimeIndex(raw)
    tm.assert_index_equal(actual, expected)
def test_dt64arr_addsub_object_dtype_2d():
    """2D DatetimeArray +/- object-dtype offset arrays works column-wise."""
    # block-wise DataFrame operations will require operating on 2D
    # DatetimeArray/TimedeltaArray, so check that specifically.
    dti = pd.date_range("1994-02-13", freq="2W", periods=4)
    dta = dti._data.reshape((4, 1))
    other = np.array([[pd.offsets.Day(n)] for n in range(4)])
    assert other.shape == dta.shape
    with tm.assert_produces_warning(PerformanceWarning):
        result = dta + other
    with tm.assert_produces_warning(PerformanceWarning):
        expected = (dta[:, 0] + other[:, 0]).reshape(-1, 1)
    assert isinstance(result, DatetimeArray)
    assert result.freq is None
    tm.assert_numpy_array_equal(result._data, expected._data)
    with tm.assert_produces_warning(PerformanceWarning):
        # Case where we expect to get a TimedeltaArray back
        result2 = dta - dta.astype(object)
    assert isinstance(result2, TimedeltaArray)
    assert result2.shape == (4, 1)
    assert result2.freq is None
    assert (result2.asi8 == 0).all()
| true | true |
f7211b62c471429cc135fe0e8292971b94db291e | 1,167 | py | Python | app/database/api/models/resource.py | space-logistics-org/spacenet | fd004437ed7b27dd6dc41a374e1dedfcea92e37d | [
"MIT"
] | 1 | 2022-02-17T18:01:41.000Z | 2022-02-17T18:01:41.000Z | app/database/api/models/resource.py | space-logistics-org/spacenet | fd004437ed7b27dd6dc41a374e1dedfcea92e37d | [
"MIT"
] | 2 | 2021-06-19T19:41:15.000Z | 2021-07-21T17:07:48.000Z | app/database/api/models/resource.py | space-logistics-org/spacenet | fd004437ed7b27dd6dc41a374e1dedfcea92e37d | [
"MIT"
] | 3 | 2021-06-16T16:31:12.000Z | 2022-02-17T18:02:57.000Z | """
This module defines the database schema for resources and resource subclasses.
"""
from sqlalchemy import Column, Integer, String, Float
from ..database import Base
from spacenet.schemas.resource import ResourceType
__all__ = ["Resource", "ResourceType", "ContinuousResource", "DiscreteResource"]
class Resource(Base):
    """
    A row representing a single resource, can be continuous or discrete.
    """

    __tablename__ = "resource"

    id = Column(Integer, primary_key=True, index=True)
    # Polymorphic discriminator column; subclasses set their own identity.
    type = Column(String)
    name = Column(String)
    description = Column(String)
    class_of_supply = Column(Integer)
    units = Column(String)
    unit_mass = Column(Float)
    unit_volume = Column(Float)
    # Single-table inheritance: `type` selects the mapped subclass.
    __mapper_args__ = {"polymorphic_identity": "resource", "polymorphic_on": type}
class DiscreteResource(Resource):
    """
    A row representing a single discrete resource.
    """

    # Rows with type == ResourceType.Discrete.value map to this class.
    __mapper_args__ = {"polymorphic_identity": ResourceType.Discrete.value}
class ContinuousResource(Resource):
    """
    A row representing a single continuous resource.
    """

    # Rows with type == ResourceType.Continuous.value map to this class.
    __mapper_args__ = {"polymorphic_identity": ResourceType.Continuous.value}
| 25.369565 | 82 | 0.717224 |
from sqlalchemy import Column, Integer, String, Float
from ..database import Base
from spacenet.schemas.resource import ResourceType
__all__ = ["Resource", "ResourceType", "ContinuousResource", "DiscreteResource"]
class Resource(Base):
__tablename__ = "resource"
id = Column(Integer, primary_key=True, index=True)
type = Column(String)
name = Column(String)
description = Column(String)
class_of_supply = Column(Integer)
units = Column(String)
unit_mass = Column(Float)
unit_volume = Column(Float)
__mapper_args__ = {"polymorphic_identity": "resource", "polymorphic_on": type}
class DiscreteResource(Resource):
__mapper_args__ = {"polymorphic_identity": ResourceType.Discrete.value}
class ContinuousResource(Resource):
__mapper_args__ = {"polymorphic_identity": ResourceType.Continuous.value}
| true | true |
f7211bd5305aa8d6dd9cc38d64504cc0312f6ab1 | 812 | py | Python | Latte/ex5.py | Latte-inc/Learn-Python3.6 | f3568cf2f8413f8730c2297bc39ae890bb82d962 | [
"CC0-1.0"
] | 1 | 2021-10-15T05:43:19.000Z | 2021-10-15T05:43:19.000Z | Latte/ex5.py | Latte-inc/Learn-Python3.6 | f3568cf2f8413f8730c2297bc39ae890bb82d962 | [
"CC0-1.0"
] | null | null | null | Latte/ex5.py | Latte-inc/Learn-Python3.6 | f3568cf2f8413f8730c2297bc39ae890bb82d962 | [
"CC0-1.0"
] | 1 | 2022-01-13T10:34:55.000Z | 2022-01-13T10:34:55.000Z | # # This code is learn Python new code, variable format string start!
# Time 2020/05/15 00:44
# fatcat like .....
my_name = 'fatcat'
my_age = 24  # fatcat really is 24!
my_height = 176  # in centimeters (cm)
my_weight = 93  # in kilograms (kg)
my_eyes = 'black'
my_teeth = 'white'
my_hair = 'black'
# The variables above hold two kinds of values: numbers and strings.
print(f"Let's talk about {my_name}.")
print(f"He's {my_height} CM.")
print(f"He's {my_weight} kilo.")
print("Actually that's not too heavy.")
print(f"His teeth are usually {my_teeth} depending on the coffee.")
# this line is tricky, try to get it exactly right
total = my_age + my_height + my_weight
print(f"If I add {my_age}, {my_height}, and {my_weight} I get {total}.")
# The lines above use format strings: prefixing a string literal with `f`
# tells the interpreter to interpolate the expressions inside {braces}.
| 30.074074 | 73 | 0.685961 | 3
my_eyes = 'black'
my_teeth = 'white'
my_hair = 'black'
print(f"Let's talk about {my_name}.")
print(f"He's {my_height} CM.")
print(f"He's {my_weight} kilo.")
print("Actually that's not too heavy.")
print(f"His teeth are usually {my_teeth} depending on the coffee.")
total = my_age + my_height + my_weight
print(f"If I add {my_age}, {my_height}, and {my_weight} I get {total}.")
| true | true |
f7211beca92603a62d9cbaad149c7663ec244549 | 881 | py | Python | examples/pylab_examples/contour_corner_mask.py | argriffing/matplotlib | 5555f5463fb5f995a59f7651c0034a5d6a4c7e84 | [
"MIT",
"BSD-3-Clause"
] | 1 | 2019-04-15T09:40:53.000Z | 2019-04-15T09:40:53.000Z | examples/pylab_examples/contour_corner_mask.py | argriffing/matplotlib | 5555f5463fb5f995a59f7651c0034a5d6a4c7e84 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | examples/pylab_examples/contour_corner_mask.py | argriffing/matplotlib | 5555f5463fb5f995a59f7651c0034a5d6a4c7e84 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
"""
Illustrate the difference between corner_mask=False and corner_mask=True
for masked contour plots.
"""
import matplotlib.pyplot as plt
import numpy as np
# Data to plot.
x, y = np.meshgrid(np.arange(7), np.arange(10))
z = np.sin(0.5*x)*np.cos(0.52*y)

# Mask various z values.
# FIX: use the builtin `bool` -- the `np.bool` alias was deprecated in
# NumPy 1.20 and removed in NumPy 1.24, so `dtype=np.bool` now raises.
mask = np.zeros_like(z, dtype=bool)
mask[2, 3:5] = True
mask[3:5, 4] = True
mask[7, 2] = True
mask[5, 0] = True
mask[0, 6] = True
z = np.ma.array(z, mask=mask)

# Draw the same masked data twice, once per corner_mask setting, so the
# difference in contour behavior around masked corners is visible.
corner_masks = [False, True]
for i, corner_mask in enumerate(corner_masks):
    plt.subplot(1, 2, i+1)
    cs = plt.contourf(x, y, z, corner_mask=corner_mask)
    plt.contour(cs, colors='k')
    plt.title('corner_mask = {0}'.format(corner_mask))

    # Plot grid.
    plt.grid(c='k', ls='-', alpha=0.3)

    # Indicate masked points with red circles.
    plt.plot(np.ma.array(x, mask=~mask), y, 'ro')

plt.show()
| 24.472222 | 72 | 0.658343 |
import matplotlib.pyplot as plt
import numpy as np
x, y = np.meshgrid(np.arange(7), np.arange(10))
z = np.sin(0.5*x)*np.cos(0.52*y)
mask = np.zeros_like(z, dtype=np.bool)
mask[2, 3:5] = True
mask[3:5, 4] = True
mask[7, 2] = True
mask[5, 0] = True
mask[0, 6] = True
z = np.ma.array(z, mask=mask)
corner_masks = [False, True]
for i, corner_mask in enumerate(corner_masks):
plt.subplot(1, 2, i+1)
cs = plt.contourf(x, y, z, corner_mask=corner_mask)
plt.contour(cs, colors='k')
plt.title('corner_mask = {0}'.format(corner_mask))
plt.grid(c='k', ls='-', alpha=0.3)
plt.plot(np.ma.array(x, mask=~mask), y, 'ro')
plt.show()
| true | true |
f7211e7c6967282019c097e1107691531485b132 | 847 | py | Python | authors/apps/notify/migrations/0001_initial.py | andela/Ah-backend-valkyrie | f0eb64c27e1fe37d5c81e4b9a8762dcf3c336a79 | [
"BSD-3-Clause"
] | null | null | null | authors/apps/notify/migrations/0001_initial.py | andela/Ah-backend-valkyrie | f0eb64c27e1fe37d5c81e4b9a8762dcf3c336a79 | [
"BSD-3-Clause"
] | 46 | 2019-01-08T13:16:41.000Z | 2021-04-30T20:47:08.000Z | authors/apps/notify/migrations/0001_initial.py | andela/Ah-backend-valkyrie | f0eb64c27e1fe37d5c81e4b9a8762dcf3c336a79 | [
"BSD-3-Clause"
] | 3 | 2019-01-07T08:21:59.000Z | 2019-09-20T06:43:18.000Z | # Generated by Django 2.1.5 on 2019-01-30 03:13
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: create the MailList model, which ties a user to
    their email/push notification preferences (both default to enabled)."""

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='MailList',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # NOTE: 'recieve' (sic) is the historical field spelling; it is
                # part of the applied schema and must not be corrected here.
                ('recieve_email_notifications', models.BooleanField(default=True)),
                ('recieve_push_notifications', models.BooleanField(default=True)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| 31.37037 | 118 | 0.651712 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='MailList',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('recieve_email_notifications', models.BooleanField(default=True)),
('recieve_push_notifications', models.BooleanField(default=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| true | true |
f7211e7d3cba1e8c8ec791ea66706c3f0cbcf0a0 | 3,776 | py | Python | search_engine_parser/core/utils.py | justfly50/search-engine-parser | 0418867b3529980d5a4eb71899dec37092fe7df1 | [
"MIT"
] | 276 | 2019-02-01T22:48:46.000Z | 2021-10-17T21:25:13.000Z | search_engine_parser/core/utils.py | justfly50/search-engine-parser | 0418867b3529980d5a4eb71899dec37092fe7df1 | [
"MIT"
] | 95 | 2019-02-03T00:04:11.000Z | 2021-09-22T17:45:56.000Z | search_engine_parser/core/utils.py | justfly50/search-engine-parser | 0418867b3529980d5a4eb71899dec37092fe7df1 | [
"MIT"
] | 74 | 2019-02-02T11:04:17.000Z | 2021-10-09T23:49:25.000Z | import os
import random
import pickle
import hashlib
import aiohttp
from fake_useragent import UserAgent
FILEPATH = os.path.dirname(os.path.abspath(__file__))
# prevent caching
USER_AGENT_LIST = [
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:11.0) Gecko/20100101 Firefox/11.0",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/72.0.3626.121 Safari/537.36",
"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:22.0) Gecko/20100 101 Firefox/22.0",
"Mozilla/5.0 (Windows NT 6.1; rv:11.0) Gecko/20100101 Firefox/11.0",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_4) AppleWebKit/536.5 (KHTML, like Gecko) "
"Chrome/19.0.1084.46 Safari/536.5",
"Mozilla/5.0 (Windows; Windows NT 6.1) AppleWebKit/536.5 (KHTML, like Gecko) "
"Chrome/19.0.1084.46 Safari/536.5",
]
def get_rand_user_agent():
    """Return a random User-Agent string.

    Prefers a fresh value from ``fake_useragent``; if that lookup fails for
    any reason (e.g. its data source is unavailable), falls back to a random
    entry from the hard-coded ``USER_AGENT_LIST``.
    """
    user_agent = random.choice(USER_AGENT_LIST)
    try:
        user_agent = UserAgent().random
    except Exception:
        # FIX: a bare ``except:`` also swallows SystemExit/KeyboardInterrupt;
        # catching Exception keeps the best-effort fallback without that.
        pass
    return user_agent
class CacheHandler:
    """Filesystem cache of fetched search-result pages, one subdirectory per
    engine (keyed by the engine module name)."""

    def __init__(self):
        """Create the cache directory tree, one folder per engine module."""
        self.cache = os.path.join(FILEPATH, "cache")
        engine_path = os.path.join(FILEPATH, "engines")
        os.makedirs(self.cache, exist_ok=True)
        enginelist = os.listdir(engine_path)
        # FIX: the original condition was ``i not in ("__init__.py")`` -- the
        # parentheses do not make a tuple, so that was a *substring* test
        # against the string "__init__.py". Compare for equality instead.
        self.engine_cache = {
            i[:-3]: os.path.join(self.cache, i[:-3])
            for i in enginelist
            if i != "__init__.py"
        }
        for cache in self.engine_cache.values():
            os.makedirs(cache, exist_ok=True)

    async def get_source(self, engine, url, headers, cache=True,
                         proxy=None, proxy_auth=None):
        """
        Retrieves source code of webpage from internet or from cache

        :rtype: str, bool
        :param engine: engine of the engine saving
        :type engine: str
        :param url: URL to pull source code from
        :type url: str
        :param headers: request headers to make use of
        :type headers: dict
        :param cache: use cache or not
        :type cache: bool
        :param proxy: proxy address to make use off
        :type proxy: str
        :param proxy_auth: (user, password) tuple to authenticate proxy
        :type proxy_auth: (str, str)
        """
        # Cache files are keyed by the SHA-256 of the URL, under the engine's
        # cache directory.
        encodedUrl = url.encode("utf-8")
        urlhash = hashlib.sha256(encodedUrl).hexdigest()
        engine = engine.lower()
        cache_path = os.path.join(self.engine_cache[engine], urlhash)
        if os.path.exists(cache_path) and cache:
            # Cache hit: second return value flags that the page came from cache.
            with open(cache_path, 'rb') as stream:
                return pickle.load(stream), True
        get_vars = {'url': url, 'headers': headers}
        if proxy and proxy_auth:
            auth = aiohttp.BasicAuth(*proxy_auth)
            get_vars.update({'proxy': proxy, 'proxy_auth': auth})
        async with aiohttp.ClientSession() as session:
            async with session.get(**get_vars) as resp:
                html = await resp.text()
                # Persist the page for future calls before returning it.
                with open(cache_path, 'wb') as stream:
                    pickle.dump(str(html), stream)
                return str(html), False

    def clear(self, engine=None):
        """
        Clear the entire cache either by engine name
        or just all

        :param engine: engine to clear
        """
        # FIX: removed files are joined against os.walk's ``root`` rather than
        # the top-level cache directory, so files inside subdirectories are
        # deleted from the correct path.
        if not engine:
            for engine_cache in self.engine_cache.values():
                for root, _, files in os.walk(engine_cache):
                    for f in files:
                        os.remove(os.path.join(root, f))
        else:
            engine_cache = self.engine_cache[engine.lower()]
            for root, _, files in os.walk(engine_cache):
                for f in files:
                    os.remove(os.path.join(root, f))
| 37.019608 | 101 | 0.598782 | import os
import random
import pickle
import hashlib
import aiohttp
from fake_useragent import UserAgent
FILEPATH = os.path.dirname(os.path.abspath(__file__))
USER_AGENT_LIST = [
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:11.0) Gecko/20100101 Firefox/11.0",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/72.0.3626.121 Safari/537.36",
"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:22.0) Gecko/20100 101 Firefox/22.0",
"Mozilla/5.0 (Windows NT 6.1; rv:11.0) Gecko/20100101 Firefox/11.0",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_4) AppleWebKit/536.5 (KHTML, like Gecko) "
"Chrome/19.0.1084.46 Safari/536.5",
"Mozilla/5.0 (Windows; Windows NT 6.1) AppleWebKit/536.5 (KHTML, like Gecko) "
"Chrome/19.0.1084.46 Safari/536.5",
]
def get_rand_user_agent():
user_agent = random.choice(USER_AGENT_LIST)
try:
user_agent = UserAgent().random
except:
pass
return user_agent
class CacheHandler:
def __init__(self):
self.cache = os.path.join(FILEPATH, "cache")
engine_path = os.path.join(FILEPATH, "engines")
if not os.path.exists(self.cache):
os.makedirs(self.cache)
enginelist = os.listdir(engine_path)
self.engine_cache = {i[:-3]: os.path.join(self.cache, i[:-3]) for i in enginelist if i not in
("__init__.py")}
for cache in self.engine_cache.values():
if not os.path.exists(cache):
os.makedirs(cache)
async def get_source(self, engine, url, headers, cache=True,
proxy=None, proxy_auth=None):
encodedUrl = url.encode("utf-8")
urlhash = hashlib.sha256(encodedUrl).hexdigest()
engine = engine.lower()
cache_path = os.path.join(self.engine_cache[engine], urlhash)
if os.path.exists(cache_path) and cache:
with open(cache_path, 'rb') as stream:
return pickle.load(stream), True
get_vars = { 'url':url, 'headers':headers }
if proxy and proxy_auth:
auth = aiohttp.BasicAuth(*proxy_auth)
get_vars.update({'proxy':proxy, 'proxy_auth': auth})
async with aiohttp.ClientSession() as session:
async with session.get(**get_vars) as resp:
html = await resp.text()
with open(cache_path, 'wb') as stream:
pickle.dump(str(html), stream)
return str(html), False
def clear(self, engine=None):
if not engine:
for engine_cache in self.engine_cache.values():
for root, dirs, files in os.walk(engine_cache):
for f in files:
os.remove(os.path.join(engine_cache, f))
else:
engine_cache = self.engine_cache[engine.lower()]
for _, _, files in os.walk(engine_cache):
for f in files:
os.remove(os.path.join(engine_cache, f))
| true | true |
f7211f5a04fad86d5e96b8e6c5fee8d770e20d1e | 5,324 | py | Python | leddar_ros2/leddar_sensor.py | JulienStanguennec-Leddartech/leddar_ros2 | 15f2674d8e7c472bc56c4be9cfd41f0d8d39c0bf | [
"BSD-3-Clause"
] | null | null | null | leddar_ros2/leddar_sensor.py | JulienStanguennec-Leddartech/leddar_ros2 | 15f2674d8e7c472bc56c4be9cfd41f0d8d39c0bf | [
"BSD-3-Clause"
] | null | null | null | leddar_ros2/leddar_sensor.py | JulienStanguennec-Leddartech/leddar_ros2 | 15f2674d8e7c472bc56c4be9cfd41f0d8d39c0bf | [
"BSD-3-Clause"
] | null | null | null |
import sys
import os
import time
#Import ros2 py
import rclpy
from rclpy.node import Node
#Import messages
import sensor_msgs.msg as sensor_msgs
import std_msgs.msg as std_msgs
#Import parameters (to read parameters)
from rclpy.parameter import Parameter
import numpy as np
import leddar
def point_cloud(points, parent_frame):
    """Build a sensor_msgs/PointCloud2 message from an Nx3 xyz array.

    Args:
        points: Nx3 array of xyz positions.
        parent_frame: frame in which the point cloud is defined.

    Returns:
        sensor_msgs/PointCloud2 message.

    Code source:
        https://gist.github.com/pgorczak/5c717baa44479fa064eb8d33ea4587e0

    References:
        http://docs.ros.org/melodic/api/sensor_msgs/html/msg/PointCloud2.html
        http://docs.ros.org/melodic/api/sensor_msgs/html/msg/PointField.html
        http://docs.ros.org/melodic/api/std_msgs/html/msg/Header.html
    """
    # PointCloud2 carries the cloud as a flat byte array; each point is three
    # consecutive float32 values (x, y, z), 4 bytes apiece.
    float_size = np.dtype(np.float32).itemsize
    payload = points.astype(np.float32).tobytes()

    # Describe the byte layout of one point: x at offset 0, y at 4, z at 8.
    fields = []
    for index, axis in enumerate('xyz'):
        fields.append(sensor_msgs.PointField(
            name=axis, offset=index * float_size,
            datatype=sensor_msgs.PointField.FLOAT32, count=1))

    # The header ties the cloud to the coordinate frame it lives in.
    header = std_msgs.Header(frame_id=parent_frame)
    return sensor_msgs.PointCloud2(
        header=header,
        height=1,
        width=points.shape[0],
        is_dense=False,
        is_bigendian=False,
        fields=fields,
        point_step=float_size * 3,
        row_step=float_size * 3 * points.shape[0],
        data=payload
    )
class LeddarSensor(Node):
    """ROS2 node that streams Leddar echo detections as PointCloud2 messages
    on the 'scan_cloud' topic."""

    def __init__(self):
        super().__init__('leddar_sensor')
        # Point cloud publisher topic.
        self.publisher = self.create_publisher(sensor_msgs.PointCloud2, 'scan_cloud', 10)
        # Connection parameters; defaults target a Pixell sensor (Ethernet).
        self.declare_parameters(
            namespace='',
            parameters=[
                ('param1', '192.168.0.2'),
                ('device_type', 'Ethernet'),
                ('param3', 48630),
                ('param4', 0)
            ]
        )
        param1 = str(self.get_parameter('param1').value)
        device_type = str(self.get_parameter('device_type').value)
        param3 = int(self.get_parameter('param3').value)
        param4 = int(self.get_parameter('param4').value)
        # Create and connect the sensor.
        self.dev = leddar.Device()
        dev_type = 0
        if(device_type != "not specified"):
            dev_type = leddar.device_types[device_type]
        if not self.dev.connect(param1, dev_type, param3, param4):
            err_msg = 'Error connecting to device type {0} with connection info {1}/{2}/{3}.'.format(device_type, param1, str(param3), str(param4))
            raise RuntimeError(err_msg)
        self.get_logger().info('Connected to device type {0} with connection info {1}/{2}/{3}.'.format(device_type, param1, str(param3), str(param4)))
        # Stream echo detections to echoes_callback.
        self.dev.set_callback_echo(self.echoes_callback)
        self.dev.set_data_mask(leddar.data_masks["DM_ECHOES"])
        # Optional: delay between two requests to the sensor.
        self.dev.set_data_thread_delay(10000)
        self.dev.start_data_thread()

    def echoes_callback(self, echoes):
        """Data-thread callback: convert a batch of echoes to a PointCloud2
        message and publish it."""
        # Keep valid echoes only (bit 0 of the flags field).
        # BUGFIX: the np.bool alias was removed in NumPy 1.24; use builtin bool.
        echoes['data'] = echoes['data'][np.bitwise_and(echoes['data']['flags'], 0x01).astype(bool)]
        # Extract the structured-array fields we care about.
        indices, flags, distances, amplitudes, x, y, z = [echoes['data'][field] for field in ['indices', 'flags', 'distances', 'amplitudes', 'x', 'y', 'z']]
        # Stack xyz into an Nx3 array and publish in the 'map' frame.
        xyz = np.array([x, y, z])
        message = point_cloud(xyz.T, 'map')
        self.publisher.publish(message)
def main(args=None):
    """Entry point: create a LeddarSensor node and spin it until shutdown."""
    rclpy.init(args=args)
    node = LeddarSensor()
    rclpy.spin(node)
    # Destroying the node explicitly is optional; the garbage collector would
    # otherwise do it when the node object is collected.
    node.destroy_node()
    rclpy.shutdown()
if __name__ == '__main__':
main()
| 33.696203 | 150 | 0.655147 |
import sys
import os
import time
import rclpy
from rclpy.node import Node
import sensor_msgs.msg as sensor_msgs
import std_msgs.msg as std_msgs
from rclpy.parameter import Parameter
import numpy as np
import leddar
def point_cloud(points, parent_frame):
ros_dtype = sensor_msgs.PointField.FLOAT32
dtype = np.float32
itemsize = np.dtype(dtype).itemsize
data = points.astype(dtype).tobytes()
fields = [sensor_msgs.PointField(
name=n, offset=i*itemsize, datatype=ros_dtype, count=1)
for i, n in enumerate('xyz')]
header = std_msgs.Header(frame_id=parent_frame)
return sensor_msgs.PointCloud2(
header=header,
height=1,
width=points.shape[0],
is_dense=False,
is_bigendian=False,
fields=fields,
point_step=(itemsize * 3),
row_step=(itemsize * 3 * points.shape[0]),
data=data
)
class LeddarSensor(Node):
def __init__(self):
super().__init__('leddar_sensor')
self.publisher = self.create_publisher(sensor_msgs.PointCloud2, 'scan_cloud', 10)
self.declare_parameters(
namespace='',
parameters=[
('param1', '192.168.0.2'),
('device_type', 'Ethernet'),
('param3', 48630),
('param4', 0)
]
)
param1 = str(self.get_parameter('param1').value)
device_type = str(self.get_parameter('device_type').value)
param3 = int(self.get_parameter('param3').value)
param4 = int(self.get_parameter('param4').value)
self.dev = leddar.Device()
dev_type = 0
if(device_type != "not specified"):
dev_type = leddar.device_types[device_type]
if not self.dev.connect(param1, dev_type, param3, param4):
err_msg = 'Error connecting to device type {0} with connection info {1}/{2}/{3}.'.format(device_type, param1, str(param3), str(param4))
raise RuntimeError(err_msg)
self.get_logger().info('Connected to device type {0} with connection info {1}/{2}/{3}.'.format(device_type, param1, str(param3), str(param4)))
self.dev.set_callback_echo(self.echoes_callback)
self.dev.set_data_mask(leddar.data_masks["DM_ECHOES"])
self.dev.set_data_thread_delay(10000)
self.dev.start_data_thread()
def echoes_callback(self, echoes):
echoes['data'] = echoes['data'][np.bitwise_and(echoes['data']['flags'], 0x01).astype(np.bool)]
indices, flags, distances, amplitudes, x, y, z = [echoes['data'][x] for x in ['indices', 'flags', 'distances', 'amplitudes', 'x', 'y', 'z']]
xyz = np.array([x,y,z])
message = point_cloud(xyz.T, 'map')
self.publisher.publish(message)
def main(args=None):
rclpy.init(args=args)
leddar_sensor = LeddarSensor()
rclpy.spin(leddar_sensor)
leddar_sensor.destroy_node()
rclpy.shutdown()
if __name__ == '__main__':
main()
| true | true |
f7211f913ac30f34f3eb6b9c021cc65dc21ed271 | 3,962 | py | Python | StimRespFlow/DataStruct/WaveData.py | powerfulbean/StellarWave | 877d5113054f391f605c8e39f1a0f60f7bfeeee1 | [
"MIT"
] | 3 | 2020-09-16T06:14:00.000Z | 2021-03-17T00:05:06.000Z | StimRespFlow/DataStruct/WaveData.py | powerfulbean/StellarWave | 877d5113054f391f605c8e39f1a0f60f7bfeeee1 | [
"MIT"
] | null | null | null | StimRespFlow/DataStruct/WaveData.py | powerfulbean/StellarWave | 877d5113054f391f605c8e39f1a0f60f7bfeeee1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu Sep 9 23:21:06 2021
@author: ShiningStone
"""
import datetime
import numpy as np
from .Abstract import CWaveData,CTimeStampsGen
class CDateTimeStampsGen(CTimeStampsGen):
    """Timestamp generator whose start is a datetime and step a timedelta;
    nLen is the number of stamps to generate."""
    def __init__(self,start:datetime.datetime,delta:datetime.timedelta,nLen):
        super().__init__(start,delta,nLen)
class CBitalinoWaveData(CWaveData): # EEG unit: uV; EOG unit: mv
    """Waveform container that parses OpenSignals (BITalino) text exports.

    The recorded 10-bit ADC samples are converted to real EEG/EOG voltages
    with the BITalino transfer function.
    """

    def __init__(self):
        # Sampling rate and length are unknown until a file is read, so start
        # with placeholder values.
        super().__init__(-1,-1,CTimeStampsGen(0, 0, 1))

    def readFile(self,filename,mode = 'EEG'):
        """Load an OpenSignals export file.

        Args:
            filename: path to the OpenSignals ``.txt`` export.
            mode: 'EEG', 'EOG' or 'EEGandEOG' -- selects which trailing
                column(s) are converted and stored in ``self.data``.

        Returns:
            (data, dataDescription): the raw numeric matrix and the device
            metadata dict parsed from the JSON header line.
        """
        print("start reading bitalinofile")
        from pylab import loadtxt
        import json
        dataDescription = ''
        # Header lines are '#'-prefixed; the device metadata is a single
        # '# {...}' JSON line that terminates the header, so we can stop
        # reading as soon as it is parsed.
        with open(filename,'r') as f:
            for rowCont in f.readlines():
                if(rowCont[0] == '#' and rowCont[2] != '{'):
                    pass
                elif(rowCont[2] == '{'):
                    dataDescription = json.loads(rowCont[2:])
                    break
        # loadtxt skips the '#' header lines and parses the numeric matrix.
        data = loadtxt(filename)
        rowArrayNum = data
        # The JSON maps the device MAC address to its description; unwrap it.
        for key in dataDescription.keys():
            dataDescription = dataDescription[key]
        self.timestamps = rowArrayNum[:,0]
        self.description = dataDescription
        if mode=='EEG':
            self.nChan = 1
            self.data = np.expand_dims(np.array(self.getRealSignal(rowArrayNum[:,-1],10,3.3,40000,'uV')), 0)
            self.description["channelInfo"] = [[1],['EarEEG']]
        elif mode == 'EOG':
            self.nChan= 1
            self.data = np.expand_dims(np.array(self.getRealSignal(rowArrayNum[:,-2],10,3.3,2040, 'mV')), 0)
            self.description["channelInfo"] = [[1],['Eog']]
        elif mode == 'EEGandEOG':
            data1 = np.expand_dims(np.array(self.getRealSignal(rowArrayNum[:,-1],10,3.3,40000,'uV')), 0)
            data2 = np.expand_dims(np.array(self.getRealSignal(rowArrayNum[:,-2],10,3.3,2040, 'uV')), 0)
            self.nChan = 2
            self.data = np.concatenate([data1,data2],0)
            self.description['channelInfo'] = [[1,2],['EarEEG','Eog']]
        else:
            # Best-effort: warn but keep going, as the original did.
            print("bitalino error: doesn't support this mode!")
        startTime = datetime.datetime.strptime( dataDescription['date'] + ' ' + dataDescription['time'], '%Y-%m-%d %H:%M:%S.%f')
        self.srate = dataDescription["sampling rate"]
        print("reading bitalinofile Finished")
        delta = datetime.timedelta(seconds = 1/self.srate)
        # Initiate the timestamp sequence generator.
        self.timeStampsGen = CDateTimeStampsGen(startTime,delta,len(self.timestamps))
        self.calTimeStamp(self.timeStampsGen)
        return data, dataDescription

    def getRealSignal(self,sampleDataArray, bitNumber ,VCC = 3.3 , Geeg = 40000, unit = 'uV'):
        """Convert raw ADC samples to voltages in the requested unit.

        Applies the BITalino transfer function element-wise; unit may be
        'uV' or 'mV' (any other value leaves the result in volts).
        """
        # Vectorised over the whole array instead of a per-sample Python loop.
        output = self._eegTransferFuntion(
            np.asarray(sampleDataArray, dtype=float), bitNumber, VCC, Geeg)
        if(unit == 'uV'):
            output = output * (10**6)
        elif(unit == 'mV'):
            output = output * (10**3)
        return output

    def _eegTransferFuntion(self,sampleValue, bitNumber ,VCC, Geeg):
        # BITalino transfer function: ((ADC / 2^n) - 1/2) * VCC / gain.
        output = (( (sampleValue/2**bitNumber) - 1/2) * VCC ) / Geeg
        return output

    def __len__(self):
        return len(self.data)
| 39.62 | 128 | 0.575719 |
import datetime
import numpy as np
from .Abstract import CWaveData,CTimeStampsGen
class CDateTimeStampsGen(CTimeStampsGen):
def __init__(self,start:datetime.datetime,delta:datetime.timedelta,nLen):
super().__init__(start,delta,nLen)
class CBitalinoWaveData(CWaveData):
def __init__(self):
super().__init__(-1,-1,CTimeStampsGen(0, 0, 1))
def readFile(self,filename,mode = 'EEG'):
print("start reading bitalinofile")
from pylab import loadtxt
#file_name = 'opensignals_001403173836_2019-03-04_12-02-59.txt'
fullCont = list()
dataDescription = ''
import json
#read data description part
with open(filename,'r') as f:
for rowCont in f.readlines():
if(rowCont[0] == '
pass
elif(rowCont[2] == '{'):
rowCont = rowCont[2:]
dataDescription = json.loads(rowCont)
break
else:
rowArray = rowCont.split("\t")
rowArray = rowArray[0:-1]
fullCont.append(rowArray)
data = loadtxt(filename)
# rowArrayNum = np.array(fullCont)
rowArrayNum = data
for key in dataDescription.keys(): #now the key is just the mac address of the device
dataDescription = dataDescription[key]
self.timestamps = rowArrayNum[:,0]
self.description = dataDescription
# print(dateTime.datetime.now())
if mode=='EEG':
self.nChan = 1
self.data = np.expand_dims(np.array(self.getRealSignal(rowArrayNum[:,-1],10,3.3,40000,'uV')), 0)
# self.rawdata = np.expand_dims(rowArrayNum[:,-1],0)
self.description["channelInfo"] = [[1],['EarEEG']]
elif mode == 'EOG':
self.nChan= 1
self.data = np.expand_dims(np.array(self.getRealSignal(rowArrayNum[:,-2],10,3.3,2040, 'mV')), 0)
self.description["channelInfo"] = [[1],['Eog']]
elif mode == 'EEGandEOG':
data1 = np.expand_dims(np.array(self.getRealSignal(rowArrayNum[:,-1],10,3.3,40000,'uV')), 0)
data2 = np.expand_dims(np.array(self.getRealSignal(rowArrayNum[:,-2],10,3.3,2040, 'uV')), 0)
self.nChan = 2
self.data = np.concatenate([data1,data2],0)
self.description['channelInfo'] = [[1,2],['EarEEG','Eog']]
else:
print("bitalino error: doesn't support this mode!")
startTime = datetime.datetime.strptime( dataDescription['date'] + ' ' + dataDescription['time'], '%Y-%m-%d %H:%M:%S.%f')
self.srate = dataDescription["sampling rate"]
print("reading bitalinofile Finished")
delta = datetime.timedelta(seconds = 1/self.srate)
self.timeStampsGen = CDateTimeStampsGen(startTime,delta,len(self.timestamps))
self.calTimeStamp(self.timeStampsGen)
return data, dataDescription
def getRealSignal(self,sampleDataArray, bitNumber ,VCC = 3.3 , Geeg = 40000, unit = 'uV'):
output = [self._eegTransferFuntion(i,bitNumber ,VCC , Geeg) for i in sampleDataArray]
output = np.array(output)
if(unit == 'uV'):
output = output * (10**6)
elif(unit == 'mV'):
output = output * (10**3)
return output
def _eegTransferFuntion(self,sampleValue, bitNumber ,VCC, Geeg):
output = (( (sampleValue/2**bitNumber) - 1/2) * VCC ) / Geeg
return output
def __len__(self):
return len(self.data)
| true | true |
f721210773ad82cd155b9581ac29c5f1c9609d67 | 20,043 | py | Python | conda/models/match_spec.py | abar2day/najran | 3a30636f494275b0f259be7b1875fd0fd7759f20 | [
"BSD-3-Clause"
] | 1 | 2017-06-11T01:32:33.000Z | 2017-06-11T01:32:33.000Z | conda/models/match_spec.py | abar2day/najran | 3a30636f494275b0f259be7b1875fd0fd7759f20 | [
"BSD-3-Clause"
] | null | null | null | conda/models/match_spec.py | abar2day/najran | 3a30636f494275b0f259be7b1875fd0fd7759f20 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from abc import ABCMeta, abstractmethod, abstractproperty
from collections import Mapping
import re
from .channel import Channel, MultiChannel
from .dist import Dist
from .index_record import IndexRecord
from .version import BuildNumberMatch, VersionSpec
from .._vendor.auxlib.collection import frozendict
from ..base.constants import CONDA_TARBALL_EXTENSION
from ..common.compat import isiterable, iteritems, string_types, text_type, with_metaclass
from ..common.path import expand
from ..common.url import is_url, path_to_url, unquote
from ..exceptions import CondaValueError
try:
from cytoolz.itertoolz import concat
except ImportError: # pragma: no cover
from .._vendor.toolz.itertoolz import concat # NOQA
class MatchSpecType(type):
    # Metaclass whose __call__ normalizes the many accepted spec argument
    # forms (MatchSpec, str, Mapping, Dist, IndexRecord, dumpable objects)
    # into keyword arguments before instantiating MatchSpec.

    def __call__(cls, spec_arg=None, **kwargs):
        if spec_arg:
            if isinstance(spec_arg, MatchSpec) and not kwargs:
                # Already a MatchSpec and nothing to merge: return as-is.
                return spec_arg
            elif isinstance(spec_arg, MatchSpec):
                kwargs.setdefault('optional', spec_arg.optional)
                kwargs.setdefault('target', spec_arg.target)
                # NOTE(review): this lets the existing spec's components
                # override explicit kwargs of the same name -- confirm that
                # is the intended precedence.
                kwargs.update(spec_arg._match_components)
                return super(MatchSpecType, cls).__call__(**kwargs)
            elif isinstance(spec_arg, string_types):
                # Parse the spec string; explicit kwargs win over parsed keys.
                parsed = _parse_spec_str(spec_arg)
                parsed.update(kwargs)
                return super(MatchSpecType, cls).__call__(**parsed)
            elif isinstance(spec_arg, Mapping):
                parsed = dict(spec_arg, **kwargs)
                return super(MatchSpecType, cls).__call__(**parsed)
            elif isinstance(spec_arg, Dist):
                # TODO: remove this branch
                parsed = {
                    'fn': spec_arg.to_filename(),
                    'channel': spec_arg.channel,
                }
                return super(MatchSpecType, cls).__call__(**parsed)
            elif isinstance(spec_arg, IndexRecord):
                # TODO: remove this branch
                parsed = {
                    'name': spec_arg.name,
                    'fn': spec_arg.fn,
                    'channel': spec_arg.channel,
                }
                return super(MatchSpecType, cls).__call__(**parsed)
            elif hasattr(spec_arg, 'dump'):
                # Duck-typed: anything that can dump itself to a mapping.
                parsed = spec_arg.dump()
                parsed.update(kwargs)
                return super(MatchSpecType, cls).__call__(**parsed)
            else:
                raise CondaValueError("Invalid MatchSpec:\n  spec_arg=%s\n  kwargs=%s"
                                      % (spec_arg, kwargs))
        else:
            return super(MatchSpecType, cls).__call__(**kwargs)
@with_metaclass(MatchSpecType)
class MatchSpec(object):
    """
    The easiest way to build `MatchSpec` objects that match to arbitrary fields is to
    use a keyword syntax. For instance,

        MatchSpec(name='foo', build='py2*', channel='conda-forge')

    matches any package named `foo` built with a Python 2 build string in the
    `conda-forge` channel. Available keywords to be matched against are fields of
    the `IndexRecord` model object.

    Strings are interpreted using the following conventions:
      - If the string begins with `^` and ends with `$`, it is converted to a regex.
      - If the string contains an asterisk (`*`), it is transformed from a glob to a regex.
      - Otherwise, an exact match to the string is sought.

    The `.match()` method accepts an `IndexRecord` or dictionary, and matches can pull
    from any field in that record.

    Great pain has been taken to preserve back-compatibility with the standard
    `name version build` syntax. But strictly speaking it is not necessary. Now, the
    following are all equivalent:
      - `MatchSpec('foo 1.0 py27_0', optional=True)`
      - `MatchSpec("* [name='foo',version='1.0',build='py27_0']", optional=True)`
      - `MatchSpec("foo[version='1.0',optional,build='py27_0']")`
      - `MatchSpec(name='foo', optional=True, version='1.0', build='py27_0')`
    """

    # Canonical ordering of fields, used by __repr__ and __str__.
    FIELD_NAMES = (
        'channel',
        'subdir',
        'name',
        'version',
        'build',
        'build_number',
        'track_features',
        'md5',
    )

    def __init__(self, optional=False, target=None, **kwargs):
        self.optional = optional
        self.target = target
        # Each kwarg becomes a field-name -> matcher entry (see
        # _build_components); stored as a frozendict so the spec is hashable.
        self._match_components = self._build_components(**kwargs)

    def get_exact_value(self, field_name):
        # Returns the field's exact value, or None when the field is absent
        # or is a pattern (glob/regex) rather than an exact value.
        v = self._match_components.get(field_name)
        return v and v.exact_value

    def get_raw_value(self, field_name):
        # Returns the field's value as originally supplied, or None.
        v = self._match_components.get(field_name)
        return v and v.raw_value

    def _is_simple(self):
        # True when the spec constrains exactly one field: an exact name.
        return len(self._match_components) == 1 and self.get_exact_value('name') is not None

    def _is_single(self):
        return len(self._match_components) == 1

    def match(self, rec):
        """
        Accepts an `IndexRecord` or a dict, and matches can pull from any field
        in that record.  Returns True for a match, and False for no match.
        """
        for f, v in iteritems(self._match_components):
            val = getattr(rec, f)
            # Matcher objects implement .match(); plain values compare with ==.
            if not (v.match(val) if hasattr(v, 'match') else v == val):
                return False
        return True

    def _to_filename_do_not_use(self):
        # WARNING: this is potentially unreliable and use should probably be limited
        # returns None if a filename can't be constructed
        fn_field = self.get_exact_value('fn')
        if fn_field:
            return fn_field
        vals = tuple(self.get_exact_value(x) for x in ('name', 'version', 'build'))
        if not any(x is None for x in vals):
            return '%s-%s-%s.tar.bz2' % vals
        else:
            return None

    def __repr__(self):
        builder = []
        builder += ["%s=%r" % (c, self._match_components[c])
                    for c in self.FIELD_NAMES if c in self._match_components]
        if self.optional:
            builder.append("optional=True")
        if self.target:
            builder.append("target=%r" % self.target)
        return "%s(%s)" % (self.__class__.__name__, ', '.join(builder))

    def __str__(self):
        # Serialize back to spec-string syntax:
        # [channel[/subdir]::]name[=version][[key=value,...]]
        builder = []
        channel_matcher = self._match_components.get('channel')
        if channel_matcher:
            builder.append(text_type(channel_matcher))
        subdir_matcher = self._match_components.get('subdir')
        if subdir_matcher:
            builder.append(('/%s' if builder else '*/%s') % subdir_matcher)
        name_matcher = self._match_components.get('name', '*')
        builder.append(('::%s' if builder else '%s') % name_matcher)
        xtra = []
        version = self._match_components.get('version')
        if version:
            version = text_type(version)
            if any(s in version for s in '><$^|,'):
                # Complex version expressions only fit in the bracket section.
                xtra.append("version='%s'" % version)
            elif version.endswith('.*'):
                builder.append('=' + version[:-2])
            elif version.endswith('*'):
                builder.append('=' + version[:-1])
            else:
                builder.append('==' + version)
        # Remaining fields go into the bracket section.
        _skip = ('channel', 'subdir', 'name', 'version')
        for key in self.FIELD_NAMES:
            if key not in _skip and key in self._match_components:
                value = text_type(self._match_components[key])
                if any(s in value for s in ', ='):
                    xtra.append("%s='%s'" % (key, self._match_components[key]))
                else:
                    xtra.append("%s=%s" % (key, self._match_components[key]))
        if xtra:
            builder.append('[%s]' % ','.join(xtra))
        return ''.join(builder)

    def conda_build_form(self):
        # Legacy "name version build" rendering; requires an exact name.
        builder = []
        name = self.get_exact_value('name')
        assert name
        builder.append(name)
        build = self.get_raw_value('build')
        version = self.get_raw_value('version')
        if build:
            assert version
            builder += [version, build]
        elif version:
            builder.append(version)
        return ' '.join(builder)

    def __eq__(self, other):
        if isinstance(other, MatchSpec):
            self_key = self._match_components, self.optional, self.target
            other_key = other._match_components, other.optional, other.target
            return self_key == other_key
        else:
            return False

    def __hash__(self):
        # NOTE(review): hash ignores optional/target while __eq__ includes
        # them -- equal hashes for unequal specs is legal, but confirm intent.
        return hash(self._match_components)

    def __contains__(self, field):
        return field in self._match_components

    @staticmethod
    def _build_components(**kwargs):
        # Build a frozendict mapping each field name to its matcher object.
        def _make(field_name, value):
            if field_name not in IndexRecord.__fields__:
                raise CondaValueError('Cannot match on field %s' % (field_name,))
            elif isinstance(value, string_types):
                value = text_type(value)
            if hasattr(value, 'match'):
                # Already a matcher (or compiled regex-like object).
                matcher = value
            elif field_name in _implementors:
                # Field has a specialized matcher class (see _implementors).
                matcher = _implementors[field_name](value)
            elif text_type(value):
                matcher = StrMatch(value)
            else:
                raise NotImplementedError()
            return matcher
        return frozendict((key, _make(key, value)) for key, value in iteritems(kwargs))

    @property
    def name(self):
        return self.get_exact_value('name') or '*'

    #
    # Remaining methods are for back compatibility with conda-build. Do not remove
    # without coordination with the conda-build team.
    #
    @property
    def strictness(self):
        # With the old MatchSpec, strictness==3 if name, version, and
        # build were all specified.
        s = sum(f in self._match_components for f in ('name', 'version', 'build'))
        if s < len(self._match_components):
            return 3
        elif not self.get_exact_value('name') or 'build' in self._match_components:
            return 3
        elif 'version' in self._match_components:
            return 2
        else:
            return 1

    @property
    def spec(self):
        return self.conda_build_form()

    @property
    def version(self):
        # in the old MatchSpec object, version was a VersionSpec, not a str
        # so we'll keep that API here
        return self._match_components.get('version')
def _parse_version_plus_build(v_plus_b):
"""This should reliably pull the build string out of a version + build string combo.
Examples:
>>> _parse_version_plus_build("=1.2.3 0")
('=1.2.3', '0')
>>> _parse_version_plus_build("1.2.3=0")
('1.2.3', '0')
>>> _parse_version_plus_build(">=1.0 , < 2.0 py34_0")
('>=1.0,<2.0', 'py34_0')
>>> _parse_version_plus_build(">=1.0 , < 2.0 =py34_0")
('>=1.0,<2.0', 'py34_0')
>>> _parse_version_plus_build("=1.2.3 ")
('=1.2.3', None)
>>> _parse_version_plus_build(">1.8,<2|==1.7")
('>1.8,<2|==1.7', None)
>>> _parse_version_plus_build("* openblas_0")
('*', 'openblas_0')
>>> _parse_version_plus_build("* *")
('*', '*')
"""
parts = re.search(r'((?:.+?)[^><!,|]?)(?:(?<![=!|,<>])(?:[ =])([^-=,|<>]+?))?$', v_plus_b)
if parts:
version, build = parts.groups()
build = build and build.strip()
else:
version, build = v_plus_b, None
return version and version.replace(' ', ''), build
def _parse_legacy_dist(dist_str):
    """Split a legacy dist string or filename into (name, version, build).

    Examples:
        >>> _parse_legacy_dist("_license-1.1-py27_1.tar.bz2")
        ('_license', '1.1', 'py27_1')
        >>> _parse_legacy_dist("_license-1.1-py27_1")
        ('_license', '1.1', 'py27_1')
    """
    # Drop the tarball extension, if present, before splitting.
    stripped = dist_str
    if stripped.endswith(CONDA_TARBALL_EXTENSION):
        stripped = stripped[:-len(CONDA_TARBALL_EXTENSION)]
    name, version, build = stripped.rsplit('-', 2)
    return name, version, build
def _parse_channel(channel_val):
    # Resolve a channel string to (canonical_name, subdir); returns
    # (None, None) for an empty value.  MultiChannels have no single
    # canonical name, so their plain name is used instead.
    if not channel_val:
        return None, None
    chn = Channel(channel_val)
    channel_name = chn.name if isinstance(chn, MultiChannel) else chn.canonical_name
    return channel_name, chn.subdir
def _parse_spec_str(spec_str):
    """Parse a MatchSpec string into a dict of match components.

    Recognizes, in order: trailing '#' comments, explicit conda tarball
    URLs/paths, '[key=value,...]' bracket sections, 'channel[:namespace]::'
    prefixes, and the legacy 'name version build' form.

    Raises:
        CondaValueError: when the string cannot be interpreted.
    """
    # Step 1. strip '#' comment
    if '#' in spec_str:
        ndx = spec_str.index('#')
        spec_str, _ = spec_str[:ndx], spec_str[ndx:]
        # BUGFIX: the stripped result was previously discarded
        # (`spec_str.strip()` with no assignment), leaving trailing
        # whitespace on the spec after comment removal.
        spec_str = spec_str.strip()

    # Step 2. done if spec_str is a tarball
    if spec_str.endswith(CONDA_TARBALL_EXTENSION):
        # treat as a normal url
        if not is_url(spec_str):
            spec_str = unquote(path_to_url(expand(spec_str)))

        channel = Channel(spec_str)
        if not channel.subdir:
            # url is not a channel
            raise CondaValueError("Invalid MatchSpec Channel: %s" % spec_str)
        name, version, build = _parse_legacy_dist(channel.package_filename)
        result = {
            'channel': channel.canonical_name,
            'subdir': channel.subdir,
            'name': name,
            'version': version,
            'build': build,
            'fn': channel.package_filename,
        }
        return result

    # Step 3. strip off brackets portion
    brackets = {}
    m1 = re.match(r'^(.*)(?:\[(.*)\])$', spec_str)
    if m1:
        spec_str, brackets_str = m1.groups()
        brackets_str = brackets_str.strip("[]\n\r\t ")
        # Accept optionally-quoted key=value pairs separated by ',' or ' '.
        m5 = re.finditer(r'([a-zA-Z0-9_-]+?)=(["\']?)([^\'"]*?)(\2)(?:[, ]|$)', brackets_str)
        for match in m5:
            key, _, value, _ = match.groups()
            if not key or not value:
                raise CondaValueError("Invalid MatchSpec: %s" % spec_str)
            brackets[key] = value

    # Step 4. strip off '::' channel and namespace
    m2 = spec_str.rsplit(':', 2)
    m2_len = len(m2)
    if m2_len == 3:
        channel_str, namespace, spec_str = m2
    elif m2_len == 2:
        namespace, spec_str = m2
        channel_str = None
    elif m2_len:
        spec_str = m2[0]
        channel_str, namespace = None, None
    else:
        raise NotImplementedError()
    channel, subdir = _parse_channel(channel_str)
    # Bracket keys override the '::'-prefix channel/subdir.
    if 'channel' in brackets:
        b_channel, b_subdir = _parse_channel(brackets.pop('channel'))
        if b_channel:
            channel = b_channel
        if b_subdir:
            subdir = b_subdir
    if 'subdir' in brackets:
        subdir = brackets.pop('subdir')

    # Step 5. strip off package name from remaining version + build
    m3 = re.match(r'([^ =<>!]+)?([><!= ].+)?', spec_str)
    if m3:
        name, spec_str = m3.groups()
        if name is None:
            raise CondaValueError("Invalid MatchSpec: %s" % spec_str)
    else:
        raise CondaValueError("Invalid MatchSpec: %s" % spec_str)

    # Step 6. sort out version + build
    spec_str = spec_str and spec_str.strip()
    if spec_str:
        if '[' in spec_str:
            raise CondaValueError("Invalid MatchSpec: %s" % spec_str)
        version, build = _parse_version_plus_build(spec_str)

        # translate version '=1.2.3' to '1.2.3*'
        # is it a simple version starting with '='? i.e. '=1.2.3'
        if version.startswith('='):
            test_str = version[1:]
            if version.startswith('==') and build is None:
                version = version[2:]
            elif not any(c in test_str for c in "=,|"):
                if build is None and not test_str.endswith('*'):
                    version = test_str + '*'
                else:
                    version = test_str
    else:
        version, build = None, None

    # Step 7. now compile components together
    components = {}
    components['name'] = name if name else '*'
    if channel is not None:
        components['channel'] = channel
    if subdir is not None:
        components['subdir'] = subdir
    if namespace is not None:
        # components['namespace'] = namespace
        pass
    if version is not None:
        components['version'] = version
    if build is not None:
        components['build'] = build

    # anything in brackets will now strictly override key as set in other area of spec str
    components.update(brackets)

    return components
return components
@with_metaclass(ABCMeta)
class MatchInterface(object):
    # Abstract base for the concrete field matchers below (StrMatch,
    # SplitStrMatch, ChannelMatch, ...).

    def __init__(self, value):
        self._raw_value = value

    @abstractmethod
    def match(self, other):
        raise NotImplementedError

    def matches(self, value):
        # Alias for match(); kept for API symmetry with MatchSpec.
        return self.match(value)

    @property
    def raw_value(self):
        # The value as supplied at construction (possibly normalized by a
        # subclass's __init__).
        return self._raw_value

    @abstractproperty
    def exact_value(self):
        """If the match value is an exact specification, returns the value.
        Otherwise returns None.
        """
        raise NotImplementedError()
class SplitStrMatch(MatchInterface):
    """Matcher for whitespace/comma-delimited token lists (e.g. features).

    The raw value is normalized to a frozenset of tokens; two values match
    when their token sets intersect.
    """
    __slots__ = '_raw_value',

    def __init__(self, value):
        super(SplitStrMatch, self).__init__(self._convert(value))

    def _convert(self, value):
        # Strings are split on spaces/commas; other iterables are taken as-is.
        try:
            return frozenset(value.replace(' ', ',').split(','))
        except AttributeError:
            if isiterable(value):
                return frozenset(value)
            raise

    def match(self, other):
        try:
            other_set = other and other._raw_value
        except AttributeError:
            # `other` is a raw value; normalize it the same way.
            return self._raw_value & self._convert(other)
        return other_set and self._raw_value & other_set

    def __repr__(self):
        if not self._raw_value:
            return 'set()'
        return "{%s}" % ', '.join("'%s'" % item for item in sorted(self._raw_value))

    def __str__(self):
        # Serialized back as a space-delimited token list.
        return ' '.join(sorted(self._raw_value))

    def __eq__(self, other):
        return self.match(other)

    def __hash__(self):
        return hash(self._raw_value)

    @property
    def exact_value(self):
        return self._raw_value
class ChannelMatch(MatchInterface):
    # Matches channels by canonical name, so aliases/URLs that resolve to
    # the same channel compare equal.
    __slots__ = '_raw_value',

    def __init__(self, value):
        super(ChannelMatch, self).__init__(Channel(value))

    def match(self, other):
        try:
            return self._raw_value.canonical_name == other._raw_value.canonical_name
        except AttributeError:
            # `other` is a raw value (name or URL); coerce to a Channel first.
            return self._raw_value.canonical_name == Channel(other).canonical_name

    def __str__(self):
        return "%s" % self._raw_value.canonical_name

    def __repr__(self):
        return "'%s'" % self._raw_value.canonical_name

    def __eq__(self, other):
        return self.match(other)

    def __hash__(self):
        return hash(self._raw_value)

    @property
    def exact_value(self):
        return self._raw_value
class StrMatch(MatchInterface):
    """Exact / glob / regex string matcher.

    Values wrapped in '^...$' are compiled as regular expressions, values
    containing '*' are treated as globs, and anything else must match
    exactly.
    """
    __slots__ = '_raw_value', '_re_match'

    def __init__(self, value):
        super(StrMatch, self).__init__(value)
        if value.startswith('^') and value.endswith('$'):
            self._re_match = re.compile(value).match
        elif '*' in value:
            glob_pattern = r'^(?:%s)$' % value.replace('*', r'.*')
            self._re_match = re.compile(glob_pattern).match
        else:
            self._re_match = None

    def match(self, other):
        try:
            other_value = other._raw_value
        except AttributeError:
            other_value = text_type(other)
        if self._re_match is None:
            return self._raw_value == other_value
        return self._re_match(other_value)

    def __str__(self):
        return self._raw_value

    def __repr__(self):
        return "%s('%s')" % (self.__class__.__name__, self._raw_value)

    def __eq__(self, other):
        return self.match(other)

    def __hash__(self):
        return hash(self._raw_value)

    @property
    def exact_value(self):
        # Only a literal (non-pattern) value is "exact".
        return self._raw_value if self._re_match is None else None
class LowerStrMatch(StrMatch):
    # Case-insensitive StrMatch: the value is lowercased at construction
    # (used for package names).
    def __init__(self, value):
        super(LowerStrMatch, self).__init__(value.lower())
# Maps IndexRecord field names to their specialized matcher classes; fields
# not listed here fall back to plain StrMatch (see MatchSpec._build_components).
_implementors = {
    'name': LowerStrMatch,
    'features': SplitStrMatch,
    'track_features': SplitStrMatch,
    'version': VersionSpec,
    'build_number': BuildNumberMatch,
    'channel': ChannelMatch,
}
| 32.857377 | 94 | 0.592925 |
from __future__ import absolute_import, division, print_function, unicode_literals
from abc import ABCMeta, abstractmethod, abstractproperty
from collections import Mapping
import re
from .channel import Channel, MultiChannel
from .dist import Dist
from .index_record import IndexRecord
from .version import BuildNumberMatch, VersionSpec
from .._vendor.auxlib.collection import frozendict
from ..base.constants import CONDA_TARBALL_EXTENSION
from ..common.compat import isiterable, iteritems, string_types, text_type, with_metaclass
from ..common.path import expand
from ..common.url import is_url, path_to_url, unquote
from ..exceptions import CondaValueError
try:
from cytoolz.itertoolz import concat
except ImportError:
from .._vendor.toolz.itertoolz import concat
class MatchSpecType(type):
def __call__(cls, spec_arg=None, **kwargs):
if spec_arg:
if isinstance(spec_arg, MatchSpec) and not kwargs:
return spec_arg
elif isinstance(spec_arg, MatchSpec):
kwargs.setdefault('optional', spec_arg.optional)
kwargs.setdefault('target', spec_arg.target)
kwargs.update(spec_arg._match_components)
return super(MatchSpecType, cls).__call__(**kwargs)
elif isinstance(spec_arg, string_types):
parsed = _parse_spec_str(spec_arg)
parsed.update(kwargs)
return super(MatchSpecType, cls).__call__(**parsed)
elif isinstance(spec_arg, Mapping):
parsed = dict(spec_arg, **kwargs)
return super(MatchSpecType, cls).__call__(**parsed)
elif isinstance(spec_arg, Dist):
parsed = {
'fn': spec_arg.to_filename(),
'channel': spec_arg.channel,
}
return super(MatchSpecType, cls).__call__(**parsed)
elif isinstance(spec_arg, IndexRecord):
parsed = {
'name': spec_arg.name,
'fn': spec_arg.fn,
'channel': spec_arg.channel,
}
return super(MatchSpecType, cls).__call__(**parsed)
elif hasattr(spec_arg, 'dump'):
parsed = spec_arg.dump()
parsed.update(kwargs)
return super(MatchSpecType, cls).__call__(**parsed)
else:
raise CondaValueError("Invalid MatchSpec:\n spec_arg=%s\n kwargs=%s"
% (spec_arg, kwargs))
else:
return super(MatchSpecType, cls).__call__(**kwargs)
@with_metaclass(MatchSpecType)
class MatchSpec(object):
    """A query object that matches index records field by field.

    Each keyword argument given at construction becomes one "match
    component" (see ``_build_components``); ``match()`` succeeds only when
    every component matches the corresponding attribute of the record.
    """

    # Fields rendered by __repr__/__str__, in canonical output order.
    FIELD_NAMES = (
        'channel',
        'subdir',
        'name',
        'version',
        'build',
        'build_number',
        'track_features',
        'md5',
    )

    def __init__(self, optional=False, target=None, **kwargs):
        self.optional = optional
        self.target = target
        self._match_components = self._build_components(**kwargs)

    def get_exact_value(self, field_name):
        """Return the field's exact value, or a falsy value when the field
        is absent or its matcher is fuzzy (exact_value is None then)."""
        v = self._match_components.get(field_name)
        return v and v.exact_value

    def get_raw_value(self, field_name):
        """Return the field's raw (as-given) value, or None/falsy when the
        field is not part of this spec."""
        v = self._match_components.get(field_name)
        return v and v.raw_value

    def _is_simple(self):
        # A spec that pins nothing but an exact package name.
        return len(self._match_components) == 1 and self.get_exact_value('name') is not None

    def _is_single(self):
        return len(self._match_components) == 1

    def match(self, rec):
        """Return True if every match component accepts the corresponding
        attribute of *rec*; plain values fall back to equality."""
        for f, v in iteritems(self._match_components):
            val = getattr(rec, f)
            if not (v.match(val) if hasattr(v, 'match') else v == val):
                return False
        return True

    def _to_filename_do_not_use(self):
        # Legacy helper: reconstruct a tarball filename, but only when fn or
        # all of name/version/build are known exactly; otherwise None.
        fn_field = self.get_exact_value('fn')
        if fn_field:
            return fn_field
        vals = tuple(self.get_exact_value(x) for x in ('name', 'version', 'build'))
        if not any(x is None for x in vals):
            return '%s-%s-%s.tar.bz2' % vals
        else:
            return None

    def __repr__(self):
        builder = []
        builder += ["%s=%r" % (c, self._match_components[c])
                    for c in self.FIELD_NAMES if c in self._match_components]
        if self.optional:
            builder.append("optional=True")
        if self.target:
            builder.append("target=%r" % self.target)
        return "%s(%s)" % (self.__class__.__name__, ', '.join(builder))

    def __str__(self):
        """Render the canonical spec string form, e.g.
        ``channel/subdir::name=version[key=value]``."""
        builder = []
        channel_matcher = self._match_components.get('channel')
        if channel_matcher:
            builder.append(text_type(channel_matcher))
        subdir_matcher = self._match_components.get('subdir')
        if subdir_matcher:
            # A subdir with no channel gets a wildcard channel prefix.
            builder.append(('/%s' if builder else '*/%s') % subdir_matcher)
        name_matcher = self._match_components.get('name', '*')
        builder.append(('::%s' if builder else '%s') % name_matcher)
        xtra = []
        version = self._match_components.get('version')
        if version:
            version = text_type(version)
            if any(s in version for s in '><$^|,'):
                # Complex version expressions must go in brackets.
                xtra.append("version='%s'" % version)
            elif version.endswith('.*'):
                builder.append('=' + version[:-2])
            elif version.endswith('*'):
                builder.append('=' + version[:-1])
            else:
                builder.append('==' + version)
        _skip = ('channel', 'subdir', 'name', 'version')
        for key in self.FIELD_NAMES:
            if key not in _skip and key in self._match_components:
                value = text_type(self._match_components[key])
                if any(s in value for s in ', ='):
                    # Quote values containing separators.
                    xtra.append("%s='%s'" % (key, self._match_components[key]))
                else:
                    xtra.append("%s=%s" % (key, self._match_components[key]))
        if xtra:
            builder.append('[%s]' % ','.join(xtra))
        return ''.join(builder)

    def conda_build_form(self):
        """Render the space-separated "name version build" form used by
        conda-build; requires an exact name."""
        builder = []
        name = self.get_exact_value('name')
        assert name
        builder.append(name)
        build = self.get_raw_value('build')
        version = self.get_raw_value('version')
        if build:
            # A build string without a version is not representable.
            assert version
            builder += [version, build]
        elif version:
            builder.append(version)
        return ' '.join(builder)

    def __eq__(self, other):
        if isinstance(other, MatchSpec):
            self_key = self._match_components, self.optional, self.target
            other_key = other._match_components, other.optional, other.target
            return self_key == other_key
        else:
            return False

    def __hash__(self):
        # NOTE(review): hash ignores optional/target while __eq__ includes
        # them; equal objects still hash equal, so the contract holds.
        return hash(self._match_components)

    def __contains__(self, field):
        return field in self._match_components

    @staticmethod
    def _build_components(**kwargs):
        """Convert raw keyword values into matcher objects (frozendict of
        field name -> matcher), validating field names against IndexRecord."""
        def _make(field_name, value):
            if field_name not in IndexRecord.__fields__:
                raise CondaValueError('Cannot match on field %s' % (field_name,))
            elif isinstance(value, string_types):
                value = text_type(value)
            if hasattr(value, 'match'):
                # Already a matcher (or regex-like object): use as-is.
                matcher = value
            elif field_name in _implementors:
                matcher = _implementors[field_name](value)
            elif text_type(value):
                matcher = StrMatch(value)
            else:
                raise NotImplementedError()
            return matcher
        return frozendict((key, _make(key, value)) for key, value in iteritems(kwargs))

    @property
    def name(self):
        """Exact package name, or '*' when the name is not pinned exactly."""
        return self.get_exact_value('name') or '*'

    #
    # Remaining methods are for back compatibility with conda-build. Do not remove
    # without coordination with the conda-build team.
    #
    @property
    def strictness(self):
        # With the old MatchSpec, strictness==3 if name, version, and
        # build were all specified.
        s = sum(f in self._match_components for f in ('name', 'version', 'build'))
        if s < len(self._match_components):
            # Components beyond name/version/build also mean "strict".
            return 3
        elif not self.get_exact_value('name') or 'build' in self._match_components:
            return 3
        elif 'version' in self._match_components:
            return 2
        else:
            return 1

    @property
    def spec(self):
        return self.conda_build_form()

    @property
    def version(self):
        # in the old MatchSpec object, version was a VersionSpec, not a str
        # so we'll keep that API here
        return self._match_components.get('version')
def _parse_version_plus_build(v_plus_b):
parts = re.search(r'((?:.+?)[^><!,|]?)(?:(?<![=!|,<>])(?:[ =])([^-=,|<>]+?))?$', v_plus_b)
if parts:
version, build = parts.groups()
build = build and build.strip()
else:
version, build = v_plus_b, None
return version and version.replace(' ', ''), build
def _parse_legacy_dist(dist_str):
    """Split ``<name>-<version>-<build>[.tar.bz2]`` into (name, version, build).

    Raises ValueError (via tuple unpacking) when fewer than two dashes are
    present, matching the historical behavior.
    """
    stripped = dist_str
    if stripped.endswith(CONDA_TARBALL_EXTENSION):
        stripped = stripped[:-len(CONDA_TARBALL_EXTENSION)]
    name, version, build = stripped.rsplit('-', 2)
    return name, version, build
def _parse_channel(channel_val):
    """Resolve a channel string into a ``(channel_name, subdir)`` pair.

    Returns ``(None, None)`` for a falsy input. For a MultiChannel the bare
    name is used; otherwise the canonical name.
    """
    if not channel_val:
        return None, None
    chn = Channel(channel_val)
    channel_name = chn.name if isinstance(chn, MultiChannel) else chn.canonical_name
    return channel_name, chn.subdir
def _parse_spec_str(spec_str):
    """Parse a MatchSpec string into a dict of match components.

    Recognized, in order: an inline "#comment" suffix, a direct conda
    tarball URL/path, trailing "[key=value,...]" bracket expressions, an
    optional "channel[:namespace]:" prefix, and finally
    "name [version [build]]".

    :raises CondaValueError: if the string cannot be interpreted.
    """
    # Step 1: drop an inline comment, then surrounding whitespace.
    if '#' in spec_str:
        ndx = spec_str.index('#')
        spec_str, _ = spec_str[:ndx], spec_str[ndx:]
    # BUG FIX: previously a bare `spec_str.strip()` discarded its result,
    # leaving whitespace (e.g. before a stripped comment) in the spec.
    spec_str = spec_str.strip()
    # Step 2: a tarball URL/path fully determines every component.
    if spec_str.endswith(CONDA_TARBALL_EXTENSION):
        if not is_url(spec_str):
            spec_str = unquote(path_to_url(expand(spec_str)))
        channel = Channel(spec_str)
        if not channel.subdir:
            raise CondaValueError("Invalid MatchSpec Channel: %s" % spec_str)
        name, version, build = _parse_legacy_dist(channel.package_filename)
        result = {
            'channel': channel.canonical_name,
            'subdir': channel.subdir,
            'name': name,
            'version': version,
            'build': build,
            'fn': channel.package_filename,
        }
        return result
    # Step 3: pull off trailing bracket expressions "[key=value,...]".
    brackets = {}
    m1 = re.match(r'^(.*)(?:\[(.*)\])$', spec_str)
    if m1:
        spec_str, brackets_str = m1.groups()
        brackets_str = brackets_str.strip("[]\n\r\t ")
        m5 = re.finditer(r'([a-zA-Z0-9_-]+?)=(["\']?)([^\'"]*?)(\2)(?:[, ]|$)', brackets_str)
        for match in m5:
            key, _, value, _ = match.groups()
            if not key or not value:
                raise CondaValueError("Invalid MatchSpec: %s" % spec_str)
            brackets[key] = value
    # Step 4: split off "channel:namespace:" / "namespace:" prefixes.
    m2 = spec_str.rsplit(':', 2)
    m2_len = len(m2)
    if m2_len == 3:
        channel_str, namespace, spec_str = m2
    elif m2_len == 2:
        namespace, spec_str = m2
        channel_str = None
    elif m2_len:
        spec_str = m2[0]
        channel_str, namespace = None, None
    else:
        raise NotImplementedError()
    channel, subdir = _parse_channel(channel_str)
    # Bracket-form channel/subdir override the prefix form.
    if 'channel' in brackets:
        b_channel, b_subdir = _parse_channel(brackets.pop('channel'))
        if b_channel:
            channel = b_channel
        if b_subdir:
            subdir = b_subdir
    if 'subdir' in brackets:
        subdir = brackets.pop('subdir')
    # Step 5: split the remainder into name and version(+build).
    m3 = re.match(r'([^ =<>!]+)?([><!= ].+)?', spec_str)
    if m3:
        name, spec_str = m3.groups()
        if name is None:
            raise CondaValueError("Invalid MatchSpec: %s" % spec_str)
    else:
        raise CondaValueError("Invalid MatchSpec: %s" % spec_str)
    spec_str = spec_str and spec_str.strip()
    if spec_str:
        if '[' in spec_str:
            raise CondaValueError("Invalid MatchSpec: %s" % spec_str)
        version, build = _parse_version_plus_build(spec_str)
        # Translate conda-style "=1.0" fuzziness into glob/exact forms.
        if version.startswith('='):
            test_str = version[1:]
            if version.startswith('==') and build is None:
                version = version[2:]
            elif not any(c in test_str for c in "=,|"):
                if build is None and not test_str.endswith('*'):
                    version = test_str + '*'
                else:
                    version = test_str
    else:
        version, build = None, None
    # Step 6: assemble the components dict; remaining bracket keys pass
    # straight through as extra components.
    components = {}
    components['name'] = name if name else '*'
    if channel is not None:
        components['channel'] = channel
    if subdir is not None:
        components['subdir'] = subdir
    if namespace is not None:
        # namespace is parsed but intentionally not acted upon yet
        pass
    if version is not None:
        components['version'] = version
    if build is not None:
        components['build'] = build
    components.update(brackets)
    return components
@with_metaclass(ABCMeta)
class MatchInterface(object):
    """Abstract base class for a single field's match component."""

    def __init__(self, value):
        # The value exactly as given by the caller.
        self._raw_value = value

    @abstractmethod
    def match(self, other):
        """Return a truthy value when *other* satisfies this matcher."""
        raise NotImplementedError

    def matches(self, value):
        # Alias kept for API symmetry with match().
        return self.match(value)

    @property
    def raw_value(self):
        return self._raw_value

    @abstractproperty
    def exact_value(self):
        """The exact value when this matcher pins one; None when fuzzy."""
        raise NotImplementedError()
class SplitStrMatch(MatchInterface):
    """Matcher for space/comma separated feature lists; stores a frozenset
    and matches on non-empty set intersection."""

    __slots__ = '_raw_value',

    def __init__(self, value):
        super(SplitStrMatch, self).__init__(self._convert(value))

    def _convert(self, value):
        # Accept "a b", "a,b", or any iterable of strings.
        try:
            return frozenset(value.replace(' ', ',').split(','))
        except AttributeError:
            if isiterable(value):
                return frozenset(value)
            raise

    def match(self, other):
        # Truthy when the two sets overlap; an empty/None other never matches.
        try:
            return other and self._raw_value & other._raw_value
        except AttributeError:
            return self._raw_value & self._convert(other)

    def __repr__(self):
        if self._raw_value:
            return "{%s}" % ', '.join("'%s'" % s for s in sorted(self._raw_value))
        else:
            return 'set()'

    def __str__(self):
        return ' '.join(sorted(self._raw_value))

    def __eq__(self, other):
        # NOTE(review): equality is overlap, not set equality — intentional?
        return self.match(other)

    def __hash__(self):
        return hash(self._raw_value)

    @property
    def exact_value(self):
        return self._raw_value
class ChannelMatch(MatchInterface):
    """Matcher comparing channels by canonical name."""

    __slots__ = '_raw_value',

    def __init__(self, value):
        super(ChannelMatch, self).__init__(Channel(value))

    def match(self, other):
        # Accept either another ChannelMatch or anything Channel() can parse.
        try:
            return self._raw_value.canonical_name == other._raw_value.canonical_name
        except AttributeError:
            return self._raw_value.canonical_name == Channel(other).canonical_name

    def __str__(self):
        return "%s" % self._raw_value.canonical_name

    def __repr__(self):
        return "'%s'" % self._raw_value.canonical_name

    def __eq__(self, other):
        return self.match(other)

    def __hash__(self):
        return hash(self._raw_value)

    @property
    def exact_value(self):
        return self._raw_value
class StrMatch(MatchInterface):
    """String matcher supporting exact, glob ('*') and anchored-regex
    ('^...$') forms."""

    __slots__ = '_raw_value', '_re_match'

    def __init__(self, value):
        super(StrMatch, self).__init__(value)
        self._re_match = None
        if value.startswith('^') and value.endswith('$'):
            # Explicit regex form is used verbatim.
            self._re_match = re.compile(value).match
        elif '*' in value:
            # Glob form: '*' becomes '.*', anchored at both ends.
            self._re_match = re.compile(r'^(?:%s)$' % value.replace('*', r'.*')).match

    def match(self, other):
        try:
            _other_val = other._raw_value
        except AttributeError:
            _other_val = text_type(other)
        if self._re_match:
            return self._re_match(_other_val)
        else:
            return self._raw_value == _other_val

    def __str__(self):
        return self._raw_value

    def __repr__(self):
        return "%s('%s')" % (self.__class__.__name__, self._raw_value)

    def __eq__(self, other):
        return self.match(other)

    def __hash__(self):
        return hash(self._raw_value)

    @property
    def exact_value(self):
        # Only an exact (non-pattern) matcher pins a value.
        return self._raw_value if self._re_match is None else None
class LowerStrMatch(StrMatch):
    """Case-insensitive StrMatch; used for package names, which conda
    treats as lowercase."""

    def __init__(self, value):
        super(LowerStrMatch, self).__init__(value.lower())
# Maps match-component field names to their specialized matcher classes.
# Fields not listed here fall back to plain StrMatch
# (see MatchSpec._build_components).
_implementors = {
    'name': LowerStrMatch,
    'features': SplitStrMatch,
    'track_features': SplitStrMatch,
    'version': VersionSpec,
    'build_number': BuildNumberMatch,
    'channel': ChannelMatch,
}
| true | true |
f721212419baf5ea18640832b738d3e1f17382a7 | 6,485 | py | Python | tests/integration-tests/cfn_stacks_factory.py | agobeaux/aws-parallelcluster | ec337c6b8341f9b84616b6bbbe8687a0a5f71126 | [
"Apache-2.0"
] | null | null | null | tests/integration-tests/cfn_stacks_factory.py | agobeaux/aws-parallelcluster | ec337c6b8341f9b84616b6bbbe8687a0a5f71126 | [
"Apache-2.0"
] | null | null | null | tests/integration-tests/cfn_stacks_factory.py | agobeaux/aws-parallelcluster | ec337c6b8341f9b84616b6bbbe8687a0a5f71126 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
# with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and
# limitations under the License.
import logging
import boto3
from botocore.exceptions import ClientError
from retrying import retry
from utils import retrieve_cfn_outputs, retrieve_cfn_resources, set_credentials, unset_credentials
class CfnStack:
    """Represent a named CloudFormation stack in a given region.

    Instantiating this class does not touch AWS; outputs and resources are
    fetched lazily on first property access and then cached.
    """

    def __init__(self, name, region, template, parameters=None):
        self.name = name
        self.region = region
        self.template = template
        self.parameters = parameters or []
        self.cfn_stack_id = None  # set once the stack has been created
        self.__cfn_outputs = None
        self.__cfn_resources = None

    @property
    def cfn_outputs(self):
        """CloudFormation outputs for the stack, fetched once and cached.

        A falsy cached value triggers a re-fetch, preserving the original
        retrieve-until-nonempty behavior.
        """
        outputs = self.__cfn_outputs
        if not outputs:
            outputs = retrieve_cfn_outputs(self.name, self.region)
            self.__cfn_outputs = outputs
        return outputs

    @property
    def cfn_resources(self):
        """CloudFormation resources for the stack, fetched once and cached."""
        resources = self.__cfn_resources
        if not resources:
            resources = retrieve_cfn_resources(self.name, self.region)
            self.__cfn_resources = resources
        return resources
class CfnStacksFactory:
    """Manage creation and deletion of CloudFormation stacks.

    Keeps a registry of the stacks it created (keyed by "name-region") so
    they can be destroyed individually or all at once at teardown.
    """

    def __init__(self, credentials):
        self.__created_stacks = {}
        self.__credentials = credentials

    def create_stack(self, stack):
        """
        Create a cfn stack with a given template.

        :param stack: stack to create.
        :raises ValueError: if a stack with the same name/region was already
            created by this factory.
        """
        name = stack.name
        region = stack.region
        try:
            set_credentials(region, self.__credentials)
            # Renamed from `id`, which shadowed the builtin.
            stack_id = self.__get_stack_internal_id(name, region)
            if stack_id in self.__created_stacks:
                raise ValueError("Stack {0} already exists in region {1}".format(name, region))
            logging.info("Creating stack {0} in region {1}".format(name, region))
            # Register before creating so a failed creation is still
            # cleaned up by delete_all_stacks().
            self.__created_stacks[stack_id] = stack
            try:
                cfn_client = boto3.client("cloudformation", region_name=region)
                result = cfn_client.create_stack(
                    StackName=name, TemplateBody=stack.template, Parameters=stack.parameters
                )
                stack.cfn_stack_id = result["StackId"]
                final_status = self.__wait_for_stack_creation(stack.cfn_stack_id, cfn_client)
                self.__assert_stack_status(final_status, "CREATE_COMPLETE")
            except Exception as e:
                logging.error("Creation of stack {0} in region {1} failed with exception: {2}".format(name, region, e))
                raise
            logging.info("Stack {0} created successfully in region {1}".format(name, region))
        finally:
            unset_credentials()

    # Retried on ClientError because deletion can hit transient AWS
    # throttling/eventual-consistency failures.
    @retry(
        stop_max_attempt_number=10,
        wait_fixed=5000,
        retry_on_exception=lambda exception: isinstance(exception, ClientError),
    )
    def delete_stack(self, name, region):
        """Destroy a created cfn stack."""
        try:
            set_credentials(region, self.__credentials)
            stack_id = self.__get_stack_internal_id(name, region)
            if stack_id in self.__created_stacks:
                logging.info("Destroying stack {0} in region {1}".format(name, region))
                try:
                    stack = self.__created_stacks[stack_id]
                    cfn_client = boto3.client("cloudformation", region_name=stack.region)
                    cfn_client.delete_stack(StackName=stack.name)
                    final_status = self.__wait_for_stack_deletion(stack.cfn_stack_id, cfn_client)
                    self.__assert_stack_status(final_status, "DELETE_COMPLETE")
                except Exception as e:
                    logging.error(
                        "Deletion of stack {0} in region {1} failed with exception: {2}".format(name, region, e)
                    )
                    raise
                # Deregister only after a confirmed successful deletion.
                del self.__created_stacks[stack_id]
                logging.info("Stack {0} deleted successfully in region {1}".format(name, region))
            else:
                logging.warning(
                    "Couldn't find stack with name {0} in region {1}. Skipping deletion.".format(name, region)
                )
        finally:
            unset_credentials()

    def delete_all_stacks(self):
        """Destroy all created stacks, logging (not raising) per-stack failures."""
        logging.debug("Destroying all cfn stacks")
        # Iterate over a copy: delete_stack mutates __created_stacks.
        for _, value in dict(self.__created_stacks).items():
            try:
                self.delete_stack(value.name, value.region)
            except Exception as e:
                logging.error(
                    "Failed when destroying stack {0} in region {1} with exception {2}.".format(
                        value.name, value.region, e
                    )
                )

    # Poll every 5s while the stack is still being created; never retry
    # on exception (describe failures should surface immediately).
    @retry(
        retry_on_result=lambda result: result == "CREATE_IN_PROGRESS",
        wait_fixed=5000,
        retry_on_exception=lambda e: False,
    )
    def __wait_for_stack_creation(self, name, cfn_client):
        return self.__get_stack_status(name, cfn_client)

    @retry(
        retry_on_result=lambda result: result == "DELETE_IN_PROGRESS",
        wait_fixed=5000,
        retry_on_exception=lambda e: False,
    )
    def __wait_for_stack_deletion(self, name, cfn_client):
        return self.__get_stack_status(name, cfn_client)

    @staticmethod
    def __get_stack_status(name, cfn_client):
        return cfn_client.describe_stacks(StackName=name).get("Stacks")[0].get("StackStatus")

    @staticmethod
    def __assert_stack_status(status, expected_status):
        if status != expected_status:
            raise Exception("Stack status {0} differs from expected one {1}".format(status, expected_status))

    @staticmethod
    def __get_stack_internal_id(name, region):
        # Registry key: a stack is unique per (name, region) pair.
        return name + "-" + region
| 39.066265 | 119 | 0.626831 |
import logging
import boto3
from botocore.exceptions import ClientError
from retrying import retry
from utils import retrieve_cfn_outputs, retrieve_cfn_resources, set_credentials, unset_credentials
class CfnStack:
    """Identify a CloudFormation stack; outputs/resources are lazy-cached."""

    def __init__(self, name, region, template, parameters=None):
        self.name = name
        self.region = region
        self.template = template
        self.parameters = parameters or []
        self.cfn_stack_id = None  # set once the stack has been created
        self.__cfn_outputs = None
        self.__cfn_resources = None

    @property
    def cfn_outputs(self):
        """CloudFormation outputs, retrieved on first access and cached."""
        if not self.__cfn_outputs:
            self.__cfn_outputs = retrieve_cfn_outputs(self.name, self.region)
        return self.__cfn_outputs

    @property
    def cfn_resources(self):
        """CloudFormation resources, retrieved on first access and cached."""
        if not self.__cfn_resources:
            self.__cfn_resources = retrieve_cfn_resources(self.name, self.region)
        return self.__cfn_resources
class CfnStacksFactory:
    """Manage creation and deletion of CloudFormation stacks."""

    def __init__(self, credentials):
        self.__created_stacks = {}
        self.__credentials = credentials

    def create_stack(self, stack):
        """Create a cfn stack with a given template.

        :param stack: CfnStack to create.
        :raises ValueError: if the same name/region stack was already created.
        """
        name = stack.name
        region = stack.region
        try:
            set_credentials(region, self.__credentials)
            id = self.__get_stack_internal_id(name, region)
            if id in self.__created_stacks:
                raise ValueError("Stack {0} already exists in region {1}".format(name, region))
            logging.info("Creating stack {0} in region {1}".format(name, region))
            # Registered before creation so failures are still cleaned up.
            self.__created_stacks[id] = stack
            try:
                cfn_client = boto3.client("cloudformation", region_name=region)
                result = cfn_client.create_stack(
                    StackName=name, TemplateBody=stack.template, Parameters=stack.parameters
                )
                stack.cfn_stack_id = result["StackId"]
                final_status = self.__wait_for_stack_creation(stack.cfn_stack_id, cfn_client)
                self.__assert_stack_status(final_status, "CREATE_COMPLETE")
            except Exception as e:
                logging.error("Creation of stack {0} in region {1} failed with exception: {2}".format(name, region, e))
                raise
            logging.info("Stack {0} created successfully in region {1}".format(name, region))
        finally:
            unset_credentials()

    # Retries smooth over transient AWS ClientError failures.
    @retry(
        stop_max_attempt_number=10,
        wait_fixed=5000,
        retry_on_exception=lambda exception: isinstance(exception, ClientError),
    )
    def delete_stack(self, name, region):
        """Destroy a created cfn stack and deregister it on success."""
        try:
            set_credentials(region, self.__credentials)
            id = self.__get_stack_internal_id(name, region)
            if id in self.__created_stacks:
                logging.info("Destroying stack {0} in region {1}".format(name, region))
                try:
                    stack = self.__created_stacks[id]
                    cfn_client = boto3.client("cloudformation", region_name=stack.region)
                    cfn_client.delete_stack(StackName=stack.name)
                    final_status = self.__wait_for_stack_deletion(stack.cfn_stack_id, cfn_client)
                    self.__assert_stack_status(final_status, "DELETE_COMPLETE")
                except Exception as e:
                    logging.error(
                        "Deletion of stack {0} in region {1} failed with exception: {2}".format(name, region, e)
                    )
                    raise
                del self.__created_stacks[id]
                logging.info("Stack {0} deleted successfully in region {1}".format(name, region))
            else:
                logging.warning(
                    "Couldn't find stack with name {0} in region {1}. Skipping deletion.".format(name, region)
                )
        finally:
            unset_credentials()

    def delete_all_stacks(self):
        """Destroy all created stacks, logging per-stack failures."""
        logging.debug("Destroying all cfn stacks")
        # Iterate a copy since delete_stack mutates the registry.
        for _, value in dict(self.__created_stacks).items():
            try:
                self.delete_stack(value.name, value.region)
            except Exception as e:
                logging.error(
                    "Failed when destroying stack {0} in region {1} with exception {2}.".format(
                        value.name, value.region, e
                    )
                )

    # Poll every 5s while creation is still in progress.
    @retry(
        retry_on_result=lambda result: result == "CREATE_IN_PROGRESS",
        wait_fixed=5000,
        retry_on_exception=lambda e: False,
    )
    def __wait_for_stack_creation(self, name, cfn_client):
        return self.__get_stack_status(name, cfn_client)

    # Poll every 5s while deletion is still in progress.
    @retry(
        retry_on_result=lambda result: result == "DELETE_IN_PROGRESS",
        wait_fixed=5000,
        retry_on_exception=lambda e: False,
    )
    def __wait_for_stack_deletion(self, name, cfn_client):
        return self.__get_stack_status(name, cfn_client)

    @staticmethod
    def __get_stack_status(name, cfn_client):
        return cfn_client.describe_stacks(StackName=name).get("Stacks")[0].get("StackStatus")

    @staticmethod
    def __assert_stack_status(status, expected_status):
        if status != expected_status:
            raise Exception("Stack status {0} differs from expected one {1}".format(status, expected_status))

    @staticmethod
    def __get_stack_internal_id(name, region):
        # Registry key: unique per (name, region) pair.
        return name + "-" + region
| true | true |
f721218a181e524dc4105ce1e8ccda9b8507b1c2 | 3,080 | py | Python | blog/blog/settings.py | zhaotao789/blog | de23e5a29b6aae2fc87829833f3fae256c55f5b3 | [
"MIT"
] | null | null | null | blog/blog/settings.py | zhaotao789/blog | de23e5a29b6aae2fc87829833f3fae256c55f5b3 | [
"MIT"
] | null | null | null | blog/blog/settings.py | zhaotao789/blog | de23e5a29b6aae2fc87829833f3fae256c55f5b3 | [
"MIT"
] | null | null | null | """
Django settings for blog project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; load it from an
# environment variable for any non-development deployment.
SECRET_KEY = 'd120mv4fw)wcwekzk-r^1w5++9e^q_6qteo4-+n8kk4ei%i5$0'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'blog.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'blog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| 25.454545 | 91 | 0.696104 |
import os
# Project root: two levels above this settings module.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# SECURITY WARNING: NOTE(review) — this secret key is committed to version
# control; load it from an environment variable outside development.
SECRET_KEY = 'd120mv4fw)wcwekzk-r^1w5++9e^q_6qteo4-+n8kk4ei%i5$0'
# SECURITY WARNING: don't run with DEBUG enabled in production.
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'blog.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'blog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| true | true |
f72121ecda5066fe0ef9c48035c6f35ba4a47c1b | 400 | py | Python | lino_tera/lib/coachings/choicelists.py | khchine5/tera | dd85aaefc2392fa831bcee7c258d37038e32aeb7 | [
"BSD-2-Clause"
] | null | null | null | lino_tera/lib/coachings/choicelists.py | khchine5/tera | dd85aaefc2392fa831bcee7c258d37038e32aeb7 | [
"BSD-2-Clause"
] | null | null | null | lino_tera/lib/coachings/choicelists.py | khchine5/tera | dd85aaefc2392fa831bcee7c258d37038e32aeb7 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: UTF-8 -*-
# Copyright 2017-2018 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)
"""The choicelists for this plugin.
"""
from lino.api import dd, _
class PartnerTariffs(dd.ChoiceList):
    # Choicelist of client tariff categories for this plugin.
    verbose_name = _("Client tariff")
    verbose_name_plural = _("Client tariffs")
# Populate the choicelist: (value, text, name) per choice.
add = PartnerTariffs.add_item
add('10', _("Plain"), 'plain')
add('20', _("Reduced"), 'reduced')
| 18.181818 | 45 | 0.67 |
from lino.api import dd, _
class PartnerTariffs(dd.ChoiceList):
    # Choicelist of client tariff categories for this plugin.
    verbose_name = _("Client tariff")
    verbose_name_plural = _("Client tariffs")
# Populate the choicelist: (value, text, name) per choice.
add = PartnerTariffs.add_item
add('10', _("Plain"), 'plain')
add('20', _("Reduced"), 'reduced')
| true | true |
f7212251e63dcb5ce319603d8ff0812abad4359b | 1,095 | py | Python | synapse/resources/packages-plugin/yum-pkg.py | mrmuxl/synapse-agent | 615ccc8faefa0f7d66d070a7444fe57e67e3bae1 | [
"MIT"
] | 1 | 2016-06-23T05:56:53.000Z | 2016-06-23T05:56:53.000Z | synapse/resources/packages-plugin/yum-pkg.py | mrmuxl/synapse-agent | 615ccc8faefa0f7d66d070a7444fe57e67e3bae1 | [
"MIT"
] | null | null | null | synapse/resources/packages-plugin/yum-pkg.py | mrmuxl/synapse-agent | 615ccc8faefa0f7d66d070a7444fe57e67e3bae1 | [
"MIT"
] | null | null | null | from synapse.syncmd import exec_cmd
from synapse.synapse_exceptions import ResourceException
from synapse.logger import logger
log = logger('yum-pkg')
def install(name):
    """Install a package with yum; raise ResourceException on failure."""
    ret = exec_cmd("/usr/bin/yum -q -y install {0}".format(name))
    if ret['returncode'] != 0:
        raise ResourceException(ret['stderr'])
def get_installed_packages():
    """Return the list of installed RPM package strings (``rpm -qa`` output).

    Uses splitlines() so the command output's trailing newline does not
    produce a spurious empty last entry, as split('\\n') did.
    """
    ret = exec_cmd("/bin/rpm -qa")
    return ret['stdout'].splitlines()
def remove(name):
    """Remove a package with yum; raise ResourceException on failure."""
    ret = exec_cmd("/usr/bin/yum -q -y remove {0}".format(name))
    if ret['returncode'] != 0:
        raise ResourceException(ret['stderr'])
def update(name):
    """Update an installed package with yum.

    ``yum update`` of a non-installed package exits 0, so installation is
    checked explicitly. Checking *before* running yum avoids a pointless
    yum invocation and gives a clear message instead of the previous
    empty-stderr exception.

    :raises ResourceException: if the package is not installed or the
        update command fails.
    """
    if not is_installed(name):
        raise ResourceException("Package {0} is not installed".format(name))
    ret = exec_cmd("/usr/bin/yum -q -y update {0}".format(name))
    if ret['returncode'] != 0:
        raise ResourceException(ret['stderr'])
def is_installed(name):
    """Return True if the named RPM package is installed (``rpm -q`` exit 0)."""
    ret = exec_cmd("/bin/rpm -q {0}".format(name))
    return ret['returncode'] == 0
| 28.076923 | 79 | 0.663014 | from synapse.syncmd import exec_cmd
from synapse.synapse_exceptions import ResourceException
from synapse.logger import logger
log = logger('yum-pkg')
def install(name):
    """Install a package with yum; raise ResourceException on failure."""
    ret = exec_cmd("/usr/bin/yum -q -y install {0}".format(name))
    if ret['returncode'] != 0:
        raise ResourceException(ret['stderr'])
def get_installed_packages():
    """Return installed RPM package strings; note the trailing newline of
    ``rpm -qa`` yields a final empty entry with split('\\n')."""
    ret = exec_cmd("/bin/rpm -qa")
    return ret['stdout'].split('\n')
def remove(name):
    """Remove a package with yum; raise ResourceException on failure."""
    ret = exec_cmd("/usr/bin/yum -q -y remove {0}".format(name))
    if ret['returncode'] != 0:
        raise ResourceException(ret['stderr'])
def update(name):
    """Update an installed package with yum.

    ``yum update`` of a non-installed package exits 0, so installation is
    checked explicitly and a ResourceException raised in that case too.
    """
    inst = is_installed(name)
    ret = exec_cmd("/usr/bin/yum -q -y update {0}".format(name))
    if ret['returncode'] != 0 or not inst:
        raise ResourceException(ret['stderr'])
def is_installed(name):
    """Return True if the named RPM package is installed (``rpm -q`` exit 0)."""
    ret = exec_cmd("/bin/rpm -q {0}".format(name))
    return ret['returncode'] == 0
| true | true |
f7212358f16c2908668c9722bd9e47633e14b4ef | 2,154 | py | Python | sensirion_shdlc_sensorbridge/i2c_errors.py | Sensirion/python-shdlc-sensorbridge | c441c17d89697ecf0f7b61955f54c3da195e30e6 | [
"BSD-3-Clause"
] | null | null | null | sensirion_shdlc_sensorbridge/i2c_errors.py | Sensirion/python-shdlc-sensorbridge | c441c17d89697ecf0f7b61955f54c3da195e30e6 | [
"BSD-3-Clause"
] | 1 | 2021-03-28T22:15:29.000Z | 2021-11-03T09:06:14.000Z | sensirion_shdlc_sensorbridge/i2c_errors.py | Sensirion/python-shdlc-sensorbridge | c441c17d89697ecf0f7b61955f54c3da195e30e6 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# (c) Copyright 2020 Sensirion AG, Switzerland
from __future__ import absolute_import, division, print_function
import logging
log = logging.getLogger(__name__)
class SensorBridgeI2cError(IOError):
    """Base error raised when an I2C transceive operation fails."""

    def __init__(self, code, message="Unknown"):
        """
        :param byte code: Error code as received from the device.
        :param str message: Human readable description of the error.
        """
        text = "I2C transceive error: {}".format(message)
        super(SensorBridgeI2cError, self).__init__(text)
        self.error_code = code
        self.error_message = message
class SensorBridgeI2cNackError(SensorBridgeI2cError):
    """
    I2C transceive NACK error (device code 0x01): a byte was not
    acknowledged by the slave.
    """
    def __init__(self):
        super(SensorBridgeI2cNackError, self).__init__(
            0x01,
            "NACK (byte not acknowledged)"
        )
class SensorBridgeI2cTimeoutError(SensorBridgeI2cError):
    """
    I2C transceive timeout error (device code 0x02).
    """
    def __init__(self):
        super(SensorBridgeI2cTimeoutError, self).__init__(
            0x02,
            "Timeout"
        )
class SensorBridgeI2cTimingError(SensorBridgeI2cError):
    """
    I2C repeated transceive timing error (device code 0x03): invalid
    frequency, interval, timeout or delay parameter.
    """
    def __init__(self):
        super(SensorBridgeI2cTimingError, self).__init__(
            0x03,
            "Invalid timing (frequency, interval, timeout or delay)"
        )
"""
List containing all I2C errors specified in this file.
"""
SENSORBRIDGE_I2C_ERROR_LIST = [
SensorBridgeI2cNackError(),
SensorBridgeI2cTimeoutError(),
SensorBridgeI2cTimingError(),
]
def i2c_error_from_code(code):
    """
    Return the corresponding exception for a given I2C error code.

    :param byte code:
        Error code as received from the device.
    :return:
        None when *code* is zero (no error); otherwise the matching
        exception from SENSORBRIDGE_I2C_ERROR_LIST, or a generic
        SensorBridgeI2cError for unknown codes.
    :rtype:
        None or an instance of
        :py:class:`~sensirion_shdlc_sensorbridge.i2c_errors.SensorBridgeI2cError`
    """  # noqa: E501
    if code == 0:
        return None
    known = next(
        (err for err in SENSORBRIDGE_I2C_ERROR_LIST if err.error_code == code),
        None,
    )
    return known if known is not None else SensorBridgeI2cError(code)
| 25.642857 | 81 | 0.654132 |
from __future__ import absolute_import, division, print_function
import logging
log = logging.getLogger(__name__)
class SensorBridgeI2cError(IOError):
def __init__(self, code, message="Unknown"):
super(SensorBridgeI2cError, self).__init__(
"I2C transceive error: {}".format(message)
)
self.error_code = code
self.error_message = message
class SensorBridgeI2cNackError(SensorBridgeI2cError):
def __init__(self):
super(SensorBridgeI2cNackError, self).__init__(
0x01,
"NACK (byte not acknowledged)"
)
class SensorBridgeI2cTimeoutError(SensorBridgeI2cError):
def __init__(self):
super(SensorBridgeI2cTimeoutError, self).__init__(
0x02,
"Timeout"
)
class SensorBridgeI2cTimingError(SensorBridgeI2cError):
def __init__(self):
super(SensorBridgeI2cTimingError, self).__init__(
0x03,
"Invalid timing (frequency, interval, timeout or delay)"
)
SENSORBRIDGE_I2C_ERROR_LIST = [
SensorBridgeI2cNackError(),
SensorBridgeI2cTimeoutError(),
SensorBridgeI2cTimingError(),
]
def i2c_error_from_code(code):
if code == 0:
return None
for error in SENSORBRIDGE_I2C_ERROR_LIST:
if error.error_code == code:
return error
return SensorBridgeI2cError(code)
| true | true |
f721236e30c2bc62859814934c24d2d0a6124a36 | 1,534 | py | Python | tests/ecr/data_generator/test_vessel_parser.py | zhawan/maro | d8c98deea4296cdcb90efd1fb59bc571cec3a2ef | [
"MIT"
] | null | null | null | tests/ecr/data_generator/test_vessel_parser.py | zhawan/maro | d8c98deea4296cdcb90efd1fb59bc571cec3a2ef | [
"MIT"
] | null | null | null | tests/ecr/data_generator/test_vessel_parser.py | zhawan/maro | d8c98deea4296cdcb90efd1fb59bc571cec3a2ef | [
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import unittest
import yaml
from maro.data_lib.ecr.vessel_parser import VesselsParser
from maro.data_lib.ecr.entities import VesselSetting
conf_str = """
vessels:
rt1_vessel_001:
capacity: 92400
parking:
duration: 1
noise: 0
route:
initial_port_name: supply_port_001
route_name: route_001
sailing:
noise: 0
speed: 10
rt1_vessel_002:
capacity: 92400
parking:
duration: 1
noise: 0
route:
initial_port_name: demand_port_001
route_name: route_001
sailing:
noise: 0
speed: 10
"""
class TestVesselParser(unittest.TestCase):
    """Unit tests for ``VesselsParser``."""

    def test_vessel_parse(self):
        """Parsing the fixture yields two fully populated vessel settings."""
        config = yaml.safe_load(conf_str)
        mapping, vessels = VesselsParser().parse(config["vessels"])
        self.assertEqual(2, len(mapping))
        self.assertEqual(2, len(vessels))
        self.assertEqual("rt1_vessel_001", vessels[0].name)
        self.assertEqual("rt1_vessel_002", vessels[1].name)
        # Both vessels share identical capacity, parking and sailing settings.
        self.assertListEqual([92400, 92400], [v.capacity for v in vessels])
        self.assertListEqual([1, 1], [v.parking_duration for v in vessels])
        self.assertListEqual([0, 0], [v.parking_noise for v in vessels])
        self.assertListEqual([10, 10], [v.sailing_speed for v in vessels])
        self.assertListEqual([0, 0], [v.sailing_noise for v in vessels])
if __name__=="__main__":
unittest.main() | 25.566667 | 75 | 0.666232 |
import unittest
import yaml
from maro.data_lib.ecr.vessel_parser import VesselsParser
from maro.data_lib.ecr.entities import VesselSetting
conf_str = """
vessels:
rt1_vessel_001:
capacity: 92400
parking:
duration: 1
noise: 0
route:
initial_port_name: supply_port_001
route_name: route_001
sailing:
noise: 0
speed: 10
rt1_vessel_002:
capacity: 92400
parking:
duration: 1
noise: 0
route:
initial_port_name: demand_port_001
route_name: route_001
sailing:
noise: 0
speed: 10
"""
class TestVesselParser(unittest.TestCase):
def test_vessel_parse(self):
conf = yaml.safe_load(conf_str)
parser = VesselsParser()
vessel_mapping, vessels = parser.parse(conf["vessels"])
self.assertEqual(2, len(vessel_mapping))
self.assertEqual(2, len(vessels))
self.assertEqual("rt1_vessel_001", vessels[0].name)
self.assertEqual("rt1_vessel_002", vessels[1].name)
self.assertListEqual([92400, 92400], [v.capacity for v in vessels])
self.assertListEqual([1, 1], [v.parking_duration for v in vessels])
self.assertListEqual([0, 0], [v.parking_noise for v in vessels])
self.assertListEqual([10, 10], [v.sailing_speed for v in vessels])
self.assertListEqual([0, 0], [v.sailing_noise for v in vessels])
if __name__=="__main__":
unittest.main() | true | true |
f72123b570cec67b1077598e4da57ff2404e136f | 8,077 | py | Python | corehq/util/es/interface.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | null | null | null | corehq/util/es/interface.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 94 | 2020-12-11T06:57:31.000Z | 2022-03-15T10:24:06.000Z | corehq/util/es/interface.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | null | null | null | import abc
import logging
import traceback
from django.conf import settings
from corehq.pillows.mappings.utils import transform_for_es7
from corehq.util.es.elasticsearch import bulk, scan
class AbstractElasticsearchInterface(metaclass=abc.ABCMeta):
    """Version-agnostic wrapper around a low-level elasticsearch client.

    Subclasses adapt the calls to the API differences between Elasticsearch
    major versions; the concrete class is chosen via the module-level
    ``ElasticsearchInterface`` alias below.
    """
    def __init__(self, es):
        # `es` is the low-level elasticsearch client instance to delegate to.
        self.es = es
    def get_aliases(self):
        """Return the cluster's alias information."""
        return self.es.indices.get_aliases()
    def put_mapping(self, doc_type, mapping, index):
        """Set the mapping for ``doc_type`` on ``index``."""
        return self.es.indices.put_mapping(doc_type, {doc_type: mapping}, index=index)
    def _verify_is_alias(self, index_or_alias):
        """Raise ``ESError`` unless ``index_or_alias`` is a known ES alias."""
        # Local imports — presumably to avoid import cycles at module load
        # time; TODO confirm.
        from corehq.elastic import ES_META, ESError
        from pillowtop.tests.utils import TEST_ES_ALIAS
        all_es_aliases = [index_info.alias for index_info in ES_META.values()] + [TEST_ES_ALIAS]
        if index_or_alias not in all_es_aliases:
            raise ESError(
                f"{index_or_alias} is an unknown alias, query target must be one of {all_es_aliases}")
    def update_index_settings(self, index, settings_dict):
        """Apply index-level settings; only an ``{'index': ...}`` dict is accepted."""
        assert set(settings_dict.keys()) == {'index'}, settings_dict.keys()
        return self.es.indices.put_settings(settings_dict, index=index)
    def _get_source(self, index_alias, doc_type, doc_id, source_includes=None):
        """Fetch a document's ``_source``, optionally limited to some fields."""
        kwargs = {"_source_include": source_includes} if source_includes else {}
        return self.es.get_source(index_alias, doc_type, doc_id, **kwargs)
    def doc_exists(self, index_alias, doc_id, doc_type):
        # NOTE(review): parameter order here is (doc_id, doc_type) but the
        # client call takes (doc_type, doc_id) — intentional adapter quirk,
        # keep callers' keyword usage in mind.
        return self.es.exists(index_alias, doc_type, doc_id)
    def _mget(self, index_alias, body, doc_type):
        """Multi-get raw API call; ``body`` is e.g. ``{'ids': [...]}``."""
        return self.es.mget(
            index=index_alias, doc_type=doc_type, body=body, _source=True)
    def get_doc(self, index_alias, doc_type, doc_id, source_includes=None, verify_alias=True):
        """Return a single document's source with its ``_id`` injected."""
        if verify_alias:
            self._verify_is_alias(index_alias)
        doc = self._get_source(index_alias, doc_type, doc_id, source_includes=source_includes)
        doc['_id'] = doc_id
        return doc
    def get_bulk_docs(self, index_alias, doc_type, doc_ids, verify_alias=True):
        """Return the sources of all found documents among ``doc_ids``.

        Missing ids are silently skipped; per-document errors raise ESError.
        """
        from corehq.elastic import ESError
        if verify_alias:
            self._verify_is_alias(index_alias)
        docs = []
        results = self._mget(index_alias=index_alias, doc_type=doc_type, body={'ids': doc_ids})
        for doc_result in results['docs']:
            if 'error' in doc_result:
                raise ESError(doc_result['error'].get('reason', 'error doing bulk get'))
            if doc_result['found']:
                self._fix_hit(doc_result)
                docs.append(doc_result['_source'])
        return docs
    def index_doc(self, index_alias, doc_type, doc_id, doc, params=None, verify_alias=True):
        """Index (create or replace) a document under ``doc_id``."""
        if verify_alias:
            self._verify_is_alias(index_alias)
        self.es.index(index_alias, doc_type, body=self._without_id_field(doc), id=doc_id,
                      params=params or {})
    def update_doc_fields(self, index_alias, doc_type, doc_id, fields, params=None, verify_alias=True):
        """Partially update a document with the given field values."""
        if verify_alias:
            self._verify_is_alias(index_alias)
        self.es.update(index_alias, doc_type, doc_id, body={"doc": self._without_id_field(fields)},
                       params=params or {})
    def _prepare_count_query(self, query):
        """Return a copy of ``query`` stripped of params the count API rejects."""
        # pagination params are not required and not supported in ES count API
        query = query.copy()
        for extra in ['size', 'sort', 'from', 'to', '_source']:
            query.pop(extra, None)
        return query
    def count(self, index_alias, doc_type, query):
        """Return the number of documents matching ``query``."""
        query = self._prepare_count_query(query)
        return self.es.count(index=index_alias, doc_type=doc_type, body=query).get('count')
    @staticmethod
    def _without_id_field(doc):
        """Return ``doc`` without its ``_id`` key (shallow copy)."""
        # Field [_id] is a metadata field and cannot be added inside a document.
        # Use the index API request parameters.
        return {key: value for key, value in doc.items() if key != '_id'}
    def delete_doc(self, index_alias, doc_type, doc_id):
        """Delete a single document."""
        self.es.delete(index_alias, doc_type, doc_id)
    def bulk_ops(self, actions, stats_only=False, **kwargs):
        """Execute bulk actions, stripping ``_id`` out of each ``_source``."""
        for action in actions:
            if '_source' in action:
                action['_source'] = self._without_id_field(action['_source'])
        ret = bulk(self.es, actions, stats_only=stats_only, **kwargs)
        return ret
    def search(self, index_alias=None, doc_type=None, body=None, params=None, verify_alias=True, **kwargs):
        """Run a search and normalize the hits (see ``_fix_hits_in_results``)."""
        if verify_alias:
            self._verify_is_alias(index_alias)
        results = self.es.search(index=index_alias, doc_type=doc_type, body=body, params=params or {}, **kwargs)
        self._fix_hits_in_results(results)
        return results
    def scroll(self, scroll_id=None, body=None, params=None, **kwargs):
        """Continue a scrolling search and normalize the returned hits."""
        results = self.es.scroll(scroll_id, body, params=params or {}, **kwargs)
        self._fix_hits_in_results(results)
        return results
    def scan(self, index_alias, query, doc_type):
        """Iterate over all documents matching ``query`` via the scan helper."""
        return scan(self.es, query=query, index=index_alias, doc_type=doc_type, search_type='scan')
    @staticmethod
    def _fix_hit(hit):
        # Inject the document id into the source so callers see a single dict.
        if '_source' in hit:
            hit['_source']['_id'] = hit['_id']
    def _fix_hits_in_results(self, results):
        """Normalize search results in place across ES versions."""
        try:
            hits = results['hits']['hits']
        except KeyError:
            return results
        for hit in hits:
            self._fix_hit(hit)
        total = results['hits']['total']
        # In ES7 total is a dict
        if isinstance(total, dict):
            results['hits']['total'] = total.get('value', 0)
class ElasticsearchInterfaceDefault(AbstractElasticsearchInterface):
    """Interface for ES 1.x/2.x — the abstract base's behavior already matches."""
    pass
class ElasticsearchInterface7(AbstractElasticsearchInterface):
    """Adapter for Elasticsearch 7.x.

    Overrides drop the ``doc_type`` argument from client calls (the 7.x API
    no longer accepts mapping types) while keeping the base class signatures
    so callers are version-agnostic.
    """
    def get_aliases(self):
        # 7.x client method is ``get_alias`` (singular), not ``get_aliases``.
        return self.es.indices.get_alias()
    def search(self, index_alias=None, doc_type=None, body=None, params=None, verify_alias=True, **kwargs):
        """Run a search; ``doc_type`` is accepted but ignored in ES7."""
        if verify_alias:
            self._verify_is_alias(index_alias)
        results = self.es.search(index=index_alias, body=body, params=params or {}, **kwargs)
        self._fix_hits_in_results(results)
        return results
    def put_mapping(self, doc_type, mapping, index):
        """Set the (typeless) mapping on ``index``, transforming it for ES7."""
        mapping = transform_for_es7(mapping)
        return self.es.indices.put_mapping(mapping, index=index)
    def doc_exists(self, index_alias, doc_id, doc_type):
        return self.es.exists(index_alias, doc_id)
    def _get_source(self, index_alias, doc_type, doc_id, source_includes=None):
        # 7.x renamed the kwarg from ``_source_include`` to ``_source_includes``.
        kwargs = {"_source_includes": source_includes} if source_includes else {}
        return self.es.get_source(index_alias, doc_id, **kwargs)
    def _mget(self, index_alias, body, doc_type):
        return self.es.mget(
            index=index_alias, body=body, _source=True)
    def index_doc(self, index_alias, doc_type, doc_id, doc, params=None, verify_alias=True):
        """Index (create or replace) a document under ``doc_id``."""
        if verify_alias:
            self._verify_is_alias(index_alias)
        params = params or {}
        # not supported in ES7
        params.pop('retry_on_conflict', None)
        self.es.index(index_alias, body=self._without_id_field(doc), id=doc_id,
                      params=params)
    def update_doc_fields(self, index_alias, doc_type, doc_id, fields, params=None, verify_alias=True):
        """Partially update a document with the given field values."""
        if verify_alias:
            self._verify_is_alias(index_alias)
        self.es.update(index_alias, doc_id, body={"doc": self._without_id_field(fields)},
                       params=params or {})
    def delete_doc(self, index_alias, doc_type, doc_id):
        self.es.delete(index_alias, doc_id)
    def count(self, index_alias, doc_type, query):
        query = self._prepare_count_query(query)
        return self.es.count(index=index_alias, body=query).get('count')
    def scan(self, index_alias, query, doc_type):
        # ``search_type='scan'`` is gone in 7.x; sorting by _doc is the
        # recommended efficient scroll order.
        query["sort"] = "_doc"
        return scan(self.es, query=query, index=index_alias)
# Select the interface implementation matching the configured Elasticsearch
# major version; resolved once at import time.
ElasticsearchInterface = {
    1: ElasticsearchInterfaceDefault,
    2: ElasticsearchInterfaceDefault,
    7: ElasticsearchInterface7,
}[settings.ELASTICSEARCH_MAJOR_VERSION]
| 39.985149 | 112 | 0.668317 | import abc
import logging
import traceback
from django.conf import settings
from corehq.pillows.mappings.utils import transform_for_es7
from corehq.util.es.elasticsearch import bulk, scan
class AbstractElasticsearchInterface(metaclass=abc.ABCMeta):
def __init__(self, es):
self.es = es
def get_aliases(self):
return self.es.indices.get_aliases()
def put_mapping(self, doc_type, mapping, index):
return self.es.indices.put_mapping(doc_type, {doc_type: mapping}, index=index)
def _verify_is_alias(self, index_or_alias):
from corehq.elastic import ES_META, ESError
from pillowtop.tests.utils import TEST_ES_ALIAS
all_es_aliases = [index_info.alias for index_info in ES_META.values()] + [TEST_ES_ALIAS]
if index_or_alias not in all_es_aliases:
raise ESError(
f"{index_or_alias} is an unknown alias, query target must be one of {all_es_aliases}")
def update_index_settings(self, index, settings_dict):
assert set(settings_dict.keys()) == {'index'}, settings_dict.keys()
return self.es.indices.put_settings(settings_dict, index=index)
def _get_source(self, index_alias, doc_type, doc_id, source_includes=None):
kwargs = {"_source_include": source_includes} if source_includes else {}
return self.es.get_source(index_alias, doc_type, doc_id, **kwargs)
def doc_exists(self, index_alias, doc_id, doc_type):
return self.es.exists(index_alias, doc_type, doc_id)
def _mget(self, index_alias, body, doc_type):
return self.es.mget(
index=index_alias, doc_type=doc_type, body=body, _source=True)
def get_doc(self, index_alias, doc_type, doc_id, source_includes=None, verify_alias=True):
if verify_alias:
self._verify_is_alias(index_alias)
doc = self._get_source(index_alias, doc_type, doc_id, source_includes=source_includes)
doc['_id'] = doc_id
return doc
def get_bulk_docs(self, index_alias, doc_type, doc_ids, verify_alias=True):
from corehq.elastic import ESError
if verify_alias:
self._verify_is_alias(index_alias)
docs = []
results = self._mget(index_alias=index_alias, doc_type=doc_type, body={'ids': doc_ids})
for doc_result in results['docs']:
if 'error' in doc_result:
raise ESError(doc_result['error'].get('reason', 'error doing bulk get'))
if doc_result['found']:
self._fix_hit(doc_result)
docs.append(doc_result['_source'])
return docs
def index_doc(self, index_alias, doc_type, doc_id, doc, params=None, verify_alias=True):
if verify_alias:
self._verify_is_alias(index_alias)
self.es.index(index_alias, doc_type, body=self._without_id_field(doc), id=doc_id,
params=params or {})
def update_doc_fields(self, index_alias, doc_type, doc_id, fields, params=None, verify_alias=True):
if verify_alias:
self._verify_is_alias(index_alias)
self.es.update(index_alias, doc_type, doc_id, body={"doc": self._without_id_field(fields)},
params=params or {})
def _prepare_count_query(self, query):
query = query.copy()
for extra in ['size', 'sort', 'from', 'to', '_source']:
query.pop(extra, None)
return query
def count(self, index_alias, doc_type, query):
query = self._prepare_count_query(query)
return self.es.count(index=index_alias, doc_type=doc_type, body=query).get('count')
@staticmethod
def _without_id_field(doc):
return {key: value for key, value in doc.items() if key != '_id'}
def delete_doc(self, index_alias, doc_type, doc_id):
self.es.delete(index_alias, doc_type, doc_id)
def bulk_ops(self, actions, stats_only=False, **kwargs):
for action in actions:
if '_source' in action:
action['_source'] = self._without_id_field(action['_source'])
ret = bulk(self.es, actions, stats_only=stats_only, **kwargs)
return ret
def search(self, index_alias=None, doc_type=None, body=None, params=None, verify_alias=True, **kwargs):
if verify_alias:
self._verify_is_alias(index_alias)
results = self.es.search(index=index_alias, doc_type=doc_type, body=body, params=params or {}, **kwargs)
self._fix_hits_in_results(results)
return results
def scroll(self, scroll_id=None, body=None, params=None, **kwargs):
results = self.es.scroll(scroll_id, body, params=params or {}, **kwargs)
self._fix_hits_in_results(results)
return results
def scan(self, index_alias, query, doc_type):
return scan(self.es, query=query, index=index_alias, doc_type=doc_type, search_type='scan')
@staticmethod
def _fix_hit(hit):
if '_source' in hit:
hit['_source']['_id'] = hit['_id']
def _fix_hits_in_results(self, results):
try:
hits = results['hits']['hits']
except KeyError:
return results
for hit in hits:
self._fix_hit(hit)
total = results['hits']['total']
if isinstance(total, dict):
results['hits']['total'] = total.get('value', 0)
class ElasticsearchInterfaceDefault(AbstractElasticsearchInterface):
pass
class ElasticsearchInterface7(AbstractElasticsearchInterface):
def get_aliases(self):
return self.es.indices.get_alias()
def search(self, index_alias=None, doc_type=None, body=None, params=None, verify_alias=True, **kwargs):
if verify_alias:
self._verify_is_alias(index_alias)
results = self.es.search(index=index_alias, body=body, params=params or {}, **kwargs)
self._fix_hits_in_results(results)
return results
def put_mapping(self, doc_type, mapping, index):
mapping = transform_for_es7(mapping)
return self.es.indices.put_mapping(mapping, index=index)
def doc_exists(self, index_alias, doc_id, doc_type):
return self.es.exists(index_alias, doc_id)
def _get_source(self, index_alias, doc_type, doc_id, source_includes=None):
kwargs = {"_source_includes": source_includes} if source_includes else {}
return self.es.get_source(index_alias, doc_id, **kwargs)
def _mget(self, index_alias, body, doc_type):
return self.es.mget(
index=index_alias, body=body, _source=True)
def index_doc(self, index_alias, doc_type, doc_id, doc, params=None, verify_alias=True):
if verify_alias:
self._verify_is_alias(index_alias)
params = params or {}
params.pop('retry_on_conflict', None)
self.es.index(index_alias, body=self._without_id_field(doc), id=doc_id,
params=params)
def update_doc_fields(self, index_alias, doc_type, doc_id, fields, params=None, verify_alias=True):
if verify_alias:
self._verify_is_alias(index_alias)
self.es.update(index_alias, doc_id, body={"doc": self._without_id_field(fields)},
params=params or {})
def delete_doc(self, index_alias, doc_type, doc_id):
self.es.delete(index_alias, doc_id)
def count(self, index_alias, doc_type, query):
query = self._prepare_count_query(query)
return self.es.count(index=index_alias, body=query).get('count')
def scan(self, index_alias, query, doc_type):
query["sort"] = "_doc"
return scan(self.es, query=query, index=index_alias)
ElasticsearchInterface = {
1: ElasticsearchInterfaceDefault,
2: ElasticsearchInterfaceDefault,
7: ElasticsearchInterface7,
}[settings.ELASTICSEARCH_MAJOR_VERSION]
| true | true |
f72125bd49bf7f1f45aab75707173700a233a682 | 2,262 | py | Python | brave/overlays/effect.py | datagutt/brave | 5b4de55146645f96870ffc544859e6f2bb9ec735 | [
"Apache-2.0"
] | 572 | 2018-10-25T10:52:21.000Z | 2022-03-09T18:02:20.000Z | brave/overlays/effect.py | datagutt/brave | 5b4de55146645f96870ffc544859e6f2bb9ec735 | [
"Apache-2.0"
] | 50 | 2018-11-06T08:53:27.000Z | 2022-01-04T17:00:37.000Z | brave/overlays/effect.py | datagutt/brave | 5b4de55146645f96870ffc544859e6f2bb9ec735 | [
"Apache-2.0"
] | 130 | 2018-11-01T14:50:46.000Z | 2022-03-10T20:31:41.000Z | from brave.overlays.overlay import Overlay
from gi.repository import Gst
class EffectOverlay(Overlay):
    '''
    Overlay that applies a GStreamer video effect to its source.
    '''
    def permitted_props(self):
        '''Extend the base overlay props with the effect selection and visibility.'''
        return {
            **super().permitted_props(),
            'effect_name': {
                'type': 'str',
                'default': 'edgetv',
                # Keys are GStreamer element names; values are display labels.
                'permitted_values': {
                    'agingtv': 'AgingTV effect',
                    'burn': 'Burn',
                    'chromium': 'Chromium',
                    'dicetv': 'DiceTV effect',
                    'dilate': 'Dilate',
                    'dodge': 'Dodge',
                    'edgetv': 'EdgeTV effect',
                    'exclusion': 'Exclusion',
                    'optv': 'OpTV effect',
                    'radioactv': 'RadioacTV effect',
                    'revtv': 'RevTV effect',
                    'rippletv': 'RippleTV effect',
                    'solarize': 'Solarize',
                    'streaktv': 'StreakTV effect',
                    'vertigotv': 'VertigoTV effect',
                    'warptv': 'WarpTV effect'
                    # Note: quarktv and shagadelictv are removed as they were unreliable in testing
                }
            },
            'visible': {
                'type': 'bool',
                'default': False
            }
        }
    def create_elements(self):
        '''Build the effect bin and attach it to the source's pipeline.'''
        # The effects filters can mess with the alpha channel.
        # The best solution I've found is to allow it to move into RGBx, then force a detour via RGB
        # to remove the alpha channel, before moving back to our default RGBA.
        # This is done in a 'bin' so that the overlay can be manipulated as one thing.
        desc = ('videoconvert ! %s ! videoconvert ! capsfilter caps="video/x-raw,format=RGB" ! '
                'videoconvert ! capsfilter caps="video/x-raw,format=RGBA"') % self.effect_name
        self.element = Gst.parse_bin_from_description(desc, True)
        self.element.set_name('%s_bin' % self.uid)
        place_to_add_elements = getattr(self.source, 'final_video_tee').parent
        if not place_to_add_elements.add(self.element):
            self.logger.warning('Unable to add effect overlay bin to the source pipeline')
| 41.888889 | 100 | 0.525199 | from brave.overlays.overlay import Overlay
from gi.repository import Gst
class EffectOverlay(Overlay):
def permitted_props(self):
return {
**super().permitted_props(),
'effect_name': {
'type': 'str',
'default': 'edgetv',
'permitted_values': {
'agingtv': 'AgingTV effect',
'burn': 'Burn',
'chromium': 'Chromium',
'dicetv': 'DiceTV effect',
'dilate': 'Dilate',
'dodge': 'Dodge',
'edgetv': 'EdgeTV effect',
'exclusion': 'Exclusion',
'optv': 'OpTV effect',
'radioactv': 'RadioacTV effect',
'revtv': 'RevTV effect',
'rippletv': 'RippleTV effect',
'solarize': 'Solarize',
'streaktv': 'StreakTV effect',
'vertigotv': 'VertigoTV effect',
'warptv': 'WarpTV effect'
}
},
'visible': {
'type': 'bool',
'default': False
}
}
def create_elements(self):
# to remove the alpha channel, before moving back to our default RGBA.
# This is done in a 'bin' so that the overlay can be manipulated as one thing.
desc = ('videoconvert ! %s ! videoconvert ! capsfilter caps="video/x-raw,format=RGB" ! '
'videoconvert ! capsfilter caps="video/x-raw,format=RGBA"') % self.effect_name
self.element = Gst.parse_bin_from_description(desc, True)
self.element.set_name('%s_bin' % self.uid)
place_to_add_elements = getattr(self.source, 'final_video_tee').parent
if not place_to_add_elements.add(self.element):
self.logger.warning('Unable to add effect overlay bin to the source pipeline')
| true | true |
f7212703196fd6c35cdef4b889edc2bf6b134e91 | 7,399 | py | Python | pytest_testrail/conftest.py | harmonm/pytest-testrail | cfd667b33cc857dd65c8531823859cd871aff525 | [
"MIT"
] | null | null | null | pytest_testrail/conftest.py | harmonm/pytest-testrail | cfd667b33cc857dd65c8531823859cd871aff525 | [
"MIT"
] | null | null | null | pytest_testrail/conftest.py | harmonm/pytest-testrail | cfd667b33cc857dd65c8531823859cd871aff525 | [
"MIT"
] | null | null | null | # -*- coding: UTF-8 -*-
import os
import sys
if sys.version_info.major == 2:
# python2
import ConfigParser as configparser
else:
# python3
import configparser
from .plugin import PyTestRailPlugin
from .testrail_api import APIClient
def pytest_addoption(parser):
    """Register the TestRail command line options with pytest.

    Each option can alternatively be supplied through the config file parsed
    by ``ConfigManager``; the relevant section/key is noted in the help text.
    (Fix: stray trailing commas after two ``group.addoption(...)`` calls
    previously turned those statements into pointless one-element tuple
    expressions — behavior was unchanged, but they are removed here.)
    """
    group = parser.getgroup('testrail')
    group.addoption(
        '--testrail',
        action='store_true',
        help='Create and update testruns with TestRail')
    group.addoption(
        '--tr-config',
        action='store',
        default='testrail.cfg',
        help='Path to the config file containing information about the TestRail server (defaults to testrail.cfg)')
    group.addoption(
        '--tr-url',
        action='store',
        help='TestRail address you use to access TestRail with your web browser (config file: url in API section)')
    group.addoption(
        '--tr-email',
        action='store',
        help='Email for the account on the TestRail server (config file: email in API section)')
    group.addoption(
        '--tr-password',
        action='store',
        help='Password for the account on the TestRail server (config file: password in API section)')
    group.addoption(
        '--tr-testrun-assignedto-id',
        action='store',
        help='ID of the user assigned to the test run (config file: assignedto_id in TESTRUN section)')
    group.addoption(
        '--tr-testrun-project-id',
        action='store',
        help='ID of the project the test run is in (config file: project_id in TESTRUN section)')
    group.addoption(
        '--tr-testrun-suite-id',
        action='store',
        help='ID of the test suite containing the test cases (config file: suite_id in TESTRUN section)')
    group.addoption(
        '--tr-testrun-suite-include-all',
        action='store_true',
        default=None,
        help='Include all test cases in specified test suite when creating test run (config file: include_all in TESTRUN section)')
    group.addoption(
        '--tr-testrun-name',
        action='store',
        default=None,
        help='Name given to testrun, that appears in TestRail (config file: name in TESTRUN section)')
    group.addoption(
        '--tr-run-id',
        action='store',
        default=0,
        required=False,
        help='Identifier of testrun, that appears in TestRail. If provided, option "--tr-testrun-name" will be ignored')
    group.addoption(
        '--tr-plan-id',
        action='store',
        default=0,
        required=False,
        help='Identifier of testplan, that appears in TestRail. If provided, option "--tr-testrun-name" will be ignored')
    group.addoption(
        '--tr-version',
        action='store',
        default='',
        required=False,
        help='Indicate a version in Test Case result')
    group.addoption(
        '--tr-no-ssl-cert-check',
        action='store_false',
        default=None,
        help='Do not check for valid SSL certificate on TestRail host')
    group.addoption(
        '--tr-close-on-complete',
        action='store_true',
        default=False,
        required=False,
        help='Close a test run on completion')
    group.addoption(
        '--tr-dont-publish-blocked',
        action='store_false',
        required=False,
        help='Determine if results of "blocked" testcases (in TestRail) are published or not')
    group.addoption(
        '--tr-skip-missing',
        action='store_true',
        required=False,
        help='Skip test cases that are not present in testrun')
    group.addoption(
        "--tr-add-passes",
        action="store",
        default=None,
        required=False,
        help="Add passing results, default is False"
    )
    group.addoption(
        '--tr-testrun-milestone-id',
        action='store',
        help='Identifier for milestone, that appears in TestRail. If provided, testrun will be associated with milestone'
    )
def pytest_configure(config):
    """Register ``PyTestRailPlugin`` when ``--testrail`` was passed.

    Option values are resolved through ``ConfigManager`` so that command line
    flags take precedence over the config file.
    """
    if config.getoption('--testrail'):
        cfg_file_path = config.getoption('--tr-config')
        config_manager = ConfigManager(cfg_file_path, config)
        client = APIClient(config_manager.getoption('tr-url', 'url', 'API'),
                           config_manager.getoption('tr-email', 'email', 'API'),
                           config_manager.getoption('tr-password', 'password', 'API'))
        config.pluginmanager.register(
            PyTestRailPlugin(
                client=client,
                assign_user_id=config_manager.getoption('tr-testrun-assignedto-id', 'assignedto_id', 'TESTRUN'),
                project_id=config_manager.getoption('tr-testrun-project-id', 'project_id', 'TESTRUN'),
                suite_id=config_manager.getoption('tr-testrun-suite-id', 'suite_id', 'TESTRUN'),
                include_all=config_manager.getoption('tr-testrun-suite-include-all', 'include_all', 'TESTRUN', is_bool=True, default=False),
                cert_check=config_manager.getoption('tr-no-ssl-cert-check', 'no_ssl_cert_check', 'API', is_bool=True, default=True),
                tr_name=config_manager.getoption('tr-testrun-name', 'name', 'TESTRUN'),
                milestone_id=config_manager.getoption('tr-testrun-milestone-id', 'milestone_id', 'TESTRUN'),
                run_id=config.getoption('--tr-run-id'),
                plan_id=config.getoption('--tr-plan-id'),
                version=config.getoption('--tr-version'),
                close_on_complete=config.getoption('--tr-close-on-complete'),
                publish_blocked=config.getoption('--tr-dont-publish-blocked'),
                skip_missing=config.getoption('--tr-skip-missing'),
                add_passes=config_manager.getoption("tr-add-passes", "add_passes", "TESTRUN", is_bool=True, default=None)
            ),
            # Name of plugin instance (allow to be used by other plugins)
            name="pytest-testrail-instance"
        )
class ConfigManager(object):
    """Resolves configuration values for the plugin.

    Options passed as command line flags take precedence over options set in
    the config file, which in turn take precedence over defaults.
    """

    def __init__(self, cfg_file_path, config):
        """
        :param cfg_file_path: Path to the config file containing information
            about the TestRail server.
        :type cfg_file_path: str or None
        :param config: Config object containing commandline flag options.
        :type config: _pytest.config.Config
        """
        self.config = config
        self.cfg_file = None
        file_present = os.path.isfile(cfg_file_path) or os.path.islink(cfg_file_path)
        if file_present:
            file_parser = configparser.ConfigParser()
            file_parser.read(cfg_file_path)
            self.cfg_file = file_parser

    def getoption(self, flag, cfg_name, section=None, is_bool=False, default=None):
        """Return the value for an option: CLI flag > config file > default."""
        cli_value = self.config.getoption('--{}'.format(flag))
        if cli_value is not None:
            return cli_value
        # No config file (or no section requested): fall back to the default.
        if section is None or self.cfg_file is None:
            return default
        if not self.cfg_file.has_option(section, cfg_name):
            return default
        if is_bool:
            return self.cfg_file.getboolean(section, cfg_name)
        return self.cfg_file.get(section, cfg_name)
| 41.105556 | 140 | 0.626571 |
import os
import sys
if sys.version_info.major == 2:
import ConfigParser as configparser
else:
import configparser
from .plugin import PyTestRailPlugin
from .testrail_api import APIClient
def pytest_addoption(parser):
group = parser.getgroup('testrail')
group.addoption(
'--testrail',
action='store_true',
help='Create and update testruns with TestRail')
group.addoption(
'--tr-config',
action='store',
default='testrail.cfg',
help='Path to the config file containing information about the TestRail server (defaults to testrail.cfg)')
group.addoption(
'--tr-url',
action='store',
help='TestRail address you use to access TestRail with your web browser (config file: url in API section)')
group.addoption(
'--tr-email',
action='store',
help='Email for the account on the TestRail server (config file: email in API section)')
group.addoption(
'--tr-password',
action='store',
help='Password for the account on the TestRail server (config file: password in API section)')
group.addoption(
'--tr-testrun-assignedto-id',
action='store',
help='ID of the user assigned to the test run (config file: assignedto_id in TESTRUN section)')
group.addoption(
'--tr-testrun-project-id',
action='store',
help='ID of the project the test run is in (config file: project_id in TESTRUN section)')
group.addoption(
'--tr-testrun-suite-id',
action='store',
help='ID of the test suite containing the test cases (config file: suite_id in TESTRUN section)')
group.addoption(
'--tr-testrun-suite-include-all',
action='store_true',
default=None,
help='Include all test cases in specified test suite when creating test run (config file: include_all in TESTRUN section)')
group.addoption(
'--tr-testrun-name',
action='store',
default=None,
help='Name given to testrun, that appears in TestRail (config file: name in TESTRUN section)')
group.addoption(
'--tr-run-id',
action='store',
default=0,
required=False,
help='Identifier of testrun, that appears in TestRail. If provided, option "--tr-testrun-name" will be ignored')
group.addoption(
'--tr-plan-id',
action='store',
default=0,
required=False,
help='Identifier of testplan, that appears in TestRail. If provided, option "--tr-testrun-name" will be ignored')
group.addoption(
'--tr-version',
action='store',
default='',
required=False,
help='Indicate a version in Test Case result')
group.addoption(
'--tr-no-ssl-cert-check',
action='store_false',
default=None,
help='Do not check for valid SSL certificate on TestRail host')
group.addoption(
'--tr-close-on-complete',
action='store_true',
default=False,
required=False,
help='Close a test run on completion')
group.addoption(
'--tr-dont-publish-blocked',
action='store_false',
required=False,
help='Determine if results of "blocked" testcases (in TestRail) are published or not')
group.addoption(
'--tr-skip-missing',
action='store_true',
required=False,
help='Skip test cases that are not present in testrun'),
group.addoption(
"--tr-add-passes",
action="store",
default=None,
required=False,
help="Add passing results, default is False"
),
group.addoption(
'--tr-testrun-milestone-id',
action='store',
help='Identifier for milestone, that appears in TestRail. If provided, testrun will be associated with milestone'
)
def pytest_configure(config):
    """Register the TestRail reporting plugin when ``--testrail`` is given.

    Option values are resolved through `ConfigManager`, which prefers
    command-line flags and falls back to the ``--tr-config`` file; a few
    options (run/plan id, version, ...) are read from the CLI only.
    """
    if config.getoption('--testrail'):
        cfg_file_path = config.getoption('--tr-config')
        config_manager = ConfigManager(cfg_file_path, config)
        # Credentials/URL come from the [API] section (or the CLI flags).
        client = APIClient(config_manager.getoption('tr-url', 'url', 'API'),
                           config_manager.getoption('tr-email', 'email', 'API'),
                           config_manager.getoption('tr-password', 'password', 'API'))

        config.pluginmanager.register(
            PyTestRailPlugin(
                client=client,
                assign_user_id=config_manager.getoption('tr-testrun-assignedto-id', 'assignedto_id', 'TESTRUN'),
                project_id=config_manager.getoption('tr-testrun-project-id', 'project_id', 'TESTRUN'),
                suite_id=config_manager.getoption('tr-testrun-suite-id', 'suite_id', 'TESTRUN'),
                include_all=config_manager.getoption('tr-testrun-suite-include-all', 'include_all', 'TESTRUN', is_bool=True, default=False),
                cert_check=config_manager.getoption('tr-no-ssl-cert-check', 'no_ssl_cert_check', 'API', is_bool=True, default=True),
                tr_name=config_manager.getoption('tr-testrun-name', 'name', 'TESTRUN'),
                milestone_id=config_manager.getoption('tr-testrun-milestone-id', 'milestone_id', 'TESTRUN'),
                run_id=config.getoption('--tr-run-id'),
                plan_id=config.getoption('--tr-plan-id'),
                version=config.getoption('--tr-version'),
                close_on_complete=config.getoption('--tr-close-on-complete'),
                publish_blocked=config.getoption('--tr-dont-publish-blocked'),
                skip_missing=config.getoption('--tr-skip-missing'),
                add_passes=config_manager.getoption("tr-add-passes", "add_passes", "TESTRUN", is_bool=True, default=None)
            ),
            # Fixed plugin name so it can be looked up/unregistered later.
            name="pytest-testrail-instance"
        )
class ConfigManager(object):
    """Resolve option values from pytest CLI flags with a config-file fallback."""

    def __init__(self, cfg_file_path, config):
        """Store the pytest config and parse *cfg_file_path* if it exists.

        When the path is neither a regular file nor a symlink,
        ``self.cfg_file`` stays ``None`` and only CLI values are used.
        """
        self.config = config
        self.cfg_file = None
        path_usable = os.path.isfile(cfg_file_path) or os.path.islink(cfg_file_path)
        if path_usable:
            parser = configparser.ConfigParser()
            parser.read(cfg_file_path)
            self.cfg_file = parser

    def getoption(self, flag, cfg_name, section=None, is_bool=False, default=None):
        """Return ``--<flag>`` from the CLI if set, else *cfg_name* from the
        config file's *section*, else *default*.

        With ``is_bool=True`` the config-file value is parsed as a boolean.
        """
        cli_value = self.config.getoption(f'--{flag}')
        if cli_value is not None:
            return cli_value
        file_usable = section is not None and self.cfg_file is not None
        if not file_usable or not self.cfg_file.has_option(section, cfg_name):
            return default
        reader = self.cfg_file.getboolean if is_bool else self.cfg_file.get
        return reader(section, cfg_name)
| true | true |
f72128027575513090564f54bc3c085deb980059 | 666 | py | Python | lldb/test/API/lang/swift/po/sys_types/TestSwiftPOSysTypes.py | LaudateCorpus1/llvm-project-staging | cc926dc3a87af7023aa9b6c392347a0a8ed6949b | [
"Apache-2.0"
] | 2 | 2021-11-20T04:04:47.000Z | 2022-01-06T07:44:23.000Z | lldb/test/API/lang/swift/po/sys_types/TestSwiftPOSysTypes.py | LaudateCorpus1/llvm-project-staging | cc926dc3a87af7023aa9b6c392347a0a8ed6949b | [
"Apache-2.0"
] | null | null | null | lldb/test/API/lang/swift/po/sys_types/TestSwiftPOSysTypes.py | LaudateCorpus1/llvm-project-staging | cc926dc3a87af7023aa9b6c392347a0a8ed6949b | [
"Apache-2.0"
] | null | null | null | # TestSwiftPOSysTypes.py
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ------------------------------------------------------------------------------
# Build an lldb "inline" test from this file's annotated source; the test is
# marked as a Swift test and skipped on Windows.
import lldbsuite.test.lldbinline as lldbinline
from lldbsuite.test.decorators import *

lldbinline.MakeInlineTest(__file__, globals(),
                          decorators=[swiftTest,skipIf(oslist=['windows'])])
| 39.176471 | 80 | 0.657658 |
import lldbsuite.test.lldbinline as lldbinline
from lldbsuite.test.decorators import *
lldbinline.MakeInlineTest(__file__, globals(),
decorators=[swiftTest,skipIf(oslist=['windows'])])
| true | true |
f721282c9bbbd6198fca4dcb39f852eed304a1be | 16,248 | py | Python | brian2/codegen/generators/numpy_generator.py | SimonAltrogge/brian2 | 6463c368a8277041051bf5ae4816f0dd5b6e057c | [
"BSD-2-Clause"
] | 674 | 2015-01-14T11:05:39.000Z | 2022-03-29T04:53:50.000Z | brian2/codegen/generators/numpy_generator.py | JongwanKim2090/brian2 | c212a57cb992b766786b5769ebb830ff12d8a8ad | [
"BSD-2-Clause"
] | 937 | 2015-01-05T13:24:22.000Z | 2022-03-25T13:10:13.000Z | brian2/codegen/generators/numpy_generator.py | JongwanKim2090/brian2 | c212a57cb992b766786b5769ebb830ff12d8a8ad | [
"BSD-2-Clause"
] | 237 | 2015-01-05T13:54:16.000Z | 2022-03-15T22:16:32.000Z |
import itertools
import numpy as np
from brian2.parsing.bast import brian_dtype_from_dtype
from brian2.parsing.rendering import NumpyNodeRenderer
from brian2.core.functions import DEFAULT_FUNCTIONS, timestep
from brian2.core.variables import ArrayVariable
from brian2.utils.stringtools import get_identifiers, word_substitute, indent
from brian2.utils.logger import get_logger
from .base import CodeGenerator
__all__ = ['NumpyCodeGenerator']
logger = get_logger(__name__)
class VectorisationError(Exception):
    """Signals that a statement could not be vectorised (caught to fall back
    to a plain Python loop)."""
class NumpyCodeGenerator(CodeGenerator):
    """
    Numpy language

    Essentially Python but vectorised.

    Abstract-code statements are translated into Python operating on whole
    numpy arrays; statements that write through repeated indices go through
    ``ufunc.at``-based code or, as a last resort, a plain Python loop.
    """

    class_name = 'numpy'

    _use_ufunc_at_vectorisation = True  # allow this to be off for testing only

    def translate_expression(self, expr):
        """Render one abstract-code expression as a numpy-flavoured Python string."""
        # Replace function names (mapping to their numpy implementations)
        # before rendering the expression tree.
        expr = word_substitute(expr, self.func_name_replacements)
        return NumpyNodeRenderer(auto_vectorise=self.auto_vectorise).render_expr(expr, self.variables).strip()

    def translate_statement(self, statement):
        """Translate a single abstract-code statement into a line of Python."""
        # TODO: optimisation, translate arithmetic to a sequence of inplace
        #       operations like a=b+c -> add(b, c, a)
        var, op, expr, comment = (statement.var, statement.op,
                                  statement.expr, statement.comment)
        if op == ':=':
            # A definition becomes a plain assignment in Python.
            op = '='
        # For numpy we replace complex expressions involving a single boolean variable into a
        # where(boolvar, expr_if_true, expr_if_false)
        if (statement.used_boolean_variables is not None and len(statement.used_boolean_variables)==1
                and brian_dtype_from_dtype(statement.dtype)=='float'
                and statement.complexity_std>sum(statement.complexities.values())):
            used_boolvars = statement.used_boolean_variables
            bool_simp = statement.boolean_simplified_expressions
            boolvar = used_boolvars[0]
            # Pick out the expression simplified for boolvar==True and the one
            # simplified for boolvar==False.
            for bool_assigns, simp_expr in bool_simp.items():
                _, boolval = bool_assigns[0]
                if boolval:
                    expr_true = simp_expr
                else:
                    expr_false = simp_expr
            code = f'{var} {op} _numpy.where({boolvar}, {expr_true}, {expr_false})'
        else:
            code = f"{var} {op} {self.translate_expression(expr)}"
        if len(comment):
            code += f" # {comment}"
        return code

    def ufunc_at_vectorisation(self, statement, variables, indices,
                               conditional_write_vars, created_vars, used_variables):
        """Generate code for *statement* using ``numpy.ufunc.at`` where needed.

        Raises `VectorisationError` when the statement cannot be expressed
        this way: vectorisation disabled, unsupported in-place operator, or
        the target variable also appears as a non-``_idx``-indexed read
        (see GitHub PR #531).
        """
        if not self._use_ufunc_at_vectorisation:
            raise VectorisationError()
        # Avoids circular import
        from brian2.devices.device import device

        # See https://github.com/brian-team/brian2/pull/531 for explanation
        used = set(get_identifiers(statement.expr))
        used = used.intersection(k for k in list(variables.keys()) if k in indices and indices[k]!='_idx')
        used_variables.update(used)
        if statement.var in used_variables:
            raise VectorisationError()

        expr = NumpyNodeRenderer(auto_vectorise=self.auto_vectorise).render_expr(statement.expr)

        if statement.op == ':=' or indices[statement.var] == '_idx' or not statement.inplace:
            if statement.op == ':=':
                op = '='
            else:
                op = statement.op
            line = f'{statement.var} {op} {expr}'
        elif statement.inplace:
            # Map the in-place operator to the matching numpy ufunc so that
            # repeated indices accumulate correctly via ufunc.at.
            if statement.op == '+=':
                ufunc_name = '_numpy.add'
            elif statement.op == '*=':
                ufunc_name = '_numpy.multiply'
            elif statement.op == '/=':
                ufunc_name = '_numpy.divide'
            elif statement.op == '-=':
                ufunc_name = '_numpy.subtract'
            else:
                raise VectorisationError()
            array_name = device.get_array_name(variables[statement.var])
            idx = indices[statement.var]
            line = f'{ufunc_name}.at({array_name}, {idx}, {expr})'
            line = self.conditional_write(line, statement, variables,
                                          conditional_write_vars=conditional_write_vars,
                                          created_vars=created_vars)
        else:
            raise VectorisationError()

        if len(statement.comment):
            line += f" # {statement.comment}"

        return line

    def vectorise_code(self, statements, variables, variable_indices, index='_idx'):
        """Generate code for statements that may write through repeated indices.

        First tries ``ufunc.at``-based translation per statement; if any
        statement raises `VectorisationError`, falls back to an explicit
        Python loop over the indices (slow — an info message is logged).
        """
        created_vars = {stmt.var for stmt in statements if stmt.op == ':='}
        try:
            lines = []
            used_variables = set()
            for statement in statements:
                lines.append(f'# Abstract code: {statement.var} {statement.op} {statement.expr}')
                # We treat every statement individually with its own read and write code
                # to be on the safe side
                read, write, indices, conditional_write_vars = self.arrays_helper([statement])
                # We make sure that we only add code to `lines` after it went
                # through completely
                ufunc_lines = []
                # No need to load a variable if it is only in read because of
                # the in-place operation
                if (statement.inplace and
                        variable_indices[statement.var] != '_idx' and
                        statement.var not in get_identifiers(statement.expr)):
                    read = read - {statement.var}
                ufunc_lines.extend(self.read_arrays(read, write, indices,
                                                    variables, variable_indices))
                ufunc_lines.append(self.ufunc_at_vectorisation(statement,
                                                               variables,
                                                               variable_indices,
                                                               conditional_write_vars,
                                                               created_vars,
                                                               used_variables,
                                                               ))
                # Do not write back such values, the ufuncs have modified the
                # underlying array already
                if statement.inplace and variable_indices[statement.var] != '_idx':
                    write = write - {statement.var}
                ufunc_lines.extend(self.write_arrays([statement], read, write,
                                                     variables,
                                                     variable_indices))
                lines.extend(ufunc_lines)
        except VectorisationError:
            if self._use_ufunc_at_vectorisation:
                logger.info("Failed to vectorise code, falling back on Python loop: note that "
                            "this will be very slow! Switch to another code generation target for "
                            "best performance (e.g. cython). First line is: "+str(statements[0]),
                            once=True)
            lines = []
            # Explicit Python loop over all indices; inner code is indented
            # into the loop body.
            lines.extend(['_full_idx = _idx',
                          'for _idx in _full_idx:',
                          '    _vectorisation_idx = _idx'
                          ])
            read, write, indices, conditional_write_vars = self.arrays_helper(statements)
            lines.extend(indent(code) for code in
                         self.read_arrays(read, write, indices,
                                          variables, variable_indices))
            for statement in statements:
                line = self.translate_statement(statement)
                if statement.var in conditional_write_vars:
                    lines.append(indent(f'if {conditional_write_vars[statement.var]}:'))
                    lines.append(indent(line, 2))
                else:
                    lines.append(indent(line))
            lines.extend(indent(code) for code in
                         self.write_arrays(statements, read, write,
                                           variables, variable_indices))
        return lines

    def read_arrays(self, read, write, indices, variables, variable_indices):
        """Return code lines loading index arrays and read variables into locals."""
        # index and read arrays (index arrays first)
        lines = []
        for varname in itertools.chain(indices, read):
            var = variables[varname]
            index = variable_indices[varname]
            line = f"{varname} = {self.get_array_name(var)}"
            if not index in self.iterate_all:
                line += f"[{index}]"
            elif varname in write:
                # avoid potential issues with aliased variables, see github #259
                line += '.copy()'
            lines.append(line)
        return lines

    def write_arrays(self, statements, read, write, variables, variable_indices):
        """Return code lines writing local results back to the underlying arrays.

        The write-back is skipped when every operation on a variable was
        in-place on the whole array (the array was already modified).
        """
        # write arrays
        lines = []
        for varname in write:
            var = variables[varname]
            index_var = variable_indices[varname]
            # check if all operations were inplace and we're operating on the
            # whole vector, if so we don't need to write the array back
            if index_var not in self.iterate_all or varname in read:
                all_inplace = False
            else:
                all_inplace = True
                for stmt in statements:
                    if stmt.var == varname and not stmt.inplace:
                        all_inplace = False
                        break
            if not all_inplace:
                line = self.get_array_name(var)
                if index_var in self.iterate_all:
                    line = f"{line}[:]"
                else:
                    line = f"{line}[{index_var}]"
                line = f"{line} = {varname}"
                lines.append(line)
        return lines

    def conditional_write(self, line, stmt, variables, conditional_write_vars,
                          created_vars):
        """Rewrite *line* so that it only writes where the conditional-write
        index (e.g. ``not_refractory``) holds, by indexing every non-scalar
        array variable (and ``_vectorisation_idx``) with that index."""
        if stmt.var in conditional_write_vars:
            subs = {}
            index = conditional_write_vars[stmt.var]
            # we replace all var with var[index], but actually we use this repl_string first because
            # we don't want to end up with lines like x[not_refractory[not_refractory]] when
            # multiple substitution passes are invoked
            repl_string = '#$(@#&$@$*U#@)$@(#'  # this string shouldn't occur anywhere I hope! :)
            for varname, var in list(variables.items()):
                if isinstance(var, ArrayVariable) and not var.scalar:
                    subs[varname] = f"{varname}[{repl_string}]"
            # all newly created vars are arrays and will need indexing
            for varname in created_vars:
                subs[varname] = f"{varname}[{repl_string}]"
            # Also index _vectorisation_idx so that e.g. rand() works correctly
            subs['_vectorisation_idx'] = f"_vectorisation_idx[{repl_string}]"
            line = word_substitute(line, subs)
            line = line.replace(repl_string, index)
        return line

    def translate_one_statement_sequence(self, statements, scalar=False):
        """Translate a statement sequence: simple read/translate/write when all
        indices are unique (or the code is scalar), the repeated-index path
        (`vectorise_code`) otherwise."""
        variables = self.variables
        variable_indices = self.variable_indices
        read, write, indices, conditional_write_vars = self.arrays_helper(statements)
        lines = []
        all_unique = not self.has_repeated_indices(statements)

        if scalar or all_unique:
            # Simple translation
            lines.extend(self.read_arrays(read, write, indices, variables,
                                          variable_indices))
            created_vars = {stmt.var for stmt in statements if stmt.op == ':='}
            for stmt in statements:
                line = self.translate_statement(stmt)
                line = self.conditional_write(line, stmt, variables,
                                              conditional_write_vars,
                                              created_vars)
                lines.append(line)
            lines.extend(self.write_arrays(statements, read, write, variables,
                                           variable_indices))
        else:
            # More complex translation to deal with repeated indices
            lines.extend(self.vectorise_code(statements, variables,
                                             variable_indices))
        return lines

    def determine_keywords(self):
        """Expose extra template keywords; currently only whether scipy can be
        imported in this environment."""
        try:
            import scipy
            scipy_available = True
        except ImportError:
            scipy_available = False

        return {'_scipy_available': scipy_available}
################################################################################
# Implement functions
################################################################################
# Functions that exist under the same name in numpy: register the numpy
# callable directly as the implementation for this code generator.
for func_name, func in [('sin', np.sin), ('cos', np.cos), ('tan', np.tan),
                        ('sinh', np.sinh), ('cosh', np.cosh), ('tanh', np.tanh),
                        ('exp', np.exp), ('log', np.log), ('log10', np.log10),
                        ('sqrt', np.sqrt), ('arcsin', np.arcsin),
                        ('arccos', np.arccos), ('arctan', np.arctan),
                        ('abs', np.abs), ('sign', np.sign)]:
    DEFAULT_FUNCTIONS[func_name].implementations.add_implementation(NumpyCodeGenerator,
                                                                    code=func)
# Functions that are implemented in a somewhat special way
def randn_func(vectorisation_idx):
    """Standard-normal samples, one per element of *vectorisation_idx*.

    When *vectorisation_idx* has no length (single-index execution), a
    single scalar sample is returned instead of an array.
    """
    try:
        n_values = len(vectorisation_idx)
    except TypeError:
        # scalar value
        return np.random.randn()
    return np.random.randn(n_values)
def rand_func(vectorisation_idx):
    """Uniform samples in [0, 1), one per element of *vectorisation_idx*.

    When *vectorisation_idx* has no length (single-index execution), a
    single scalar sample is returned instead of an array.
    """
    try:
        n_values = len(vectorisation_idx)
    except TypeError:
        # scalar value
        return np.random.rand()
    return np.random.rand(n_values)
def poisson_func(lam, vectorisation_idx):
    """Poisson(*lam*) samples, one per element of *vectorisation_idx*.

    When *vectorisation_idx* has no length (single-index execution), a
    single scalar sample is returned instead of an array.
    """
    try:
        n_values = len(vectorisation_idx)
    except TypeError:
        # scalar value
        return np.random.poisson(lam)
    return np.random.poisson(lam, size=n_values)
# Register the special-cased implementations defined above, plus a few small
# wrappers around numpy for clip/int/ceil/floor.
DEFAULT_FUNCTIONS['randn'].implementations.add_implementation(NumpyCodeGenerator,
                                                              code=randn_func)
DEFAULT_FUNCTIONS['rand'].implementations.add_implementation(NumpyCodeGenerator,
                                                             code=rand_func)
DEFAULT_FUNCTIONS['poisson'].implementations.add_implementation(NumpyCodeGenerator,
                                                                code=poisson_func)
clip_func = lambda array, a_min, a_max: np.clip(array, a_min, a_max)
DEFAULT_FUNCTIONS['clip'].implementations.add_implementation(NumpyCodeGenerator,
                                                             code=clip_func)
int_func = lambda value: np.int32(value)
DEFAULT_FUNCTIONS['int'].implementations.add_implementation(NumpyCodeGenerator,
                                                            code=int_func)
# ceil/floor return int32 (not float) to match the expected integer result.
ceil_func = lambda value: np.int32(np.ceil(value))
DEFAULT_FUNCTIONS['ceil'].implementations.add_implementation(NumpyCodeGenerator,
                                                             code=ceil_func)
floor_func = lambda value: np.int32(np.floor(value))
DEFAULT_FUNCTIONS['floor'].implementations.add_implementation(NumpyCodeGenerator,
                                                              code=floor_func)

# We need to explicitly add an implementation for the timestep function,
# otherwise Brian would *add* units during simulation, thinking that the
# timestep function would not work correctly otherwise. This would slow the
# function down significantly.
DEFAULT_FUNCTIONS['timestep'].implementations.add_implementation(NumpyCodeGenerator,
                                                                 code=timestep)
| 46.959538 | 110 | 0.551576 |
import itertools
import numpy as np
from brian2.parsing.bast import brian_dtype_from_dtype
from brian2.parsing.rendering import NumpyNodeRenderer
from brian2.core.functions import DEFAULT_FUNCTIONS, timestep
from brian2.core.variables import ArrayVariable
from brian2.utils.stringtools import get_identifiers, word_substitute, indent
from brian2.utils.logger import get_logger
from .base import CodeGenerator
__all__ = ['NumpyCodeGenerator']
logger = get_logger(__name__)
class VectorisationError(Exception):
pass
class NumpyCodeGenerator(CodeGenerator):
class_name = 'numpy'
_use_ufunc_at_vectorisation = True
def translate_expression(self, expr):
expr = word_substitute(expr, self.func_name_replacements)
return NumpyNodeRenderer(auto_vectorise=self.auto_vectorise).render_expr(expr, self.variables).strip()
def translate_statement(self, statement):
var, op, expr, comment = (statement.var, statement.op,
statement.expr, statement.comment)
if op == ':=':
op = '='
if (statement.used_boolean_variables is not None and len(statement.used_boolean_variables)==1
and brian_dtype_from_dtype(statement.dtype)=='float'
and statement.complexity_std>sum(statement.complexities.values())):
used_boolvars = statement.used_boolean_variables
bool_simp = statement.boolean_simplified_expressions
boolvar = used_boolvars[0]
for bool_assigns, simp_expr in bool_simp.items():
_, boolval = bool_assigns[0]
if boolval:
expr_true = simp_expr
else:
expr_false = simp_expr
code = f'{var} {op} _numpy.where({boolvar}, {expr_true}, {expr_false})'
else:
code = f"{var} {op} {self.translate_expression(expr)}"
if len(comment):
code += f" # {comment}"
return code
def ufunc_at_vectorisation(self, statement, variables, indices,
conditional_write_vars, created_vars, used_variables):
if not self._use_ufunc_at_vectorisation:
raise VectorisationError()
from brian2.devices.device import device
used = set(get_identifiers(statement.expr))
used = used.intersection(k for k in list(variables.keys()) if k in indices and indices[k]!='_idx')
used_variables.update(used)
if statement.var in used_variables:
raise VectorisationError()
expr = NumpyNodeRenderer(auto_vectorise=self.auto_vectorise).render_expr(statement.expr)
if statement.op == ':=' or indices[statement.var] == '_idx' or not statement.inplace:
if statement.op == ':=':
op = '='
else:
op = statement.op
line = f'{statement.var} {op} {expr}'
elif statement.inplace:
if statement.op == '+=':
ufunc_name = '_numpy.add'
elif statement.op == '*=':
ufunc_name = '_numpy.multiply'
elif statement.op == '/=':
ufunc_name = '_numpy.divide'
elif statement.op == '-=':
ufunc_name = '_numpy.subtract'
else:
raise VectorisationError()
array_name = device.get_array_name(variables[statement.var])
idx = indices[statement.var]
line = f'{ufunc_name}.at({array_name}, {idx}, {expr})'
line = self.conditional_write(line, statement, variables,
conditional_write_vars=conditional_write_vars,
created_vars=created_vars)
else:
raise VectorisationError()
if len(statement.comment):
line += f" # {statement.comment}"
return line
def vectorise_code(self, statements, variables, variable_indices, index='_idx'):
created_vars = {stmt.var for stmt in statements if stmt.op == ':='}
try:
lines = []
used_variables = set()
for statement in statements:
lines.append(f'# Abstract code: {statement.var} {statement.op} {statement.expr}')
read, write, indices, conditional_write_vars = self.arrays_helper([statement])
ufunc_lines = []
if (statement.inplace and
variable_indices[statement.var] != '_idx' and
statement.var not in get_identifiers(statement.expr)):
read = read - {statement.var}
ufunc_lines.extend(self.read_arrays(read, write, indices,
variables, variable_indices))
ufunc_lines.append(self.ufunc_at_vectorisation(statement,
variables,
variable_indices,
conditional_write_vars,
created_vars,
used_variables,
))
if statement.inplace and variable_indices[statement.var] != '_idx':
write = write - {statement.var}
ufunc_lines.extend(self.write_arrays([statement], read, write,
variables,
variable_indices))
lines.extend(ufunc_lines)
except VectorisationError:
if self._use_ufunc_at_vectorisation:
logger.info("Failed to vectorise code, falling back on Python loop: note that "
"this will be very slow! Switch to another code generation target for "
"best performance (e.g. cython). First line is: "+str(statements[0]),
once=True)
lines = []
lines.extend(['_full_idx = _idx',
'for _idx in _full_idx:',
' _vectorisation_idx = _idx'
])
read, write, indices, conditional_write_vars = self.arrays_helper(statements)
lines.extend(indent(code) for code in
self.read_arrays(read, write, indices,
variables, variable_indices))
for statement in statements:
line = self.translate_statement(statement)
if statement.var in conditional_write_vars:
lines.append(indent(f'if {conditional_write_vars[statement.var]}:'))
lines.append(indent(line, 2))
else:
lines.append(indent(line))
lines.extend(indent(code) for code in
self.write_arrays(statements, read, write,
variables, variable_indices))
return lines
def read_arrays(self, read, write, indices, variables, variable_indices):
lines = []
for varname in itertools.chain(indices, read):
var = variables[varname]
index = variable_indices[varname]
line = f"{varname} = {self.get_array_name(var)}"
if not index in self.iterate_all:
line += f"[{index}]"
elif varname in write:
line += '.copy()'
lines.append(line)
return lines
def write_arrays(self, statements, read, write, variables, variable_indices):
lines = []
for varname in write:
var = variables[varname]
index_var = variable_indices[varname]
# whole vector, if so we don't need to write the array back
if index_var not in self.iterate_all or varname in read:
all_inplace = False
else:
all_inplace = True
for stmt in statements:
if stmt.var == varname and not stmt.inplace:
all_inplace = False
break
if not all_inplace:
line = self.get_array_name(var)
if index_var in self.iterate_all:
line = f"{line}[:]"
else:
line = f"{line}[{index_var}]"
line = f"{line} = {varname}"
lines.append(line)
return lines
def conditional_write(self, line, stmt, variables, conditional_write_vars,
created_vars):
if stmt.var in conditional_write_vars:
subs = {}
index = conditional_write_vars[stmt.var]
# multiple substitution passes are invoked
repl_string = 'epl_string}]"
for varname in created_vars:
subs[varname] = f"{varname}[{repl_string}]"
subs['_vectorisation_idx'] = f"_vectorisation_idx[{repl_string}]"
line = word_substitute(line, subs)
line = line.replace(repl_string, index)
return line
def translate_one_statement_sequence(self, statements, scalar=False):
variables = self.variables
variable_indices = self.variable_indices
read, write, indices, conditional_write_vars = self.arrays_helper(statements)
lines = []
all_unique = not self.has_repeated_indices(statements)
if scalar or all_unique:
lines.extend(self.read_arrays(read, write, indices, variables,
variable_indices))
created_vars = {stmt.var for stmt in statements if stmt.op == ':='}
for stmt in statements:
line = self.translate_statement(stmt)
line = self.conditional_write(line, stmt, variables,
conditional_write_vars,
created_vars)
lines.append(line)
lines.extend(self.write_arrays(statements, read, write, variables,
variable_indices))
else:
lines.extend(self.vectorise_code(statements, variables,
variable_indices))
return lines
def determine_keywords(self):
try:
import scipy
scipy_available = True
except ImportError:
scipy_available = False
return {'_scipy_available': scipy_available}
| true | true |
f721282f6a2dc461afc65e2af7a6340bab2f41d6 | 7,584 | py | Python | cfripper/rules/wildcard_principals.py | claytonbrown/cfripper | 869eb5861da3fcfaa5e2f5e877fa9c30f60cfce9 | [
"Apache-2.0"
] | 360 | 2018-08-08T12:34:58.000Z | 2022-03-25T17:01:41.000Z | cfripper/rules/wildcard_principals.py | Skyscanner/cfripper | 1bc3ff483ac9c126037f796950ebe52cf463ac17 | [
"Apache-2.0"
] | 40 | 2018-11-26T07:08:15.000Z | 2022-03-02T09:10:45.000Z | cfripper/rules/wildcard_principals.py | claytonbrown/cfripper | 869eb5861da3fcfaa5e2f5e877fa9c30f60cfce9 | [
"Apache-2.0"
] | 51 | 2018-11-09T11:46:32.000Z | 2022-03-28T08:47:28.000Z | __all__ = ["GenericWildcardPrincipalRule", "PartialWildcardPrincipalRule", "FullWildcardPrincipalRule"]
import logging
import re
from typing import Dict, Optional
from pycfmodel.model.cf_model import CFModel
from pycfmodel.model.resources.iam_managed_policy import IAMManagedPolicy
from pycfmodel.model.resources.iam_policy import IAMPolicy
from pycfmodel.model.resources.iam_role import IAMRole
from pycfmodel.model.resources.iam_user import IAMUser
from pycfmodel.model.resources.properties.policy_document import PolicyDocument
from pycfmodel.model.resources.s3_bucket_policy import S3BucketPolicy
from pycfmodel.model.resources.sns_topic_policy import SNSTopicPolicy
from pycfmodel.model.resources.sqs_queue_policy import SQSQueuePolicy
from cfripper.config.regex import REGEX_FULL_WILDCARD_PRINCIPAL, REGEX_PARTIAL_WILDCARD_PRINCIPAL
from cfripper.model.enums import RuleGranularity, RuleRisk
from cfripper.model.result import Result
from cfripper.rules.base_rules import PrincipalCheckingRule
logger = logging.getLogger(__file__)
class GenericWildcardPrincipalRule(PrincipalCheckingRule):
    """Base rule that flags Allow statements whose principals match a wildcard
    pattern.

    Subclasses choose the pattern via ``FULL_REGEX`` (full wildcard vs.
    partial/account-wide wildcard) and set their own reason text and risk.
    """

    REASON_WILDCARD_PRINCIPAL = "{} should not allow wildcards in principals (principal: '{}')"
    GRANULARITY = RuleGranularity.RESOURCE

    # Bare 12-digit AWS account IDs, and IAM ARNs capturing the account part.
    AWS_ACCOUNT_ID_PATTERN = re.compile(r"^(\d{12})$")
    IAM_PATTERN = re.compile(r"arn:aws:iam::(\d*|\*):.*")
    FULL_REGEX = REGEX_FULL_WILDCARD_PRINCIPAL

    def invoke(self, cfmodel: CFModel, extras: Optional[Dict] = None) -> Result:
        """Scan every policy-bearing resource in *cfmodel* and collect failures."""
        result = Result()
        for logical_id, resource in cfmodel.Resources.items():
            if isinstance(resource, (IAMManagedPolicy, IAMPolicy, S3BucketPolicy, SNSTopicPolicy, SQSQueuePolicy)):
                self.check_for_wildcards(result, logical_id, resource.Properties.PolicyDocument, extras)
            elif isinstance(resource, (IAMRole, IAMUser)):
                if isinstance(resource, IAMRole):
                    # Roles additionally carry a trust (assume-role) policy.
                    self.check_for_wildcards(result, logical_id, resource.Properties.AssumeRolePolicyDocument, extras)
                if resource.Properties and resource.Properties.Policies:
                    for policy in resource.Properties.Policies:
                        self.check_for_wildcards(result, logical_id, policy.PolicyDocument, extras)
        return result

    def check_for_wildcards(
        self, result: Result, logical_id: str, resource: PolicyDocument, extras: Optional[Dict] = None
    ):
        """Add a failure for each Allow-statement principal matching ``FULL_REGEX``.

        Statements that carry a Condition are only logged (not failed), and
        principals whose account ID is in the configured allow-list are
        skipped.
        """
        for statement in resource._statement_as_list():
            if statement.Effect == "Allow" and statement.principals_with(self.FULL_REGEX):
                for principal in statement.get_principal_list():
                    account_id_match = self.IAM_PATTERN.match(principal) or self.AWS_ACCOUNT_ID_PATTERN.match(principal)
                    account_id = account_id_match.group(1) if account_id_match else None

                    # Check if account ID is allowed. `self._get_allowed_from_config()` used here
                    # to reduce number of false negatives and only allow exemptions for accounts
                    # which belong to AWS Services (such as ELB and ElastiCache).
                    if account_id in self._get_allowed_from_config():
                        continue

                    if statement.Condition and statement.Condition.dict():
                        logger.warning(
                            f"Not adding {type(self).__name__} failure in {logical_id} because there are conditions: "
                            f"{statement.Condition}"
                        )
                    else:
                        self.add_failure_to_result(
                            result,
                            self.REASON_WILDCARD_PRINCIPAL.format(logical_id, principal),
                            resource_ids={logical_id},
                            context={
                                "config": self._config,
                                "extras": extras,
                                "logical_id": logical_id,
                                "resource": resource,
                                "statement": statement,
                                "principal": principal,
                                "account_id": account_id,
                            },
                        )
class PartialWildcardPrincipalRule(GenericWildcardPrincipalRule):
    """
    Checks for any wildcard or account-wide principals defined in any statements. This rule will flag
    as non-compliant any principals where `root` or `*` are included at the end of the value, for
    example, `arn:aws:iam:12345:12345*`.

    Risk:
        It might allow other AWS identities or the root access of the account to escalate privileges.

    Fix:
        Where possible, restrict the access to only the required resources.
        For example, instead of `Principal: "*"`, include a list of the roles that need access.

    Filters context:
        | Parameter   | Type               | Description                                                    |
        |:-----------:|:------------------:|:--------------------------------------------------------------:|
        |`config`     | str                | `config` variable available inside the rule                    |
        |`extras`     | str                | `extras` variable available inside the rule                    |
        |`logical_id` | str                | ID used in Cloudformation to refer the resource being analysed |
        |`resource`   | `S3BucketPolicy`   | Resource that is being addressed                               |
        |`statement`  | `Statement`        | Statement being checked found in the Resource                  |
        |`principal`  | str                | AWS Principal being checked found in the statement             |
        |`account_id` | str                | Account ID found in the principal                              |
    """

    # Overrides for the base class: different reason text, medium risk, and a
    # pattern that also matches partial/account-wide wildcards.
    REASON_WILDCARD_PRINCIPAL = (
        "{} should not allow wildcard in principals or account-wide principals (principal: '{}')"
    )
    RISK_VALUE = RuleRisk.MEDIUM
    FULL_REGEX = REGEX_PARTIAL_WILDCARD_PRINCIPAL
class FullWildcardPrincipalRule(GenericWildcardPrincipalRule):
    """
    Checks for any wildcard principals defined in any statements.

    Risk:
        It might allow other AWS identities to escalate privileges.

    Fix:
        Where possible, restrict the access to only the required resources.
        For example, instead of `Principal: "*"`, include a list of the roles that need access.

    Filters context:
        | Parameter   | Type               | Description                                                    |
        |:-----------:|:------------------:|:--------------------------------------------------------------:|
        |`config`     | str                | `config` variable available inside the rule                    |
        |`extras`     | str                | `extras` variable available inside the rule                    |
        |`logical_id` | str                | ID used in Cloudformation to refer the resource being analysed |
        |`resource`   | `S3BucketPolicy`   | Resource that is being addressed                               |
        |`statement`  | `Statement`        | Statement being checked found in the Resource                  |
        |`principal`  | str                | AWS Principal being checked found in the statement             |
        |`account_id` | str                | Account ID found in the principal                              |
    """

    # Inherits FULL_REGEX (full-wildcard pattern) and reason text from the
    # base class; only the risk level is raised.
    RISK_VALUE = RuleRisk.HIGH
| 54.956522 | 120 | 0.587421 | __all__ = ["GenericWildcardPrincipalRule", "PartialWildcardPrincipalRule", "FullWildcardPrincipalRule"]
import logging
import re
from typing import Dict, Optional
from pycfmodel.model.cf_model import CFModel
from pycfmodel.model.resources.iam_managed_policy import IAMManagedPolicy
from pycfmodel.model.resources.iam_policy import IAMPolicy
from pycfmodel.model.resources.iam_role import IAMRole
from pycfmodel.model.resources.iam_user import IAMUser
from pycfmodel.model.resources.properties.policy_document import PolicyDocument
from pycfmodel.model.resources.s3_bucket_policy import S3BucketPolicy
from pycfmodel.model.resources.sns_topic_policy import SNSTopicPolicy
from pycfmodel.model.resources.sqs_queue_policy import SQSQueuePolicy
from cfripper.config.regex import REGEX_FULL_WILDCARD_PRINCIPAL, REGEX_PARTIAL_WILDCARD_PRINCIPAL
from cfripper.model.enums import RuleGranularity, RuleRisk
from cfripper.model.result import Result
from cfripper.rules.base_rules import PrincipalCheckingRule
logger = logging.getLogger(__file__)
class GenericWildcardPrincipalRule(PrincipalCheckingRule):
REASON_WILDCARD_PRINCIPAL = "{} should not allow wildcards in principals (principal: '{}')"
GRANULARITY = RuleGranularity.RESOURCE
AWS_ACCOUNT_ID_PATTERN = re.compile(r"^(\d{12})$")
IAM_PATTERN = re.compile(r"arn:aws:iam::(\d*|\*):.*")
FULL_REGEX = REGEX_FULL_WILDCARD_PRINCIPAL
def invoke(self, cfmodel: CFModel, extras: Optional[Dict] = None) -> Result:
result = Result()
for logical_id, resource in cfmodel.Resources.items():
if isinstance(resource, (IAMManagedPolicy, IAMPolicy, S3BucketPolicy, SNSTopicPolicy, SQSQueuePolicy)):
self.check_for_wildcards(result, logical_id, resource.Properties.PolicyDocument, extras)
elif isinstance(resource, (IAMRole, IAMUser)):
if isinstance(resource, IAMRole):
self.check_for_wildcards(result, logical_id, resource.Properties.AssumeRolePolicyDocument, extras)
if resource.Properties and resource.Properties.Policies:
for policy in resource.Properties.Policies:
self.check_for_wildcards(result, logical_id, policy.PolicyDocument, extras)
return result
def check_for_wildcards(
self, result: Result, logical_id: str, resource: PolicyDocument, extras: Optional[Dict] = None
):
for statement in resource._statement_as_list():
if statement.Effect == "Allow" and statement.principals_with(self.FULL_REGEX):
for principal in statement.get_principal_list():
account_id_match = self.IAM_PATTERN.match(principal) or self.AWS_ACCOUNT_ID_PATTERN.match(principal)
account_id = account_id_match.group(1) if account_id_match else None
if account_id in self._get_allowed_from_config():
continue
if statement.Condition and statement.Condition.dict():
logger.warning(
f"Not adding {type(self).__name__} failure in {logical_id} because there are conditions: "
f"{statement.Condition}"
)
else:
self.add_failure_to_result(
result,
self.REASON_WILDCARD_PRINCIPAL.format(logical_id, principal),
resource_ids={logical_id},
context={
"config": self._config,
"extras": extras,
"logical_id": logical_id,
"resource": resource,
"statement": statement,
"principal": principal,
"account_id": account_id,
},
)
class PartialWildcardPrincipalRule(GenericWildcardPrincipalRule):
REASON_WILDCARD_PRINCIPAL = (
"{} should not allow wildcard in principals or account-wide principals (principal: '{}')"
)
RISK_VALUE = RuleRisk.MEDIUM
FULL_REGEX = REGEX_PARTIAL_WILDCARD_PRINCIPAL
class FullWildcardPrincipalRule(GenericWildcardPrincipalRule):
RISK_VALUE = RuleRisk.HIGH
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.