hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f72e27140e31589687803d315c93b47f71e3a265 | 770 | py | Python | backend/migrations/versions/c8f4b08529a4_.py | Tobiaqs/realtime | e6ff4110a71e1a806c37ae9b071328e1a5c6f41e | [
"MIT"
] | 2 | 2017-05-16T11:49:10.000Z | 2017-07-29T23:50:10.000Z | backend/migrations/versions/c8f4b08529a4_.py | Tobiaqs/realtime | e6ff4110a71e1a806c37ae9b071328e1a5c6f41e | [
"MIT"
] | 45 | 2017-05-10T08:48:15.000Z | 2020-08-31T10:17:19.000Z | backend/migrations/versions/c8f4b08529a4_.py | CodeYellowBV/cy-time | e5c0641e772c2c1ade88df5564d98a2765d5753a | [
"MIT"
] | 2 | 2017-08-02T18:18:41.000Z | 2020-10-12T09:01:15.000Z | """empty message
Revision ID: c8f4b08529a4
Revises: bbd324935815
Create Date: 2017-05-02 00:04:57.131824
"""
# revision identifiers, used by Alembic.
revision = 'c8f4b08529a4'
down_revision = 'bbd324935815'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add a nullable ``entries.user_id`` column with a FK to ``users.id``.

    The FK is created with ``ondelete='cascade'`` so entries are removed
    when their owning user is deleted. Constraint name is auto-generated
    (``None``).
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('entries', sa.Column('user_id', sa.Integer(), nullable=True))
    op.create_foreign_key(None, 'entries', 'users', ['user_id'], ['id'], ondelete='cascade')
    # ### end Alembic commands ###
def downgrade():
    """Reverse :func:`upgrade`: drop the FK constraint then the column.

    NOTE(review): passing ``None`` as the constraint name to
    ``drop_constraint`` relies on the backend/naming convention resolving
    the auto-generated name — verify on the target database.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_constraint(None, 'entries', type_='foreignkey')
    op.drop_column('entries', 'user_id')
    # ### end Alembic commands ###
| 26.551724 | 92 | 0.683117 |
revision = 'c8f4b08529a4'
down_revision = 'bbd324935815'
from alembic import op
import sqlalchemy as sa
def upgrade():
| true | true |
f72e27886171d70eb1ffb2fa7dec9f742588ee5f | 2,303 | py | Python | ckan/lib/navl/validators.py | code4sac/ckan | 5354769c480f4ad115bf53ca7450d3f49c837edb | [
"Apache-2.0"
] | 2 | 2015-11-05T12:04:52.000Z | 2017-08-09T11:29:11.000Z | ckan/lib/navl/validators.py | code4sac/ckan | 5354769c480f4ad115bf53ca7450d3f49c837edb | [
"Apache-2.0"
] | null | null | null | ckan/lib/navl/validators.py | code4sac/ckan | 5354769c480f4ad115bf53ca7450d3f49c837edb | [
"Apache-2.0"
] | 4 | 2016-12-17T22:26:06.000Z | 2017-01-20T21:51:24.000Z | from dictization_functions import missing, StopOnError, Invalid
from pylons.i18n import _
def identity_converter(key, data, errors, context):
    """No-op converter: leaves ``data`` untouched and returns ``None``."""
    return None
def keep_extras(key, data, errors, context):
    """Re-file each entry of the extras sub-dict under its own flattened key.

    Pops the dict stored at ``key`` and writes every ``(extras_key, value)``
    pair back into ``data`` keyed by the parent key tuple plus the extra's
    name.

    NOTE(review): ``iteritems()`` is Python 2 only — this module targets the
    legacy Python 2 CKAN/Pylons stack.
    """
    extras = data.pop(key, {})
    for extras_key, value in extras.iteritems():
        data[key[:-1] + (extras_key,)] = value
def not_missing(key, data, errors, context):
    """Fail validation when the value is the ``missing`` sentinel.

    Appends 'Missing value' to ``errors[key]`` and raises ``StopOnError``
    so later validators for this key are skipped. Present-but-empty values
    (e.g. '') pass — use :func:`not_empty` to reject those too.
    """
    value = data.get(key)
    if value is missing:
        errors[key].append(_('Missing value'))
        raise StopOnError
def not_empty(key, data, errors, context):
    """Fail validation when the value is falsy or the ``missing`` sentinel.

    Appends 'Missing value' to ``errors[key]`` and raises ``StopOnError``
    to skip any remaining validators for this key.
    """
    value = data.get(key)
    if not value or value is missing:
        errors[key].append(_('Missing value'))
        raise StopOnError
def if_empty_same_as(other_key):
    """Validator factory: default this field to the sibling ``other_key``.

    The returned validator copies the value stored under ``other_key``
    (within the same parent key tuple) whenever this key's value is empty
    or the ``missing`` sentinel.
    """
    def copy_from_sibling(key, data, errors, context):
        current = data.get(key)
        if not current or current is missing:
            sibling_key = key[:-1] + (other_key,)
            data[key] = data[sibling_key]
    return copy_from_sibling
def both_not_empty(other_key):
    """Validator factory: error only when BOTH this field and ``other_key``
    are empty.

    The returned validator appends 'Missing value' and raises
    ``StopOnError`` if this key's value AND the sibling ``other_key`` value
    are each empty (falsy) or the ``missing`` sentinel.
    """
    def validator(key, data, errors, context):
        value = data.get(key)
        other_value = data.get(key[:-1] + (other_key,))
        # BUG FIX: the original condition relied on `and` binding tighter
        # than `or`, which made it fire when only ONE of the two values was
        # empty. Parenthesize so it fires only when BOTH are empty/missing.
        if ((not value or value is missing) and
                (not other_value or other_value is missing)):
            errors[key].append(_('Missing value'))
            raise StopOnError
    return validator
def empty(key, data, errors, context):
    """Reject any supplied value for ``key``.

    Removes the value from ``data``; if it was non-empty (and not the
    ``missing`` sentinel) an 'unexpected input' error is recorded.
    """
    value = data.pop(key, None)
    if not value:
        return
    if value is not missing:
        errors[key].append(_(
            'The input field %(name)s was not expected.') % {"name": key[-1]})
def ignore(key, data, errors, context):
    """Discard the value for ``key`` and halt further validation of it."""
    # Pop purely for the side effect; the returned value is not needed
    # (the original bound it to an unused local).
    data.pop(key, None)
    raise StopOnError
def default(defalult_value):
    """Validator factory: substitute ``defalult_value`` when the field is
    empty or the ``missing`` sentinel.

    NOTE(review): the parameter name is a typo ('defalult'); kept unchanged
    because renaming would break any caller passing it by keyword.
    """
    def callable(key, data, errors, context):
        value = data.get(key)
        if not value or value is missing:
            data[key] = defalult_value
    return callable
def ignore_missing(key, data, errors, context):
    """Silently drop the key and stop validating it when the value is the
    ``missing`` sentinel or ``None``. No error is recorded.
    """
    value = data.get(key)
    if value is missing or value is None:
        data.pop(key, None)
        raise StopOnError
def ignore_empty(key, data, errors, context):
    """Silently drop the key and stop validating it when the value is the
    ``missing`` sentinel or falsy (e.g. '', 0, []). No error is recorded.
    """
    value = data.get(key)
    if value is missing or not value:
        data.pop(key, None)
        raise StopOnError
def convert_int(value, context):
    """Coerce ``value`` to ``int``, raising ``Invalid`` on bad input.

    Also catches TypeError so non-numeric types (e.g. ``None`` or a list)
    surface as a user-facing validation error instead of escaping as a
    server error — the original only caught ValueError.
    """
    try:
        return int(value)
    except (ValueError, TypeError):
        raise Invalid(_('Please enter an integer value'))
| 24.242105 | 78 | 0.63439 | from dictization_functions import missing, StopOnError, Invalid
from pylons.i18n import _
def identity_converter(key, data, errors, context):
return
def keep_extras(key, data, errors, context):
extras = data.pop(key, {})
for extras_key, value in extras.iteritems():
data[key[:-1] + (extras_key,)] = value
def not_missing(key, data, errors, context):
value = data.get(key)
if value is missing:
errors[key].append(_('Missing value'))
raise StopOnError
def not_empty(key, data, errors, context):
value = data.get(key)
if not value or value is missing:
errors[key].append(_('Missing value'))
raise StopOnError
def if_empty_same_as(other_key):
def callable(key, data, errors, context):
value = data.get(key)
if not value or value is missing:
data[key] = data[key[:-1] + (other_key,)]
return callable
def both_not_empty(other_key):
def callable(key, data, errors, context):
value = data.get(key)
other_value = data.get(key[:-1] + (other_key,))
if (not value or value is missing and
not other_value or other_value is missing):
errors[key].append(_('Missing value'))
raise StopOnError
return callable
def empty(key, data, errors, context):
value = data.pop(key, None)
if value and value is not missing:
errors[key].append(_(
'The input field %(name)s was not expected.') % {"name": key[-1]})
def ignore(key, data, errors, context):
value = data.pop(key, None)
raise StopOnError
def default(defalult_value):
def callable(key, data, errors, context):
value = data.get(key)
if not value or value is missing:
data[key] = defalult_value
return callable
def ignore_missing(key, data, errors, context):
value = data.get(key)
if value is missing or value is None:
data.pop(key, None)
raise StopOnError
def ignore_empty(key, data, errors, context):
value = data.get(key)
if value is missing or not value:
data.pop(key, None)
raise StopOnError
def convert_int(value, context):
try:
return int(value)
except ValueError:
raise Invalid(_('Please enter an integer value'))
| true | true |
f72e27b5b949fa0026189d37fc6c50cbd9123218 | 848 | py | Python | markovdwp/runtime/utils/common.py | ivannz/MarkovDWP | f10ed7a331ddd9b7fc28c4cab3b05b2352a9ee2b | [
"MIT"
] | null | null | null | markovdwp/runtime/utils/common.py | ivannz/MarkovDWP | f10ed7a331ddd9b7fc28c4cab3b05b2352a9ee2b | [
"MIT"
] | null | null | null | markovdwp/runtime/utils/common.py | ivannz/MarkovDWP | f10ed7a331ddd9b7fc28c4cab3b05b2352a9ee2b | [
"MIT"
] | null | null | null | from ...utils.dicttools import flatten, aggregate, propagate
def prepare_log(details, level=5, delim='/'):
    """Flatten a nested ``details`` dict and aggregate it ``level`` keys
    deep, re-joining the keys with ``delim``.

    Uses the project-local ``flatten``/``aggregate`` helpers, which work
    with '.' as the internal separator; the final mapping swaps '.' for
    ``delim`` (e.g. '/' for TensorBoard-style tags).
    """
    prepared = aggregate(flatten(details, delim='.'), level=level, delim='.')
    return {k.replace('.', delim): v for k, v in prepared.items()}
def weighted_sum(terms, **coef):
    """Combine a dict of loss ``terms`` into one weighted scalar.

    Coefficients given in ``coef`` are spread over the (dot-delimited) term
    keys via the project-local ``propagate`` helper; unspecified prefixes
    inherit the root weight 1.0.

    Returns the (differentiable) total and a dict of each term as a float.
    """
    # 1. compute the final loss
    C = dict(propagate({'': 1.0, **coef}, terms, delim='.'))
    value = sum(v * C[k] for k, v in terms.items())

    # 2. return differentiable loss and its components as floats
    return value, {k: float(v) for k, v in terms.items()}
def collate(records):
    """Merge a sequence of dicts into one dict of lists.

    Each key maps to the list of values it had across ``records``, in
    encounter order.
    """
    merged = {}
    for record in records:
        for key, value in record.items():
            merged.setdefault(key, []).append(value)
    return merged
def linear(t, t0=0, t1=100, v0=1., v1=0.):
    """Linear schedule: returns ``v0`` for t <= t0, ``v1`` for t >= t1,
    and interpolates linearly in between.
    """
    remaining = (t1 - t) / (t1 - t0)
    tau = max(0., min(1., remaining))
    return v0 * tau + v1 * (1 - tau)
| 29.241379 | 77 | 0.59316 | from ...utils.dicttools import flatten, aggregate, propagate
def prepare_log(details, level=5, delim='/'):
prepared = aggregate(flatten(details, delim='.'), level=level, delim='.')
return {k.replace('.', delim): v for k, v in prepared.items()}
def weighted_sum(terms, **coef):
C = dict(propagate({'': 1.0, **coef}, terms, delim='.'))
value = sum(v * C[k] for k, v in terms.items())
return value, {k: float(v) for k, v in terms.items()}
def collate(records):
out = {}
for record in records:
for k, v in record.items():
out.setdefault(k, []).append(v)
return out
def linear(t, t0=0, t1=100, v0=1., v1=0.):
tau = min(1., max(0., (t1 - t) / (t1 - t0)))
return v0 * tau + v1 * (1 - tau)
| true | true |
f72e27c302f229f0a9dadb88497decfdfe800148 | 3,189 | py | Python | remove_code/my_data_mining.py | JohnZhang000/adaptive-jpeg-compression | f54e4798c01169812958f4d5539a03927dbdc313 | [
"MIT"
] | null | null | null | remove_code/my_data_mining.py | JohnZhang000/adaptive-jpeg-compression | f54e4798c01169812958f4d5539a03927dbdc313 | [
"MIT"
] | null | null | null | remove_code/my_data_mining.py | JohnZhang000/adaptive-jpeg-compression | f54e4798c01169812958f4d5539a03927dbdc313 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 26 17:33:03 2021
@author: ubuntu204
"""
import numpy as np
from scipy import stats
import statsmodels.stats.multitest as multitest
import matplotlib.pyplot as plt
import os
import pandas as pd
from pandas import Series,DataFrame
# import seaborn as sns
# import palettable
from sklearn import datasets
from tqdm import tqdm
plt.rcParams['font.sans-serif']=['SimHei']
# plt.rcParams['axes.unicode_mnius']=False
epsilon=1e-10
def volcano_mine(data1,data2,method='hs',flag_output_src=0,flag_plot=0):
    """Compute volcano-plot coordinates for two sample groups.

    For every column (feature): the log2 fold change of group means
    (data2 vs data1) and -log10 of the multiple-testing-corrected
    two-sample t-test p-value.

    NOTE(review): ``flag_output_src`` and ``flag_plot`` are accepted but
    never used inside the function.
    """
    # epsilon keeps the ratio and the logs well-defined for zero entries
    data1=data1+epsilon
    data2=data2+epsilon
    mdata1=data1.mean(axis=0)
    mdata2=data2.mean(axis=0)
    fold_change=(mdata2)/(mdata1)
    log2_fold_change=np.log2(fold_change)
    # per-column two-sample t-test (could be vectorized with axis=, kept
    # as a loop for the tqdm progress bar)
    p_values=np.zeros_like(mdata1)
    for i in tqdm(range(len(p_values))):
        t,p=stats.ttest_ind(data1[:,i],data2[:,i])
        p_values[i]=p
    # multiple-testing correction across all features ('hs' = Holm-Sidak)
    rejects,pvals_corrected,alphaSidak,alphaBonf=multitest.multipletests(p_values,method=method)
    log10_pvals_corrected=np.log10(pvals_corrected+epsilon)*(-1)
    return log2_fold_change,log10_pvals_corrected
def plot_volume(log2_fold_change,log10_pvals_corrected,title=None,saved_name=None):
    """Draw a volcano plot from precomputed coordinates.

    Points with corrected p < 0.05 and fold change >= 1.2 are green
    (up-regulated), those with fold change <= 1/1.2 are red
    (down-regulated), everything else grey. Optionally saves the figure
    (300 dpi) to ``saved_name``.
    """
    npt=len(log2_fold_change)
    colors=list(['grey']*npt)
    # up-regulated & significant -> green
    idx_green=(log2_fold_change>=np.log2(1.2))&(log10_pvals_corrected>(-np.log10(0.05)))
    for i in range(len(idx_green)):
        if idx_green[i]:
            colors[i]='green'
    # down-regulated & significant -> red
    idx_red=(log2_fold_change<=-np.log2(1.2))&(log10_pvals_corrected>(-np.log10(0.05)))
    for i in range(len(idx_red)):
        if idx_red[i]:
            colors[i]='red'
    plt.figure()
    plt.style.use('seaborn-whitegrid')
    plt.scatter(log2_fold_change, log10_pvals_corrected, color=colors)
    plt.xlabel('Log2 Fold Change')
    plt.ylabel('-Log10 P-Value')
    if title:
        plt.title(title)
    if saved_name:
        plt.savefig(saved_name,bbox_inches='tight',dpi=300)
    return
# def plot_heatmap(data,row_c=None,dpi=300,figsize=(8/2.54,16/2.54),saved_name=None):
# # plt.figure(dpi=dpi)
# data_show=data.copy()
# # data_show=data.drop(['class'],axis=1)
# if row_c:
# row_colors=data['class'].map(row_c)
# sns.clustermap(data=data_show,method='single',metric='euclidean',
# figsize=figsize,row_cluster=False,col_cluster=False,
# cmap='rainbow')
# sns.set(font_scale=1.5)
# if saved_name:
# plt.savefig(saved_name,bbox_inches='tight',dpi=dpi)
if __name__=='__main__':
    # Demo entry point: the commented section exercises volcano_mine on
    # random data; the active section loads iris into a DataFrame (the
    # heatmap call is disabled along with plot_heatmap above).
    # data1=np.random.rand(5, 10)
    # data2=np.random.rand(5, 10)
    # data2[:,0]=data1[:,0]*2.5
    # data2[:,1]=data1[:,1]*10
    # data2[:,2]=data1[:,2]/2.5
    # data2[:,3]=data1[:,3]/10
    # logFC,logP=volcano_mine(data1, data2)
    # plot_volume(logFC,logP)
    
    iris=datasets.load_iris()
    x,y=iris.data,iris.target
    data=np.hstack((x,y.reshape(150,1)))
    pd_iris=pd.DataFrame(data,columns=['sepal length(cm)','sepal width(cm)','petal length(cm)','petal width(cm)','class'])
    # map each iris class to a row color (used by the disabled heatmap)
    row_c=dict(zip(pd_iris['class'].unique(),['green','yellow','pink']))
    # plot_heatmap(pd_iris,row_c=row_c)
import numpy as np
from scipy import stats
import statsmodels.stats.multitest as multitest
import matplotlib.pyplot as plt
import os
import pandas as pd
from pandas import Series,DataFrame
from sklearn import datasets
from tqdm import tqdm
plt.rcParams['font.sans-serif']=['SimHei']
epsilon=1e-10
def volcano_mine(data1,data2,method='hs',flag_output_src=0,flag_plot=0):
data1=data1+epsilon
data2=data2+epsilon
mdata1=data1.mean(axis=0)
mdata2=data2.mean(axis=0)
fold_change=(mdata2)/(mdata1)
log2_fold_change=np.log2(fold_change)
p_values=np.zeros_like(mdata1)
for i in tqdm(range(len(p_values))):
t,p=stats.ttest_ind(data1[:,i],data2[:,i])
p_values[i]=p
rejects,pvals_corrected,alphaSidak,alphaBonf=multitest.multipletests(p_values,method=method)
log10_pvals_corrected=np.log10(pvals_corrected+epsilon)*(-1)
return log2_fold_change,log10_pvals_corrected
def plot_volume(log2_fold_change,log10_pvals_corrected,title=None,saved_name=None):
npt=len(log2_fold_change)
colors=list(['grey']*npt)
idx_green=(log2_fold_change>=np.log2(1.2))&(log10_pvals_corrected>(-np.log10(0.05)))
for i in range(len(idx_green)):
if idx_green[i]:
colors[i]='green'
idx_red=(log2_fold_change<=-np.log2(1.2))&(log10_pvals_corrected>(-np.log10(0.05)))
for i in range(len(idx_red)):
if idx_red[i]:
colors[i]='red'
plt.figure()
plt.style.use('seaborn-whitegrid')
plt.scatter(log2_fold_change, log10_pvals_corrected, color=colors)
plt.xlabel('Log2 Fold Change')
plt.ylabel('-Log10 P-Value')
if title:
plt.title(title)
if saved_name:
plt.savefig(saved_name,bbox_inches='tight',dpi=300)
return
iris=datasets.load_iris()
x,y=iris.data,iris.target
data=np.hstack((x,y.reshape(150,1)))
pd_iris=pd.DataFrame(data,columns=['sepal length(cm)','sepal width(cm)','petal length(cm)','petal width(cm)','class'])
row_c=dict(zip(pd_iris['class'].unique(),['green','yellow','pink']))
| true | true |
f72e2822918932f7ca53aa163efd65081ca744f9 | 9,084 | py | Python | test/unit/anchore_engine/services/apiext/test_api_utils.py | Btodhunter/anchore-engine | 0f7ce6dea5f6c24c07616355affc64fdbfe1d6ef | [
"Apache-2.0"
] | null | null | null | test/unit/anchore_engine/services/apiext/test_api_utils.py | Btodhunter/anchore-engine | 0f7ce6dea5f6c24c07616355affc64fdbfe1d6ef | [
"Apache-2.0"
] | null | null | null | test/unit/anchore_engine/services/apiext/test_api_utils.py | Btodhunter/anchore-engine | 0f7ce6dea5f6c24c07616355affc64fdbfe1d6ef | [
"Apache-2.0"
] | null | null | null | """
Unit tests for the api controller utils of external API service
"""
import base64
import json
import yaml
import pytest
from anchore_engine.services.apiext.api.controllers import utils as api_utils
from anchore_engine.subsys import logger
logger.enable_test_logging('INFO')
spec_path = 'anchore_engine/services/apiext/swagger/swagger.yaml'
b64_dockerfile = str(base64.encodebytes(b'FROM stratch\nRUN echo "hello" > file\n'), 'utf-8')
raw_dockerfile = 'FROM stratch\nRUN echo "hello" > file\n'
def _load_spec(path):
with open(path) as f:
if path.endswith('yaml') or path.endswith('yml'):
return yaml.load(f, Loader=yaml.FullLoader)
else:
return json.load(f)
api_spec = _load_spec(spec_path)
test_digest = 'sha256:0123456789012345678901234567890123456789012345678901234567890123'
test_ts = '2019-01-01T01:01:01Z'
def test_valid_digest():
    """DIGEST_REGEX accepts a full sha256 digest and rejects truncated or
    malformed ones."""
    matrix = [
        (test_digest, True),
        (test_digest[:-1], False),
        ('sha', False),
        ('sha256:abc', False)
    ]
    # `candidate`/`expected` instead of `input`/`result`: `input` shadows
    # the builtin; the redundant bool() wrapper is also dropped.
    for candidate, expected in matrix:
        assert (api_utils.DIGEST_REGEX.match(candidate) is not None) == expected
def test_validate_pullstring_tag():
    """validate_pullstring_is_tag is True only for tag-style (non-digest)
    pullstrings."""
    logger.info('Testing tag-based pullstring validator')
    matrix = [
        ('docker.io/library/nginx:latest', True),
        ('docker.io/nginx:latest', True),
        ('docker.io/library/nginx', True),
        ('docker.io/nginx', True),
        ('docker.io/nginx@{}'.format(test_digest), False),
        ('docker.io/library/nginx@{}'.format(test_digest), False),
        ('nginx@{}'.format(test_digest), False)
    ]
    # `pullstring`/`expected` instead of `input`/`result`: `input` shadows
    # the builtin.
    for pullstring, expected in matrix:
        assert api_utils.validate_pullstring_is_tag(pullstring) == expected
def test_validate_pullstring_digest():
    """validate_pullstring_is_digest is True only for digest-style
    pullstrings (including registries with ports)."""
    logger.info('Testing digest-based pullstring validator')
    matrix = [
        ('docker.io/library/nginx:latest', False),
        ('docker.io/nginx:latest', False),
        ('docker.io/library/nginx', False),
        ('docker.io/nginx', False),
        ('docker.io/library/nginx@{}'.format(test_digest), True),
        ('docker.io/nginx@{}'.format(test_digest), True),
        ('nginx@{}'.format(test_digest), True),
        ('localhost:5000/my_nginx@{}'.format(test_digest), True)
    ]
    # `pullstring`/`expected` instead of `input`/`result`: `input` shadows
    # the builtin.
    for pullstring, expected in matrix:
        assert api_utils.validate_pullstring_is_digest(pullstring) == expected
def test_tag_source_validator():
    """validate_tag_source accepts a proper tag pullstring and rejects a
    wrong property name or a digest-style pullstring."""
    logger.info("Testing tag source validator")
    api_utils.validate_tag_source(tag_source={'pullstring': 'docker.io/nginx:latest'}, api_schema=api_spec)
    # wrong property name ('t' instead of 'pullstring') must raise
    with pytest.raises(Exception):
        api_utils.validate_tag_source(tag_source={'t': 'docker.io/nginx:latest'}, api_schema=api_spec)
    # digest-style pullstring is not a valid tag source
    with pytest.raises(Exception):
        api_utils.validate_tag_source(tag_source={'pullstring': 'docker.io/nginx@{}'.format(test_digest)}, api_schema=api_spec)
def test_digest_source_validator():
    """validate_digest_source accepts digest pullstrings (with tag and
    creation timestamp override) and rejects malformed/incomplete inputs."""
    logger.info("Testing digest source validator")
    api_utils.validate_digest_source(digest_source={'pullstring': 'docker.io/nginx@{}'.format(test_digest), 'tag': 'docker.io/nginx:latest', 'creation_timestamp_override': '2019-01-01T01:01:01Z'},
                                     api_schema=api_spec)
    api_utils.validate_digest_source(digest_source={'pullstring': 'docker.io/library/nginx@{}'.format(test_digest), 'tag': 'docker.io/librarynginx:latest', 'creation_timestamp_override': '2019-01-01T01:01:01Z'},
                                     api_schema=api_spec)
    api_utils.validate_digest_source(digest_source={'pullstring': 'nginx@{}'.format(test_digest), 'tag': 'nginx:latest', 'creation_timestamp_override': '2019-01-01T01:01:01Z'},
                                     api_schema=api_spec)
    api_utils.validate_digest_source(digest_source={'pullstring': 'docker.io/nginx@{}'.format(test_digest), 'tag': 'docker.io/nginx:latest', 'creation_timestamp_override': '2019-01-01T01:01:01Z'},
                                     api_schema=api_spec)
    # wrong property name must raise
    with pytest.raises(Exception):
        api_utils.validate_digest_source(digest_source={'t': 'docker.io/nginx:latest'}, api_schema=api_spec)
    # pullstring alone (no tag / timestamp) must raise
    with pytest.raises(Exception):
        api_utils.validate_digest_source(digest_source={'pullstring': 'docker.io/nginx@{}'.format(test_digest)}, api_schema=api_spec)
def test_tag_normalization():
    """normalize_image_add_source wraps a bare 'tag' request into the
    source/tag structure without altering the pullstring."""
    matrix = [
        ({'tag': 'docker.io/library/nginx:1.7'}, {'source': {'tag': {'pullstring': 'docker.io/library/nginx:1.7'}}}),
        ({'tag': 'docker.io/nginx'}, {'source': {'tag': {'pullstring': 'docker.io/nginx'}}}),
        ({'tag': 'docker.io/nginx@sha256:abc'}, {'source': {'tag': {'pullstring': 'docker.io/nginx@sha256:abc'}}})
    ]
    # The original loop carried a dead pytest.raises branch for Exception
    # entries, but this matrix contains none — assert directly.
    for test_input, expected in matrix:
        assert api_utils.normalize_image_add_source(test_input) == expected
def test_digest_normalization():
    """Digest + tag + created_at requests normalize into the source/digest
    structure with the digest appended to the repo pullstring."""
    matrix = [
        ({'created_at': '2019-01-01T01:01:01Z', 'tag': 'docker.io/nginx', 'digest': test_digest},
         {'source': {'digest': {'creation_timestamp_override': '2019-01-01T01:01:01Z', 'pullstring': 'docker.io/nginx@{}'.format(test_digest), 'tag': 'docker.io/nginx'}}}),
        ({'created_at': '2019-01-01T01:01:01Z', 'tag': 'docker.io/nginx:latest', 'digest': test_digest},
         {'source': {'digest': {'creation_timestamp_override': '2019-01-01T01:01:01Z', 'pullstring': 'docker.io/nginx@{}'.format(test_digest), 'tag': 'docker.io/nginx:latest'}}})
    ]
    for test_input, result in matrix:
        assert api_utils.normalize_image_add_source(test_input) == result
def test_normalization_and_validation():
    """End-to-end: normalize_image_add_source output must validate against
    the API schema for good requests; malformed ones must raise."""
    good_requests = [
        # Basic Tag Case
        (
            {'tag': 'nginx'}, {'source': {'tag': {'pullstring': 'nginx'}}}
        ),
        # Basic Tag w/Dockerfile
        (
            {'tag': 'docker.io/nginx', 'dockerfile': b64_dockerfile}, {'source': {'tag': {'pullstring': 'docker.io/nginx', 'dockerfile': b64_dockerfile}}}
        ),
        # Basic Digest + Tag
        (
            {'tag': 'docker.io/library/nginx:latest', 'digest': test_digest, 'created_at': test_ts},
            {'source': {'digest': {'pullstring': 'docker.io/library/nginx@{}'.format(test_digest), 'tag': 'docker.io/library/nginx:latest', 'creation_timestamp_override': test_ts}}}
        ),
        # Basic Digest + Tag (duplicate of the previous case)
        (
            {'tag': 'docker.io/library/nginx:latest', 'digest': test_digest, 'created_at': test_ts},
            {'source': {'digest': {'pullstring': 'docker.io/library/nginx@{}'.format(test_digest), 'tag': 'docker.io/library/nginx:latest', 'creation_timestamp_override': test_ts}}}
        ),
        # Basic Digest + Tag + Dockerfile
        (
            {'tag': 'docker.io/library/nginx:latest', 'digest': test_digest, 'created_at': test_ts, 'dockerfile': b64_dockerfile},
            {'source': {'digest': {'pullstring': 'docker.io/library/nginx@{}'.format(test_digest), 'tag': 'docker.io/library/nginx:latest', 'creation_timestamp_override': test_ts, 'dockerfile': b64_dockerfile}}}
        ),
        # Digest pullstring + Tag + ts
        (
            {'tag': 'docker.io/library/nginx:latest', 'digest': 'docker.io/library/nginx@{}'.format(test_digest), 'created_at': test_ts},
            {'source': {'digest': {'pullstring': 'docker.io/library/nginx@{}'.format(test_digest), 'tag': 'docker.io/library/nginx:latest', 'creation_timestamp_override': test_ts}}}
        ),
        # Archive source passes through unchanged
        (
            {'source': {'archive': {'digest': 'sha256:b9e8479820fb3a1a2f8ec426dd4ffc129e3a320392ce28dde6ae2d2d29ce2682'}}},
            {'source': {'archive': {'digest': 'sha256:b9e8479820fb3a1a2f8ec426dd4ffc129e3a320392ce28dde6ae2d2d29ce2682'}}},
        ),
    ]
    bad_requests = [
        # Malformed tag
        ({'tag': 'docker.io/library/nginx@sha123'}, Exception),
        # Tag + Digest only (no ts)
        ({'tag': 'docker.io/library/nginx:latest', 'digest': 'sh256:abc'}, Exception),
        # Digest Only
        ({'digest': 'sh256:abc'}, Exception),
        # Digest pullstring only
        ({'digest': 'docker.io/nginx@sha256:abc'}, Exception)
    ]
    matrix = good_requests + bad_requests
    for test_input, result in matrix:
        if type(result) == type and issubclass(result, Exception):
            with pytest.raises(result):
                normalized = api_utils.normalize_image_add_source(test_input)
                api_utils.validate_image_add_source(normalized, api_spec)
        else:
            normalized = api_utils.normalize_image_add_source(test_input)
            api_utils.validate_image_add_source(normalized, api_spec)
            assert normalized == result
def test_archive_source_validator():
    """validate_archive_source accepts a bare sha256 digest (no registry
    pullstring is required for archive sources)."""
    logger.info("Testing archive source validator")
    api_utils.validate_archive_source(archive_source={'digest':'sha256:b9e8479820fb3a1a2f8ec426dd4ffc129e3a320392ce28dde6ae2d2d29ce2682'},
                                     api_schema=api_spec)
| 43.464115 | 211 | 0.647732 | import base64
import json
import yaml
import pytest
from anchore_engine.services.apiext.api.controllers import utils as api_utils
from anchore_engine.subsys import logger
logger.enable_test_logging('INFO')
spec_path = 'anchore_engine/services/apiext/swagger/swagger.yaml'
b64_dockerfile = str(base64.encodebytes(b'FROM stratch\nRUN echo "hello" > file\n'), 'utf-8')
raw_dockerfile = 'FROM stratch\nRUN echo "hello" > file\n'
def _load_spec(path):
with open(path) as f:
if path.endswith('yaml') or path.endswith('yml'):
return yaml.load(f, Loader=yaml.FullLoader)
else:
return json.load(f)
api_spec = _load_spec(spec_path)
test_digest = 'sha256:0123456789012345678901234567890123456789012345678901234567890123'
test_ts = '2019-01-01T01:01:01Z'
def test_valid_digest():
matrix = [
(test_digest, True),
(test_digest[:-1], False),
('sha', False),
('sha256:abc', False)
]
for input, result in matrix:
assert bool(api_utils.DIGEST_REGEX.match(input) is not None) == result
def test_validate_pullstring_tag():
logger.info('Testing tag-based pullstring validator')
matrix = [
('docker.io/library/nginx:latest', True),
('docker.io/nginx:latest', True),
('docker.io/library/nginx', True),
('docker.io/nginx', True),
('docker.io/nginx@{}'.format(test_digest), False),
('docker.io/library/nginx@{}'.format(test_digest), False),
('nginx@{}'.format(test_digest), False)
]
for input, result in matrix:
assert api_utils.validate_pullstring_is_tag(input) == result
def test_validate_pullstring_digest():
logger.info('Testing digest-based pullstring validator')
matrix = [
('docker.io/library/nginx:latest', False),
('docker.io/nginx:latest', False),
('docker.io/library/nginx', False),
('docker.io/nginx', False),
('docker.io/library/nginx@{}'.format(test_digest), True),
('docker.io/nginx@{}'.format(test_digest), True),
('nginx@{}'.format(test_digest), True),
('localhost:5000/my_nginx@{}'.format(test_digest), True)
]
for input, result in matrix:
assert api_utils.validate_pullstring_is_digest(input) == result
def test_tag_source_validator():
logger.info("Testing tag source validator")
api_utils.validate_tag_source(tag_source={'pullstring': 'docker.io/nginx:latest'}, api_schema=api_spec)
with pytest.raises(Exception):
api_utils.validate_tag_source(tag_source={'t': 'docker.io/nginx:latest'}, api_schema=api_spec)
with pytest.raises(Exception):
api_utils.validate_tag_source(tag_source={'pullstring': 'docker.io/nginx@{}'.format(test_digest)}, api_schema=api_spec)
def test_digest_source_validator():
logger.info("Testing digest source validator")
api_utils.validate_digest_source(digest_source={'pullstring': 'docker.io/nginx@{}'.format(test_digest), 'tag': 'docker.io/nginx:latest', 'creation_timestamp_override': '2019-01-01T01:01:01Z'},
api_schema=api_spec)
api_utils.validate_digest_source(digest_source={'pullstring': 'docker.io/library/nginx@{}'.format(test_digest), 'tag': 'docker.io/librarynginx:latest', 'creation_timestamp_override': '2019-01-01T01:01:01Z'},
api_schema=api_spec)
api_utils.validate_digest_source(digest_source={'pullstring': 'nginx@{}'.format(test_digest), 'tag': 'nginx:latest', 'creation_timestamp_override': '2019-01-01T01:01:01Z'},
api_schema=api_spec)
api_utils.validate_digest_source(digest_source={'pullstring': 'docker.io/nginx@{}'.format(test_digest), 'tag': 'docker.io/nginx:latest', 'creation_timestamp_override': '2019-01-01T01:01:01Z'},
api_schema=api_spec)
with pytest.raises(Exception):
api_utils.validate_digest_source(digest_source={'t': 'docker.io/nginx:latest'}, api_schema=api_spec)
with pytest.raises(Exception):
api_utils.validate_digest_source(digest_source={'pullstring': 'docker.io/nginx@{}'.format(test_digest)}, api_schema=api_spec)
def test_tag_normalization():
matrix = [
({'tag': 'docker.io/library/nginx:1.7'}, {'source': {'tag': {'pullstring': 'docker.io/library/nginx:1.7'}}}),
({'tag': 'docker.io/nginx'}, {'source': {'tag': {'pullstring': 'docker.io/nginx'}}}),
({'tag': 'docker.io/nginx@sha256:abc'}, {'source': {'tag': {'pullstring': 'docker.io/nginx@sha256:abc'}}})
]
for test_input, result in matrix:
if type(result) == type and issubclass(result, Exception):
with pytest.raises(result):
normalized = api_utils.normalize_image_add_source(test_input)
else:
assert api_utils.normalize_image_add_source(test_input) == result
def test_digest_normalization():
matrix = [
({'created_at': '2019-01-01T01:01:01Z', 'tag': 'docker.io/nginx', 'digest': test_digest},
{'source': {'digest': {'creation_timestamp_override': '2019-01-01T01:01:01Z', 'pullstring': 'docker.io/nginx@{}'.format(test_digest), 'tag': 'docker.io/nginx'}}}),
({'created_at': '2019-01-01T01:01:01Z', 'tag': 'docker.io/nginx:latest', 'digest': test_digest},
{'source': {'digest': {'creation_timestamp_override': '2019-01-01T01:01:01Z', 'pullstring': 'docker.io/nginx@{}'.format(test_digest), 'tag': 'docker.io/nginx:latest'}}})
]
for test_input, result in matrix:
assert api_utils.normalize_image_add_source(test_input) == result
def test_normalization_and_validation():
good_requests = [
(
{'tag': 'nginx'}, {'source': {'tag': {'pullstring': 'nginx'}}}
),
(
{'tag': 'docker.io/nginx', 'dockerfile': b64_dockerfile}, {'source': {'tag': {'pullstring': 'docker.io/nginx', 'dockerfile': b64_dockerfile}}}
),
(
{'tag': 'docker.io/library/nginx:latest', 'digest': test_digest, 'created_at': test_ts},
{'source': {'digest': {'pullstring': 'docker.io/library/nginx@{}'.format(test_digest), 'tag': 'docker.io/library/nginx:latest', 'creation_timestamp_override': test_ts}}}
),
(
{'tag': 'docker.io/library/nginx:latest', 'digest': test_digest, 'created_at': test_ts},
{'source': {'digest': {'pullstring': 'docker.io/library/nginx@{}'.format(test_digest), 'tag': 'docker.io/library/nginx:latest', 'creation_timestamp_override': test_ts}}}
),
(
{'tag': 'docker.io/library/nginx:latest', 'digest': test_digest, 'created_at': test_ts, 'dockerfile': b64_dockerfile},
{'source': {'digest': {'pullstring': 'docker.io/library/nginx@{}'.format(test_digest), 'tag': 'docker.io/library/nginx:latest', 'creation_timestamp_override': test_ts, 'dockerfile': b64_dockerfile}}}
),
(
{'tag': 'docker.io/library/nginx:latest', 'digest': 'docker.io/library/nginx@{}'.format(test_digest), 'created_at': test_ts},
{'source': {'digest': {'pullstring': 'docker.io/library/nginx@{}'.format(test_digest), 'tag': 'docker.io/library/nginx:latest', 'creation_timestamp_override': test_ts}}}
),
(
{'source': {'archive': {'digest': 'sha256:b9e8479820fb3a1a2f8ec426dd4ffc129e3a320392ce28dde6ae2d2d29ce2682'}}},
{'source': {'archive': {'digest': 'sha256:b9e8479820fb3a1a2f8ec426dd4ffc129e3a320392ce28dde6ae2d2d29ce2682'}}},
),
]
bad_requests = [
({'tag': 'docker.io/library/nginx@sha123'}, Exception),
({'tag': 'docker.io/library/nginx:latest', 'digest': 'sh256:abc'}, Exception),
({'digest': 'sh256:abc'}, Exception),
({'digest': 'docker.io/nginx@sha256:abc'}, Exception)
]
matrix = good_requests + bad_requests
for test_input, result in matrix:
if type(result) == type and issubclass(result, Exception):
with pytest.raises(result):
normalized = api_utils.normalize_image_add_source(test_input)
api_utils.validate_image_add_source(normalized, api_spec)
else:
normalized = api_utils.normalize_image_add_source(test_input)
api_utils.validate_image_add_source(normalized, api_spec)
assert normalized == result
def test_archive_source_validator():
logger.info("Testing archive source validator")
api_utils.validate_archive_source(archive_source={'digest':'sha256:b9e8479820fb3a1a2f8ec426dd4ffc129e3a320392ce28dde6ae2d2d29ce2682'},
api_schema=api_spec)
| true | true |
f72e285fa57c1479b7ac589e986ce382761b7ce1 | 2,349 | py | Python | apps/mascota/views.py | ecampetella/mascotas | c2b45a3ebe736eb9258081be05376796c7a8c5c4 | [
"Apache-2.0"
] | null | null | null | apps/mascota/views.py | ecampetella/mascotas | c2b45a3ebe736eb9258081be05376796c7a8c5c4 | [
"Apache-2.0"
] | null | null | null | apps/mascota/views.py | ecampetella/mascotas | c2b45a3ebe736eb9258081be05376796c7a8c5c4 | [
"Apache-2.0"
] | null | null | null | from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.core import serializers
from django.urls import reverse_lazy
from django.views.generic import ListView, CreateView, UpdateView, DeleteView
from apps.mascota.forms import MascotaForm
from apps.mascota.models import Mascota
def listado(request):
    """Return every Mascota serialized as JSON (only 'nombre' and 'sexo').

    Parameter renamed from the original typo ``requesst``; Django passes
    the request positionally so callers are unaffected.
    """
    lista = serializers.serialize('json', Mascota.objects.all(), fields=['nombre', 'sexo'])
    return HttpResponse(lista, content_type='application/json')
def index(request):
    """Render the mascota landing page.

    Parameter renamed from ``Request`` to the conventional lowercase
    ``request``; Django passes it positionally so callers are unaffected.
    """
    return render(request, 'mascota/index.html')
def mascota_view(request):
    """Create a new Mascota.

    GET renders an empty form; POST validates and saves, redirecting to
    'index' on success or re-rendering the bound form with errors.
    """
    if request.method == 'POST':
        form = MascotaForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('index')
        return render(request, 'mascota/mascota_form.html', {'form': form})
    return render(request, 'mascota/mascota_form.html', {'form': MascotaForm()})
def mascota_list(request):
    """Render the full Mascota listing (function-based, unpaginated)."""
    context = {'mascotas': Mascota.objects.all()}
    return render(request, 'mascota/mascota_list.html', context)
def mascota_edit(request, id_mascota):
    """Edit the Mascota identified by ``id_mascota``.

    GET shows the pre-filled form; a valid POST saves and redirects to the
    list page, while an invalid POST re-renders the form with its errors.
    """
    instance = Mascota.objects.get(id=id_mascota)
    if request.method != 'GET':
        form = MascotaForm(request.POST, instance=instance)
        if form.is_valid():
            form.save()
            return redirect('mascota_listar')
    else:
        form = MascotaForm(instance=instance)
    return render(request, 'mascota/mascota_form.html', {'form': form})
def mascota_delete(request, id_mascota):
    """Confirm (GET) and perform (POST) deletion of one Mascota."""
    instance = Mascota.objects.get(id=id_mascota)
    if request.method == 'POST':
        instance.delete()
        return redirect('mascota_listar')
    # GET: show the confirmation page.
    return render(request, 'mascota/mascota_delete.html', {'mascota': instance})
class MascotaList(ListView):
    """Class-based listing of Mascota records, paginated 3 per page."""
    model = Mascota
    template_name = 'mascota/mascota_list.html'
    paginate_by = 3
class MascotaCreate(CreateView):
    """Class-based create view; redirects to the list page on success."""
    model = Mascota
    form_class = MascotaForm
    template_name = 'mascota/mascota_form.html'
    success_url = reverse_lazy('mascota_listar')
class MascotaUpdate(UpdateView):
    """Class-based update view; redirects to the list page on success."""
    model = Mascota
    form_class = MascotaForm
    template_name = 'mascota/mascota_form.html'
    success_url = reverse_lazy('mascota_listar')
class MascotaDelete(DeleteView):
    """Class-based delete view with confirmation; redirects to the list page."""
    model = Mascota
    template_name = 'mascota/mascota_delete.html'
    success_url = reverse_lazy('mascota_listar')
| 27.635294 | 91 | 0.707961 | from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.core import serializers
from django.urls import reverse_lazy
from django.views.generic import ListView, CreateView, UpdateView, DeleteView
from apps.mascota.forms import MascotaForm
from apps.mascota.models import Mascota
def listado(requesst):
lista = serializers.serialize('json', Mascota.objects.all(), fields=['nombre', 'sexo'])
return HttpResponse(lista, content_type='application/json')
def index(Request):
return render (Request, 'mascota/index.html')
def mascota_view(request):
if request.method == 'POST':
form = MascotaForm(request.POST)
if form.is_valid():
form.save()
return redirect('index')
else:
form = MascotaForm()
return render(request, 'mascota/mascota_form.html', {'form':form})
def mascota_list(request):
mascota = Mascota.objects.all()
contexto = {'mascotas':mascota}
return render(request, 'mascota/mascota_list.html', contexto)
def mascota_edit(request, id_mascota):
mascota = Mascota.objects.get(id=id_mascota)
if request.method == 'GET':
form = MascotaForm(instance=mascota)
else:
form = MascotaForm(request.POST, instance=mascota)
if form.is_valid():
form.save()
return redirect('mascota_listar')
return render (request,'mascota/mascota_form.html', {'form':form})
def mascota_delete(request, id_mascota):
mascota = Mascota.objects.get(id=id_mascota)
if request.method == 'POST':
mascota.delete()
return redirect('mascota_listar')
return render (request,'mascota/mascota_delete.html',{'mascota':mascota})
class MascotaList(ListView):
model = Mascota
template_name = 'mascota/mascota_list.html'
paginate_by = 3
class MascotaCreate(CreateView):
model = Mascota
form_class = MascotaForm
template_name = 'mascota/mascota_form.html'
success_url = reverse_lazy('mascota_listar')
class MascotaUpdate(UpdateView):
model = Mascota
form_class = MascotaForm
template_name = 'mascota/mascota_form.html'
success_url = reverse_lazy('mascota_listar')
class MascotaDelete(DeleteView):
model = Mascota
template_name = 'mascota/mascota_delete.html'
success_url = reverse_lazy('mascota_listar')
| true | true |
f72e2a94bddcc6662ddbec5a3ed288d1de2fd3ed | 841 | py | Python | tests/util_test.py | panfill/pandoc-tables | ba26525e3e9c6ddab6236276ec9a9ac3508e31f5 | [
"BSD-3-Clause"
] | 74 | 2016-11-20T14:19:06.000Z | 2022-01-27T13:53:45.000Z | tests/util_test.py | panfill/pandoc-tables | ba26525e3e9c6ddab6236276ec9a9ac3508e31f5 | [
"BSD-3-Clause"
] | 57 | 2016-12-23T01:37:59.000Z | 2022-03-15T10:14:49.000Z | tests/util_test.py | panfill/pandoc-tables | ba26525e3e9c6ddab6236276ec9a9ac3508e31f5 | [
"BSD-3-Clause"
] | 19 | 2017-07-31T17:32:01.000Z | 2022-03-09T17:44:24.000Z | from pytest import mark
from pantable.util import convert_texts, convert_texts_fast, eq_panflute_elems
# Fixture cases: markdown snippets fed to both converters.
texts_1 = [
    'some **markdown** here',
    'and ~~some~~ other?'
]
# Second set adds a bracketed span attribute and multi-line ATX headings.
texts_2 = [
    'some *very* intersting markdown [example]{#so_fancy}',
    '''# Comical
Text
# Totally comical
Text'''
]
# All parametrized cases: each set alone plus their concatenation.
textss = [texts_1, texts_2, texts_1 + texts_2]
# Reference answers produced by the baseline converter (convert_texts).
elemss = [convert_texts(texts) for texts in textss]
@mark.parametrize('elems,texts', zip(elemss, textss))
def test_convert_texts_markdown_to_panflute(elems, texts):
    """The fast converter must yield panflute elements equal to the reference."""
    converted = convert_texts_fast(texts)
    assert eq_panflute_elems(elems, converted)
@mark.parametrize('elems,texts', zip(elemss, textss))
def test_convert_texts_panflute_to_markdown(elems, texts):
    """Round trip: panflute -> markdown must reproduce the original texts."""
    round_tripped = convert_texts_fast(elems, input_format='panflute', output_format='markdown')
    assert texts == round_tripped
| 23.361111 | 96 | 0.743163 | from pytest import mark
from pantable.util import convert_texts, convert_texts_fast, eq_panflute_elems
texts_1 = [
'some **markdown** here',
'and ~~some~~ other?'
]
texts_2 = [
'some *very* intersting markdown [example]{#so_fancy}',
'''# Comical
Text
# Totally comical
Text'''
]
textss = [texts_1, texts_2, texts_1 + texts_2]
elemss = [convert_texts(texts) for texts in textss]
@mark.parametrize('elems,texts', zip(elemss, textss))
def test_convert_texts_markdown_to_panflute(elems, texts):
assert eq_panflute_elems(elems, convert_texts_fast(texts))
@mark.parametrize('elems,texts', zip(elemss, textss))
def test_convert_texts_panflute_to_markdown(elems, texts):
assert texts == convert_texts_fast(elems, input_format='panflute', output_format='markdown')
| true | true |
f72e2be002132a3159a33734ff15192f64760aea | 801 | py | Python | scripts/mrep.py | akikuno/rosalind | 7015dc63e493d870e5789e99f2ee523a9b1f5ab9 | [
"MIT"
] | null | null | null | scripts/mrep.py | akikuno/rosalind | 7015dc63e493d870e5789e99f2ee523a9b1f5ab9 | [
"MIT"
] | null | null | null | scripts/mrep.py | akikuno/rosalind | 7015dc63e493d870e5789e99f2ee523a9b1f5ab9 | [
"MIT"
] | null | null | null | # https://rosalind.info/problems/mrep/
def fmtfa(fasta: list) -> tuple:
    """Parse FASTA-formatted lines into (headers, sequences).

    Args:
        fasta: lines of a FASTA file (without trailing newlines).

    Returns:
        (header, seq): ``header`` holds each '>' line with the marker
        stripped; ``seq`` holds the concatenated sequence for each record.
    """
    prev = True
    header = []
    seq = []
    for line in fasta:
        # BUGFIX: a FASTA header is a line that *starts* with '>'; the original
        # tested ``">" in line`` and misclassified any line containing '>'.
        if line.startswith(">"):
            header.append(line[1:])
            prev = True
        elif prev:
            # first sequence line after a header starts a new record
            seq.append(line)
            prev = False
        else:
            # continuation line: extend the current record
            seq[-1] += line
    return header, seq
# INPUT -------------------------------------------
# Paths to the sample dataset and its expected output.
file_in = "sample/dataset/mrep.txt"
file_out = "sample/output/mrep.txt"
# file_in = "case/dataset/mrep.txt"
# Raw problem input, one string per line.
with open(file_in) as f:
    data = f.read().splitlines()
# Expected output lines, read for comparison with the computed answer.
with open(file_out) as f:
    outcome = f.read().splitlines()
# MAIN -------------------------------------------
# OUTPUT -------------------------------------------
# BUG(review): f.write() is called without an argument and raises TypeError at
# runtime; the MAIN section that should compute the answer is empty, so the
# intended payload is unknown -- this scaffold needs to be completed.
with open("case/output/mrep.txt", "w") as f:
    f.write()
# END
| 18.627907 | 52 | 0.451935 |
def fmtfa(fasta: list) -> tuple:
    """Parse FASTA-formatted lines into (headers, sequences).

    Args:
        fasta: lines of a FASTA file (without trailing newlines).

    Returns:
        (header, seq): ``header`` holds each '>' line with the marker
        stripped; ``seq`` holds the concatenated sequence for each record.
    """
    prev = True
    header = []
    seq = []
    for line in fasta:
        # BUGFIX: a FASTA header is a line that *starts* with '>'; the original
        # tested ``">" in line`` and misclassified any line containing '>'.
        if line.startswith(">"):
            header.append(line[1:])
            prev = True
        elif prev:
            # first sequence line after a header starts a new record
            seq.append(line)
            prev = False
        else:
            # continuation line: extend the current record
            seq[-1] += line
    return header, seq
file_in = "sample/dataset/mrep.txt"
file_out = "sample/output/mrep.txt"
with open(file_in) as f:
data = f.read().splitlines()
with open(file_out) as f:
outcome = f.read().splitlines()
with open("case/output/mrep.txt", "w") as f:
f.write()
| true | true |
f72e2c20014b014adcc22ea0886c8a4731a2cba2 | 9,241 | py | Python | trader/strategy.py | freshjang/MyKiwoom | 6342ec7ba8da55194bb473f9052d87f7fa1a640e | [
"MIT"
] | null | null | null | trader/strategy.py | freshjang/MyKiwoom | 6342ec7ba8da55194bb473f9052d87f7fa1a640e | [
"MIT"
] | null | null | null | trader/strategy.py | freshjang/MyKiwoom | 6342ec7ba8da55194bb473f9052d87f7fa1a640e | [
"MIT"
] | null | null | null | import os
import sys
import psutil
import numpy as np
import pandas as pd
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from utility.setting import ui_num, DICT_SET, columns_gj
from utility.static import now, timedelta_sec, thread_decorator, strf_time, float2str1p6
class Strategy:
    """Strategy worker process: consumes tick/order-book messages from stgQ,
    runs buy/sell evaluation, and emits orders to traderQ plus UI/status
    updates to windowQ.
    """
    def __init__(self, qlist):
        """
        qlist queue index map (only indices 0, 1 and 3 are used here):
        0        1        2        3     4       5       6      7       8       9        10       11
        windowQ, traderQ, receivQ, stgQ, soundQ, queryQ, teleQ, hoga1Q, hoga2Q, chart1Q, chart2Q, chart3Q,
        chart4Q, chart5Q, chart6Q, chart7Q, chart8Q, chart9Q, chart10Q, tick1Q, tick2Q, tick3Q, tick4Q
        12       13       14       15       16       17       18        19      20      21      22
        """
        self.windowQ = qlist[0]
        self.traderQ = qlist[1]
        self.stgQ = qlist[3]
        self.list_buy = []                  # stock codes with an outstanding buy order
        self.list_sell = []                 # stock codes with an outstanding sell order
        self.int_tujagm = 0                 # investment amount per stock
        self.startjjstg = False             # True once the intraday (>= 10:00) phase has started
        self.dict_gsjm = {}                 # key: stock code, value: per-tick stats DataFrame
        self.dict_data = {}                 # key: stock code, value: latest tick/order-book snapshot (list)
        self.dict_high = {}                 # key: stock code, value: highest profit rate seen (float)
        # next due time of each periodic task
        # ('관심종목' watch-list push, '부가정보' process info, '연산시간' latency log)
        self.dict_time = {
            '관심종목': now(),
            '부가정보': now(),
            '연산시간': now()
        }
        # process resource stats ('스레드' threads, '시피유' cpu %, '메모리' memory)
        self.dict_intg = {
            '스레드': 0,
            '시피유': 0.,
            '메모리': 0.
        }
        self.Start()
    def Start(self):
        """Main dispatch loop: route queue messages by type/shape until the
        shutdown command arrives, then notify the UI and exit the process."""
        while True:
            data = self.stgQ.get()
            if type(data) == int:
                # bare int: new per-stock investment amount
                self.int_tujagm = data
            elif type(data) == list:
                if len(data) == 2:
                    # [gubun, code]: watch-list entry / order-state change
                    self.UpdateList(data[0], data[1])
                elif len(data) == 38:
                    # full tick + order-book snapshot -> buy-side evaluation
                    self.BuyStrategy(data[0], data[1], data[2], data[3], data[4], data[5], data[6], data[7], data[8],
                                     data[9], data[10], data[11], data[12], data[13], data[14], data[15], data[16],
                                     data[17], data[18], data[19], data[20], data[21], data[22], data[23], data[24],
                                     data[25], data[26], data[27], data[28], data[29], data[30], data[31], data[32],
                                     data[33], data[34], data[35], data[36], data[37])
                elif len(data) == 6:
                    # holding snapshot -> sell-side evaluation
                    self.SellStrategy(data[0], data[1], data[2], data[3], data[4], data[5])
            elif data == '전략프로세스종료':
                # shutdown command for this strategy process
                break
            if now() > self.dict_time['관심종목']:
                # push the watch-list stats to the UI at most once per second
                self.windowQ.put([ui_num['관심종목'], self.dict_gsjm])
                self.dict_time['관심종목'] = timedelta_sec(1)
            if now() > self.dict_time['부가정보']:
                # refresh process resource info every 2 seconds
                self.UpdateInfo()
                self.dict_time['부가정보'] = timedelta_sec(2)
        self.windowQ.put([1, '시스템 명령 실행 알림 - 전략 연산 프로세스 종료'])
        sys.exit()
    def UpdateList(self, gubun, code):
        """Maintain the watch list and the pending-order lists.

        gubun values: contains '조건진입' (condition entered), '조건이탈'
        (condition left), '매수완료'/'매수취소' (buy filled/cancelled),
        '매도완료'/'매도취소' (sell filled/cancelled).
        """
        if '조건진입' in gubun:
            if code not in self.dict_gsjm.keys():
                # allocate a zeroed stats DataFrame sized for the current session phase
                # (before 10:00 use the opening-phase tick window, after use the intraday one)
                if int(strf_time('%H%M%S')) < 100000:
                    data = np.zeros((DICT_SET['장초평균값계산틱수'] + 2, len(columns_gj))).tolist()
                else:
                    data = np.zeros((DICT_SET['장중평균값계산틱수'] + 2, len(columns_gj))).tolist()
                df = pd.DataFrame(data, columns=columns_gj)
                self.dict_gsjm[code] = df.copy()
        elif gubun == '조건이탈':
            if code in self.dict_gsjm.keys():
                del self.dict_gsjm[code]
        elif gubun in ['매수완료', '매수취소']:
            if code in self.list_buy:
                self.list_buy.remove(code)
        elif gubun in ['매도완료', '매도취소']:
            if code in self.list_sell:
                self.list_sell.remove(code)
            if code in self.dict_high.keys():
                del self.dict_high[code]
    def BuyStrategy(self, 현재가, 시가, 고가, 저가, 등락율, 당일거래대금, 체결강도,
                    초당매수수량, 초당매도수량, VI해제시간, VI아래5호가, 매도총잔량, 매수총잔량,
                    매도호가5, 매도호가4, 매도호가3, 매도호가2, 매도호가1, 매수호가1, 매수호가2, 매수호가3, 매수호가4, 매수호가5,
                    매도잔량5, 매도잔량4, 매도잔량3, 매도잔량2, 매도잔량1, 매수잔량1, 매수잔량2, 매수잔량3, 매수잔량4, 매수잔량5,
                    종목코드, 체결시간, 틱수신시간, 종목명, 잔고종목):
        """Per-tick buy evaluation for one stock.

        Korean parameter names are market terms: 현재가 current price, 시가 open,
        고가/저가 day high/low, 등락율 change rate, 당일거래대금 day's traded value,
        체결강도 trade strength, 매도/매수호가N ask/bid price level N,
        매도/매수잔량N ask/bid size level N, 종목코드/종목명 stock code/name,
        틱수신시간 tick receive time, 잔고종목 True if already held.
        """
        if 종목코드 not in self.dict_gsjm.keys():
            return
        self.CheckStrategy()
        # mid price of the day's high/low, and deviation of the last price from it (%)
        고저평균 = round((고가 + 저가) / 2)
        고저평균대비등락율 = round((현재가 / 고저평균 - 1) * 100, 2)
        # traded value delta since the previous tick (0 until history exists)
        직전당일거래대금 = self.dict_gsjm[종목코드]['당일거래대금'][0]
        초당거래대금 = 0 if 직전당일거래대금 == 0 else int(당일거래대금 - 직전당일거래대금)
        # session phase selects the rolling-average tick-window size
        구분 = '장초' if int(strf_time('%H%M%S')) < 100000 else '장중'
        평균값계산틱수 = DICT_SET[f'{구분}평균값계산틱수']
        평균값인덱스 = 평균값계산틱수 + 1
        # shift history down one row and write the newest tick into row 0
        self.dict_gsjm[종목코드] = self.dict_gsjm[종목코드].shift(1)
        self.dict_gsjm[종목코드].at[0] = 등락율, 고저평균대비등락율, 초당거래대금, 당일거래대금, 체결강도, 0.
        if self.dict_gsjm[종목코드]['체결강도'][평균값계산틱수] != 0.:
            # enough ticks accumulated: refresh the rolling stats in the summary row
            초당거래대금평균 = int(self.dict_gsjm[종목코드]['초당거래대금'][1:평균값인덱스].mean())
            체결강도평균 = round(self.dict_gsjm[종목코드]['체결강도'][1:평균값인덱스].mean(), 2)
            최고체결강도 = round(self.dict_gsjm[종목코드]['체결강도'][1:평균값인덱스].max(), 2)
            self.dict_gsjm[종목코드].at[평균값인덱스] = 0., 0., 초당거래대금평균, 0, 체결강도평균, 최고체결강도
            매수 = True
            직전체결강도 = self.dict_gsjm[종목코드]['체결강도'][1]
            # snapshot consumed later by SellStrategy (unpacked in the same order)
            self.dict_data[종목코드] = [
                현재가, 시가, 고가, 저가, 등락율, 고저평균대비등락율, 당일거래대금, 초당거래대금, 초당거래대금평균, 체결강도,
                체결강도평균, 최고체결강도, 직전체결강도, 초당매수수량, 초당매도수량, VI해제시간, VI아래5호가, 매도총잔량, 매수총잔량,
                매도호가5, 매도호가4, 매도호가3, 매도호가2, 매도호가1, 매수호가1, 매수호가2, 매수호가3, 매수호가4, 매수호가5,
                매도잔량5, 매도잔량4, 매도잔량3, 매도잔량2, 매도잔량1, 매수잔량1, 매수잔량2, 매수잔량3, 매수잔량4, 매수잔량5
            ]
            if 잔고종목:
                return
            if 종목코드 in self.list_buy:
                return
            # actual entry conditions withheld (non-public strategy)
            if 매수:
                매수수량 = int(self.int_tujagm / 현재가)
                if 매수수량 > 0:
                    # walk the ask book to estimate the average fill price
                    남은수량 = 매수수량
                    직전남은수량 = 매수수량
                    매수금액 = 0
                    호가정보 = {매도호가1: 매도잔량1}
                    for 매도호가, 매도잔량 in 호가정보.items():
                        남은수량 -= 매도잔량
                        if 남은수량 <= 0:
                            매수금액 += 매도호가 * 직전남은수량
                            break
                        else:
                            매수금액 += 매도호가 * 매도잔량
                            직전남은수량 = 남은수량
                    if 남은수량 <= 0:
                        # order only when the inspected depth can fill the whole quantity
                        예상체결가 = round(매수금액 / 매수수량, 2)
                        self.list_buy.append(종목코드)
                        self.traderQ.put(['매수', 종목코드, 종목명, 예상체결가, 매수수량])
        if now() > self.dict_time['연산시간']:
            # log receive->compute latency at most once per minute
            gap = float2str1p6((now() - 틱수신시간).total_seconds())
            self.windowQ.put([1, f'전략스 연산 시간 알림 - 수신시간과 연산시간의 차이는 [{gap}]초입니다.'])
            self.dict_time['연산시간'] = timedelta_sec(60)
    def SellStrategy(self, 종목코드, 종목명, 수익률, 보유수량, 현재가, 매수시간):
        """Per-holding sell evaluation (수익률 profit rate in %, 보유수량 held
        quantity, 매수시간 buy time)."""
        if 종목코드 not in self.dict_gsjm.keys() or 종목코드 not in self.dict_data.keys():
            return
        if 종목코드 in self.list_sell:
            return
        매도 = False
        구분 = '장초' if int(strf_time('%H%M%S')) < 100000 else '장중'
        # unpack the snapshot stored by BuyStrategy (same order as written)
        현재가, 시가, 고가, 저가, 등락율, 고저평균대비등락율, 당일거래대금, 초당거래대금, 초당거래대금평균, 체결강도, \
            체결강도평균, 최고체결강도, 직전체결강도, 초당매수수량, 초당매도수량, VI해제시간, VI아래5호가, 매도총잔량, 매수총잔량, \
            매도호가5, 매도호가4, 매도호가3, 매도호가2, 매도호가1, 매수호가1, 매수호가2, 매수호가3, 매수호가4, 매수호가5, \
            매도잔량5, 매도잔량4, 매도잔량3, 매도잔량2, 매도잔량1, 매수잔량1, 매수잔량2, 매수잔량3, 매수잔량4, 매수잔량5 = \
            self.dict_data[종목코드]
        # track the highest profit rate reached while holding this stock
        if 종목코드 not in self.dict_high.keys():
            self.dict_high[종목코드] = 수익률
        elif 수익률 > self.dict_high[종목코드]:
            self.dict_high[종목코드] = 수익률
        최고수익률 = self.dict_high[종목코드]
        """ 매도 조건 예시 """
        # example exit rule: stop loss at -2%, take profit at +3%
        if 수익률 <= -2 or 수익률 >= 3:
            매도 = True
        # actual exit conditions withheld (non-public strategy)
        if 매도:
            # walk the bid book to estimate the average fill price
            남은수량 = 보유수량
            직전남은수량 = 보유수량
            매도금액 = 0
            호가정보 = {매수호가1: 매수잔량1, 매수호가2: 매수잔량2, 매수호가3: 매수잔량3, 매수호가4: 매수잔량4, 매수호가5: 매수잔량5}
            for 매수호가, 매수잔량 in 호가정보.items():
                남은수량 -= 매수잔량
                if 남은수량 <= 0:
                    매도금액 += 매수호가 * 직전남은수량
                    break
                else:
                    매도금액 += 매수호가 * 매수잔량
                    직전남은수량 = 남은수량
            if 남은수량 <= 0:
                # order only when 5 bid levels can absorb the whole position
                예상체결가 = round(매도금액 / 보유수량, 2)
                self.list_sell.append(종목코드)
                self.traderQ.put(['매도', 종목코드, 종목명, 예상체결가, 보유수량])
    def CheckStrategy(self):
        """One-time switch to the intraday phase: at/after 10:00 reset every
        stats DataFrame to the intraday tick-window size."""
        if int(strf_time('%H%M%S')) >= 100000 and not self.startjjstg:
            for code in list(self.dict_gsjm.keys()):
                data = np.zeros((DICT_SET['장중평균값계산틱수'] + 2, len(columns_gj))).tolist()
                df = pd.DataFrame(data, columns=columns_gj)
                self.dict_gsjm[code] = df.copy()
            self.startjjstg = True
    @thread_decorator
    def UpdateInfo(self):
        """Send current process resource usage to the UI, then refresh it
        (decorated to run on a worker thread because the refresh blocks)."""
        info = [6, self.dict_intg['메모리'], self.dict_intg['스레드'], self.dict_intg['시피유']]
        self.windowQ.put(info)
        self.UpdateSysinfo()
    def UpdateSysinfo(self):
        """Refresh memory, thread count and CPU usage of this process.
        NOTE: cpu_percent(interval=2) blocks for 2 seconds."""
        p = psutil.Process(os.getpid())
        self.dict_intg['메모리'] = round(p.memory_info()[0] / 2 ** 20.86, 2)
        self.dict_intg['스레드'] = p.num_threads()
        self.dict_intg['시피유'] = round(p.cpu_percent(interval=2) / 2, 2)
| 41.071111 | 117 | 0.489125 | import os
import sys
import psutil
import numpy as np
import pandas as pd
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from utility.setting import ui_num, DICT_SET, columns_gj
from utility.static import now, timedelta_sec, thread_decorator, strf_time, float2str1p6
class Strategy:
def __init__(self, qlist):
self.windowQ = qlist[0]
self.traderQ = qlist[1]
self.stgQ = qlist[3]
self.list_buy = []
self.list_sell = []
self.int_tujagm = 0
self.startjjstg = False
self.dict_gsjm = {}
self.dict_data = {}
self.dict_high = {}
self.dict_time = {
'관심종목': now(),
'부가정보': now(),
'연산시간': now()
}
self.dict_intg = {
'스레드': 0,
'시피유': 0.,
'메모리': 0.
}
self.Start()
def Start(self):
while True:
data = self.stgQ.get()
if type(data) == int:
self.int_tujagm = data
elif type(data) == list:
if len(data) == 2:
self.UpdateList(data[0], data[1])
elif len(data) == 38:
self.BuyStrategy(data[0], data[1], data[2], data[3], data[4], data[5], data[6], data[7], data[8],
data[9], data[10], data[11], data[12], data[13], data[14], data[15], data[16],
data[17], data[18], data[19], data[20], data[21], data[22], data[23], data[24],
data[25], data[26], data[27], data[28], data[29], data[30], data[31], data[32],
data[33], data[34], data[35], data[36], data[37])
elif len(data) == 6:
self.SellStrategy(data[0], data[1], data[2], data[3], data[4], data[5])
elif data == '전략프로세스종료':
break
if now() > self.dict_time['관심종목']:
self.windowQ.put([ui_num['관심종목'], self.dict_gsjm])
self.dict_time['관심종목'] = timedelta_sec(1)
if now() > self.dict_time['부가정보']:
self.UpdateInfo()
self.dict_time['부가정보'] = timedelta_sec(2)
self.windowQ.put([1, '시스템 명령 실행 알림 - 전략 연산 프로세스 종료'])
sys.exit()
def UpdateList(self, gubun, code):
if '조건진입' in gubun:
if code not in self.dict_gsjm.keys():
if int(strf_time('%H%M%S')) < 100000:
data = np.zeros((DICT_SET['장초평균값계산틱수'] + 2, len(columns_gj))).tolist()
else:
data = np.zeros((DICT_SET['장중평균값계산틱수'] + 2, len(columns_gj))).tolist()
df = pd.DataFrame(data, columns=columns_gj)
self.dict_gsjm[code] = df.copy()
elif gubun == '조건이탈':
if code in self.dict_gsjm.keys():
del self.dict_gsjm[code]
elif gubun in ['매수완료', '매수취소']:
if code in self.list_buy:
self.list_buy.remove(code)
elif gubun in ['매도완료', '매도취소']:
if code in self.list_sell:
self.list_sell.remove(code)
if code in self.dict_high.keys():
del self.dict_high[code]
def BuyStrategy(self, 현재가, 시가, 고가, 저가, 등락율, 당일거래대금, 체결강도,
초당매수수량, 초당매도수량, VI해제시간, VI아래5호가, 매도총잔량, 매수총잔량,
매도호가5, 매도호가4, 매도호가3, 매도호가2, 매도호가1, 매수호가1, 매수호가2, 매수호가3, 매수호가4, 매수호가5,
매도잔량5, 매도잔량4, 매도잔량3, 매도잔량2, 매도잔량1, 매수잔량1, 매수잔량2, 매수잔량3, 매수잔량4, 매수잔량5,
종목코드, 체결시간, 틱수신시간, 종목명, 잔고종목):
if 종목코드 not in self.dict_gsjm.keys():
return
self.CheckStrategy()
고저평균 = round((고가 + 저가) / 2)
고저평균대비등락율 = round((현재가 / 고저평균 - 1) * 100, 2)
직전당일거래대금 = self.dict_gsjm[종목코드]['당일거래대금'][0]
초당거래대금 = 0 if 직전당일거래대금 == 0 else int(당일거래대금 - 직전당일거래대금)
구분 = '장초' if int(strf_time('%H%M%S')) < 100000 else '장중'
평균값계산틱수 = DICT_SET[f'{구분}평균값계산틱수']
평균값인덱스 = 평균값계산틱수 + 1
self.dict_gsjm[종목코드] = self.dict_gsjm[종목코드].shift(1)
self.dict_gsjm[종목코드].at[0] = 등락율, 고저평균대비등락율, 초당거래대금, 당일거래대금, 체결강도, 0.
if self.dict_gsjm[종목코드]['체결강도'][평균값계산틱수] != 0.:
초당거래대금평균 = int(self.dict_gsjm[종목코드]['초당거래대금'][1:평균값인덱스].mean())
체결강도평균 = round(self.dict_gsjm[종목코드]['체결강도'][1:평균값인덱스].mean(), 2)
최고체결강도 = round(self.dict_gsjm[종목코드]['체결강도'][1:평균값인덱스].max(), 2)
self.dict_gsjm[종목코드].at[평균값인덱스] = 0., 0., 초당거래대금평균, 0, 체결강도평균, 최고체결강도
매수 = True
직전체결강도 = self.dict_gsjm[종목코드]['체결강도'][1]
self.dict_data[종목코드] = [
현재가, 시가, 고가, 저가, 등락율, 고저평균대비등락율, 당일거래대금, 초당거래대금, 초당거래대금평균, 체결강도,
체결강도평균, 최고체결강도, 직전체결강도, 초당매수수량, 초당매도수량, VI해제시간, VI아래5호가, 매도총잔량, 매수총잔량,
매도호가5, 매도호가4, 매도호가3, 매도호가2, 매도호가1, 매수호가1, 매수호가2, 매수호가3, 매수호가4, 매수호가5,
매도잔량5, 매도잔량4, 매도잔량3, 매도잔량2, 매도잔량1, 매수잔량1, 매수잔량2, 매수잔량3, 매수잔량4, 매수잔량5
]
if 잔고종목:
return
if 종목코드 in self.list_buy:
return
if 매수:
매수수량 = int(self.int_tujagm / 현재가)
if 매수수량 > 0:
남은수량 = 매수수량
직전남은수량 = 매수수량
매수금액 = 0
호가정보 = {매도호가1: 매도잔량1}
for 매도호가, 매도잔량 in 호가정보.items():
남은수량 -= 매도잔량
if 남은수량 <= 0:
매수금액 += 매도호가 * 직전남은수량
break
else:
매수금액 += 매도호가 * 매도잔량
직전남은수량 = 남은수량
if 남은수량 <= 0:
예상체결가 = round(매수금액 / 매수수량, 2)
self.list_buy.append(종목코드)
self.traderQ.put(['매수', 종목코드, 종목명, 예상체결가, 매수수량])
if now() > self.dict_time['연산시간']:
gap = float2str1p6((now() - 틱수신시간).total_seconds())
self.windowQ.put([1, f'전략스 연산 시간 알림 - 수신시간과 연산시간의 차이는 [{gap}]초입니다.'])
self.dict_time['연산시간'] = timedelta_sec(60)
def SellStrategy(self, 종목코드, 종목명, 수익률, 보유수량, 현재가, 매수시간):
if 종목코드 not in self.dict_gsjm.keys() or 종목코드 not in self.dict_data.keys():
return
if 종목코드 in self.list_sell:
return
매도 = False
구분 = '장초' if int(strf_time('%H%M%S')) < 100000 else '장중'
현재가, 시가, 고가, 저가, 등락율, 고저평균대비등락율, 당일거래대금, 초당거래대금, 초당거래대금평균, 체결강도, \
체결강도평균, 최고체결강도, 직전체결강도, 초당매수수량, 초당매도수량, VI해제시간, VI아래5호가, 매도총잔량, 매수총잔량, \
매도호가5, 매도호가4, 매도호가3, 매도호가2, 매도호가1, 매수호가1, 매수호가2, 매수호가3, 매수호가4, 매수호가5, \
매도잔량5, 매도잔량4, 매도잔량3, 매도잔량2, 매도잔량1, 매수잔량1, 매수잔량2, 매수잔량3, 매수잔량4, 매수잔량5 = \
self.dict_data[종목코드]
if 종목코드 not in self.dict_high.keys():
self.dict_high[종목코드] = 수익률
elif 수익률 > self.dict_high[종목코드]:
self.dict_high[종목코드] = 수익률
최고수익률 = self.dict_high[종목코드]
if 수익률 <= -2 or 수익률 >= 3:
매도 = True
if 매도:
남은수량 = 보유수량
직전남은수량 = 보유수량
매도금액 = 0
호가정보 = {매수호가1: 매수잔량1, 매수호가2: 매수잔량2, 매수호가3: 매수잔량3, 매수호가4: 매수잔량4, 매수호가5: 매수잔량5}
for 매수호가, 매수잔량 in 호가정보.items():
남은수량 -= 매수잔량
if 남은수량 <= 0:
매도금액 += 매수호가 * 직전남은수량
break
else:
매도금액 += 매수호가 * 매수잔량
직전남은수량 = 남은수량
if 남은수량 <= 0:
예상체결가 = round(매도금액 / 보유수량, 2)
self.list_sell.append(종목코드)
self.traderQ.put(['매도', 종목코드, 종목명, 예상체결가, 보유수량])
def CheckStrategy(self):
if int(strf_time('%H%M%S')) >= 100000 and not self.startjjstg:
for code in list(self.dict_gsjm.keys()):
data = np.zeros((DICT_SET['장중평균값계산틱수'] + 2, len(columns_gj))).tolist()
df = pd.DataFrame(data, columns=columns_gj)
self.dict_gsjm[code] = df.copy()
self.startjjstg = True
@thread_decorator
def UpdateInfo(self):
info = [6, self.dict_intg['메모리'], self.dict_intg['스레드'], self.dict_intg['시피유']]
self.windowQ.put(info)
self.UpdateSysinfo()
def UpdateSysinfo(self):
p = psutil.Process(os.getpid())
self.dict_intg['메모리'] = round(p.memory_info()[0] / 2 ** 20.86, 2)
self.dict_intg['스레드'] = p.num_threads()
self.dict_intg['시피유'] = round(p.cpu_percent(interval=2) / 2, 2)
| true | true |
f72e2fa4abeaaaa3579e087d88b76ff66cf23dc8 | 1,018 | py | Python | vestlus/views/membership_detail.py | lehvitus/vestlus | 6d9c8b1de7821e544e0c7c99f42d60f8f3805557 | [
"BSD-3-Clause"
] | 12 | 2020-07-02T23:36:02.000Z | 2020-12-15T07:29:20.000Z | vestlus/views/membership_detail.py | lehvitus/vestlus | 6d9c8b1de7821e544e0c7c99f42d60f8f3805557 | [
"BSD-3-Clause"
] | null | null | null | vestlus/views/membership_detail.py | lehvitus/vestlus | 6d9c8b1de7821e544e0c7c99f42d60f8f3805557 | [
"BSD-3-Clause"
] | null | null | null | from django.utils import timezone
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from django.views.generic import DetailView
from django.urls import path
from .routes import routes
from ..models import Membership, GroupMessage
@method_decorator([login_required], name='dispatch')
class MembershipDetailView(DetailView):
    """Detail page for a Membership; restricted to authenticated users.

    Adds the channel's messages for the membership's user to the template
    context under the 'messages' key.
    """
    model = Membership
    context_object_name = 'membership'
    template_name = 'membership_detail.html'

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # NOTE(review): the 'messages' key can collide with the
        # django.contrib.messages context processor -- confirm intended.
        context['messages'] = GroupMessage.custom_objects.get_for_channel(
            channel=self.object.channel,
            user=self.object.user
        )
        return context
# Register the membership detail route on the app's shared route list.
routes.append(
    path('memberships/<slug:slug>', MembershipDetailView.as_view(), name='membership-detail')
)
| 30.848485 | 93 | 0.735756 | from django.utils import timezone
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from django.views.generic import DetailView
from django.urls import path
from .routes import routes
from ..models import Membership, GroupMessage
@method_decorator([login_required], name='dispatch')
class MembershipDetailView(DetailView):
model = Membership
context_object_name = 'membership'
template_name = 'membership_detail.html'
def get_context_data(self, **kwargs):
messages = GroupMessage.custom_objects.get_for_channel(
channel=self.object.channel,
user=self.object.user
)
context = super().get_context_data(**kwargs)
context['messages'] = messages
return context
routes.append(
path('memberships/<slug:slug>', MembershipDetailView.as_view(), name='membership-detail')
)
| true | true |
f72e30a9cc0654d4161f78820a66eb9ac6875248 | 5,073 | py | Python | nemo_text_processing/inverse_text_normalization/es/taggers/decimal.py | hamjam/NeMo | b3484d32e1317666151f931bfa39867d88ed8658 | [
"Apache-2.0"
] | 1 | 2022-03-08T02:48:44.000Z | 2022-03-08T02:48:44.000Z | nemo_text_processing/inverse_text_normalization/es/taggers/decimal.py | hamjam/NeMo | b3484d32e1317666151f931bfa39867d88ed8658 | [
"Apache-2.0"
] | null | null | null | nemo_text_processing/inverse_text_normalization/es/taggers/decimal.py | hamjam/NeMo | b3484d32e1317666151f931bfa39867d88ed8658 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo_text_processing.inverse_text_normalization.es.utils import get_abs_path
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_DIGIT,
GraphFst,
delete_extra_space,
delete_space,
)
try:
import pynini
from pynini.lib import pynutil
PYNINI_AVAILABLE = True
except (ModuleNotFoundError, ImportError):
PYNINI_AVAILABLE = False
def get_quantity(decimal: 'pynini.FstLike', cardinal_up_to_million: 'pynini.FstLike') -> 'pynini.FstLike':
    """
    Build an FST that maps a cardinal or decimal followed by a large-number
    quantity word onto its tagged numeral form, e.g.
    one million -> integer_part: "1" quantity: "million"
    one point five million -> integer_part: "1" fractional_part: "5" quantity: "million"
    Args:
        decimal: decimal FST
        cardinal_up_to_million: cardinal FST
    """
    # strip leading zeros from the cardinal's digit output
    strip_leading_zeros = (
        pynutil.delete(pynini.closure("0")) + pynini.difference(NEMO_DIGIT, "0") + pynini.closure(NEMO_DIGIT)
    )
    integer = cardinal_up_to_million @ strip_leading_zeros
    # Spanish large-number quantity words (singular and plural)
    quantity_words = pynini.union(
        "millón",
        "millones",
        "millardo",
        "millardos",
        "billón",
        "billones",
        "trillón",
        "trillones",
        "cuatrillón",
        "cuatrillones",
    )
    quantity_tag = pynutil.insert("quantity: \"") + quantity_words + pynutil.insert("\"")
    # cardinal + quantity word
    from_cardinal = (
        pynutil.insert("integer_part: \"")
        + integer
        + pynutil.insert("\"")
        + delete_extra_space
        + quantity_tag
    )
    # decimal + quantity word
    from_decimal = decimal + delete_extra_space + quantity_tag
    return from_cardinal | from_decimal
class DecimalFst(GraphFst):
    """
    Finite state transducer for classifying decimals.
    The decimal point is either "." or ",", determined by whether "punto" or "coma" is spoken:
        e.g. menos uno coma dos seis -> decimal { negative: "true" integer_part: "1" morphosyntactic_features: "," fractional_part: "26" }
        e.g. menos uno punto dos seis -> decimal { negative: "true" integer_part: "1" morphosyntactic_features: "." fractional_part: "26" }
    This decimal rule assumes that decimals can be pronounced as:
    (a cardinal) + ('coma' or 'punto') plus (any sequence of cardinals <1000, including 'zero')
    Also writes large numbers in shortened form:
        e.g. uno coma dos seis millón -> decimal { negative: "false" integer_part: "1" morphosyntactic_features: "," fractional_part: "26" quantity: "millón" }
        e.g. dos millones -> decimal { negative: "false" integer_part: "2" quantity: "millones" }
        e.g. mil ochocientos veinticuatro millones -> decimal { negative: "false" integer_part: "1824" quantity: "millones" }
    Args:
        cardinal: CardinalFst
    """
    def __init__(self, cardinal: GraphFst):
        super().__init__(name="decimal", kind="classify")
        # number after decimal point can be any series of cardinals <1000, including 'zero'
        graph_decimal = cardinal.numbers_up_to_thousand
        # one or more space-separated cardinal chunks make up the fractional digits
        graph_decimal = pynini.closure(graph_decimal + delete_space) + graph_decimal
        self.graph = graph_decimal
        # decimal point can be denoted by 'coma' (",") or 'punto' (".")
        decimal_point = pynini.cross("coma", "morphosyntactic_features: \",\"")
        decimal_point |= pynini.cross("punto", "morphosyntactic_features: \".\"")
        # optional leading 'menos' marks a negative number
        optional_graph_negative = pynini.closure(
            pynutil.insert("negative: ") + pynini.cross("menos", "\"true\"") + delete_extra_space, 0, 1
        )
        graph_fractional = pynutil.insert("fractional_part: \"") + graph_decimal + pynutil.insert("\"")
        # integer part accepts any cardinal plus the standalone word for zero
        cardinal_graph = cardinal.graph_no_exception | pynini.string_file(get_abs_path("data/numbers/zero.tsv"))
        graph_integer = pynutil.insert("integer_part: \"") + cardinal_graph + pynutil.insert("\"")
        # integer part is optional via the (0, 1) closure
        final_graph_wo_sign = (
            pynini.closure(graph_integer + delete_extra_space, 0, 1)
            + decimal_point
            + delete_extra_space
            + graph_fractional
        )
        final_graph = optional_graph_negative + final_graph_wo_sign
        # unsigned variant also accepts a quantity-word form (see get_quantity)
        self.final_graph_wo_negative = final_graph_wo_sign | get_quantity(
            final_graph_wo_sign, cardinal.numbers_up_to_million
        )
        final_graph |= optional_graph_negative + get_quantity(final_graph_wo_sign, cardinal.numbers_up_to_million)
        final_graph = self.add_tokens(final_graph)
        self.fst = final_graph.optimize()
| 40.91129 | 163 | 0.667258 |
from nemo_text_processing.inverse_text_normalization.es.utils import get_abs_path
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_DIGIT,
GraphFst,
delete_extra_space,
delete_space,
)
try:
import pynini
from pynini.lib import pynutil
PYNINI_AVAILABLE = True
except (ModuleNotFoundError, ImportError):
PYNINI_AVAILABLE = False
def get_quantity(decimal: 'pynini.FstLike', cardinal_up_to_million: 'pynini.FstLike') -> 'pynini.FstLike':
numbers = cardinal_up_to_million @ (
pynutil.delete(pynini.closure("0")) + pynini.difference(NEMO_DIGIT, "0") + pynini.closure(NEMO_DIGIT)
)
suffix = pynini.union(
"millón",
"millones",
"millardo",
"millardos",
"billón",
"billones",
"trillón",
"trillones",
"cuatrillón",
"cuatrillones",
)
res = (
pynutil.insert("integer_part: \"")
+ numbers
+ pynutil.insert("\"")
+ delete_extra_space
+ pynutil.insert("quantity: \"")
+ suffix
+ pynutil.insert("\"")
)
res |= decimal + delete_extra_space + pynutil.insert("quantity: \"") + suffix + pynutil.insert("\"")
return res
class DecimalFst(GraphFst):
def __init__(self, cardinal: GraphFst):
super().__init__(name="decimal", kind="classify")
graph_decimal = cardinal.numbers_up_to_thousand
graph_decimal = pynini.closure(graph_decimal + delete_space) + graph_decimal
self.graph = graph_decimal
decimal_point = pynini.cross("coma", "morphosyntactic_features: \",\"")
decimal_point |= pynini.cross("punto", "morphosyntactic_features: \".\"")
optional_graph_negative = pynini.closure(
pynutil.insert("negative: ") + pynini.cross("menos", "\"true\"") + delete_extra_space, 0, 1
)
graph_fractional = pynutil.insert("fractional_part: \"") + graph_decimal + pynutil.insert("\"")
cardinal_graph = cardinal.graph_no_exception | pynini.string_file(get_abs_path("data/numbers/zero.tsv"))
graph_integer = pynutil.insert("integer_part: \"") + cardinal_graph + pynutil.insert("\"")
final_graph_wo_sign = (
pynini.closure(graph_integer + delete_extra_space, 0, 1)
+ decimal_point
+ delete_extra_space
+ graph_fractional
)
final_graph = optional_graph_negative + final_graph_wo_sign
self.final_graph_wo_negative = final_graph_wo_sign | get_quantity(
final_graph_wo_sign, cardinal.numbers_up_to_million
)
final_graph |= optional_graph_negative + get_quantity(final_graph_wo_sign, cardinal.numbers_up_to_million)
final_graph = self.add_tokens(final_graph)
self.fst = final_graph.optimize()
| true | true |
f72e31c4f8145bf8b6d8b38fadebe4ece9f6f934 | 7,710 | py | Python | tool/tools.py | Khan-Xu/Pyrod | 3ee62e3d6037328a010d9340bf1e8ff991f48414 | [
"MIT"
] | null | null | null | tool/tools.py | Khan-Xu/Pyrod | 3ee62e3d6037328a010d9340bf1e8ff991f48414 | [
"MIT"
] | null | null | null | tool/tools.py | Khan-Xu/Pyrod | 3ee62e3d6037328a010d9340bf1e8ff991f48414 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 15 21:50:58 2018
@author: USER
"""
# Codes are free to use. Do whatever you want
from __future__ import absolute_import
"""Read raw data"""
####################### LIBRARY #############################
# exceptions library
from exceptions import (Data_Format_Exception,
Data_Match_Exception)
# Python stdlib imports
import datetime
from math import factorial
# data processing library
import numpy as np
# pyrod library
####################### CONSTANT ############################
# constant
####################### FUNCTIONS ###########################
'.......................optimise.........................'
# f - fitting data
# y - experiment data
# mask - mask data
def R_square(f, y, mask):
    """Return the coefficient of determination (R^2) between fit and data.

    Parameters
    ----------
    f : array-like
        Fitted (model) values.
    y : array-like
        Experimental (observed) values.
    mask : array-like
        Per-point mask/weights; 0 zeroes a point out of the residuals.
        NOTE: masked-out points are zeroed but still counted in ``len(ry)``
        when the mean is taken (original behavior, kept for compatibility).

    Returns
    -------
    float
        R^2 = 1 - SS_res / SS_tot computed on the masked arrays.

    Raises
    ------
    Data_Match_Exception
        If ``f``, ``y`` and ``mask`` do not all have the same length.
    """
    if not len(f) == len(y) == len(mask):
        raise Data_Match_Exception('Please input equal length')

    def nplist(data):
        # Normalize any supported container to a flat numpy array.
        if isinstance(data, np.matrix):
            # np.matrix is an ndarray subclass, so it must be tested first
            # (the original checked ndarray first and never reached this
            # branch); flatten it to 1-D.
            return np.asarray(data).reshape(-1)
        if isinstance(data, np.ndarray):
            # BUGFIX: the original branch was a bare ``pass`` that left the
            # result unassigned, crashing with UnboundLocalError for plain
            # ndarray input.
            return data
        if isinstance(data, list):
            return np.array(data)
        # any other iterable (tuple, generator, ...): unpack element by element.
        # The original wrapped this in ``except Data_Format_Exception`` which
        # could never fire (np.array does not raise project exceptions).
        return np.array([e for e in data])

    # transform to np arrays and apply the mask
    rf, ry = nplist(f) * nplist(mask), nplist(y) * nplist(mask)
    # coefficient of determination: 1 - SS_res / SS_tot
    ss_tot = np.sum((ry - np.sum(ry) / len(ry)) ** 2)
    ss_res = np.sum((ry - rf) ** 2)
    return 1 - ss_res / ss_tot
def opt_step_brute(func,x0_range,grid_size = 10,step = 2):
    """Two-stage ("stepped") brute-force minimiser for 1- or 2-parameter functions.

    Stage 1 evaluates ``func`` on a coarse grid spanning ``x0_range``;
    stage 2 builds a finer grid in a neighbourhood of the stage-1 minimum
    and searches again.

    func: callable; receives a scalar in the 1-d case or a 2-element list
        ``[u, v]`` in the 2-d case.
    x0_range: ``[[x1-, x1+]]`` or ``[[x1-, x1+], [x2-, x2+]]``; only one
        or two axes are supported.
    grid_size: number of sample points per axis on each grid.
    step: nominally the number of refinement stages.
        NOTE(review): it is overwritten to 3 immediately below and never
        used by the algorithm afterwards -- confirm before relying on it.

    Returns the stage-2 minimiser: a scalar for the 1-d case, or (2-d
    case) a one-row nested list ``[[u, v, func([u, v])]]``.
    NOTE(review): any other ``len(x0_range)`` leaves ``min_step2`` unbound
    and raises UnboundLocalError at the final return.
    """
    # current step is 3
    step = 3
    # grid_size and step have to be integer
    try:
        grid_size = int(grid_size)
        step = int(step)
    except ValueError:
        raise ValueError("grid_size and step have to be of type int")
    # one dimensional step brute method
    if len(x0_range) == 1:
        # store func(grid_data) result
        grid_list0 = []
        # stage-1 coarse grid over the single axis
        x0 = np.linspace(x0_range[0][0],x0_range[0][1],grid_size)
        # func(grid_data)
        for px in range(grid_size):
            grid_list0.append(func(x0[px]))
        # store min in step1
        min_idx = np.argmin(grid_list0)
        # continue step2: refine within +/- one coarse grid spacing
        grid_list1 = []
        x1 = x0[min_idx]
        delta = (abs(x0_range[0][1] - x0_range[0][0]))/grid_size
        x2 = np.linspace(x1-delta,x1+delta,grid_size)
        for sx in range(grid_size):
            grid_list1.append(func(x2[sx]))
        # scalar minimiser of the refined grid
        min_step2 = x2[np.argmin(grid_list1)]
    elif len(x0_range) == 2:
        # step1: grid the x0_range
        min_step1 = []
        au = np.linspace(x0_range[0][0],x0_range[0][1],grid_size)
        av = np.linspace(x0_range[1][0],x0_range[1][1],grid_size)
        # find minimum in xu and xv grid; returns the 1x3 matrix row
        # [u, v, func([u, v])] with the smallest function value
        def grid_min(xu,xv):
            x0_grid = np.meshgrid(xu, xv)
            # grid list: one row per (u, v) sample -> [u, v, f(u, v)]
            grid_list = np.mat(np.zeros([grid_size**2,3]))
            idx = 0
            # pu-- for postion in u axes
            for pu in range(grid_size):
                # pv--for postion in v axes
                for pv in range(grid_size):
                    grid_list[idx,0] = x0_grid[0][pu,pv]
                    grid_list[idx,1] = x0_grid[1][pu,pv]
                    grid_list[idx,2] = func([x0_grid[0][pu,pv],
                                            x0_grid[1][pu,pv]])
                    idx = idx + 1
            # row index of the smallest sampled function value
            min_idx = np.argmin(grid_list[:,2])
            return grid_list[min_idx,:]
        # append the firt minimum before rocking
        min_step1.append(grid_min(au,av))
        # start rocking, try to avoid local minmum: shift the whole grid
        # by half a spacing and search again
        bu = au - (au[1]-au[0])/2
        bv = av - (av[1]-bv[0])/2 if False else av - (av[1]-av[0])/2
        min_step1.append(grid_min(bu,bv))
        # step 2
        # step 2 new x range: bracket the two candidate minima
        u_min = np.min([min_step1[0][0,0],
                        min_step1[1][0,0]])
        u_max = np.max([min_step1[0][0,0],
                        min_step1[1][0,0]])
        deta_u = u_max - u_min
        v_min = np.min([min_step1[0][0,1],
                        min_step1[1][0,1]])
        v_max = np.max([min_step1[0][0,1],
                        min_step1[1][0,1]])
        deta_v = v_max - v_min
        # new u and v: finer grid spanning the bracketed neighbourhood
        cu = np.linspace(u_min-deta_u, u_min+deta_u, grid_size)
        cv = np.linspace(v_min-deta_v, v_min+deta_v, grid_size)
        min_step2 = grid_min(cu,cv).tolist()
    return min_step2
'......................smooth.........................'
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
    """Smooth (and optionally differentiate) data with a Savitzky-Golay filter.

    The Savitzky-Golay filter removes high frequency noise from data by
    least-squares fitting a polynomial of degree ``order`` to each window
    of ``window_size`` samples.  It has the advantage of preserving the
    original shape and features of the signal better than other types of
    filtering approaches, such as moving averages techniques.

    Parameters
    ----------
    y : array_like
        The 1-d input signal.
    window_size : int
        Length of the filter window; must be a positive odd integer.
    order : int
        Order of the fitted polynomial; must be <= window_size - 2.
    deriv : int, optional
        Order of the derivative to compute (0 = smoothing only).
    rate : number, optional
        Sample-spacing scale applied when differentiating.

    Returns
    -------
    numpy.ndarray
        The smoothed signal (or its ``deriv``-th derivative), same length as ``y``.

    Raises
    ------
    ValueError
        If ``window_size`` or ``order`` cannot be converted to int.
    TypeError
        If ``window_size`` is not a positive odd number, or is too small
        for the requested polynomial order.

    References
    ----------
    .. [1] A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of
       Data by Simplified Least Squares Procedures. Analytical
       Chemistry, 1964, 36 (8), pp 1627-1639.
    .. [2] Numerical Recipes 3rd Edition: The Art of Scientific Computing
       W.H. Press, S.A. Teukolsky, W.T. Vetterling, B.P. Flannery
       Cambridge University Press ISBN-13: 9780521880688
    """
    # Bug fix: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int/abs behave identically here.
    try:
        window_size = abs(int(window_size))
        order = abs(int(order))
    except ValueError:
        raise ValueError("window_size and order have to be of type int")
    if window_size % 2 != 1 or window_size < 1:
        raise TypeError("window_size size must be a positive odd number")
    if window_size < order + 2:
        raise TypeError("window_size is too small for the polynomials order")
    # accept any 1-d sequence, not only ndarrays
    y = np.asarray(y, dtype=float)
    order_range = range(order + 1)
    half_window = (window_size - 1) // 2
    # precompute coefficients: pseudo-inverse of the Vandermonde matrix of
    # window offsets; row `deriv` gives the convolution kernel for the
    # requested derivative.  (np.mat/.A replaced with plain ndarrays --
    # np.matrix is deprecated; the numbers are identical.)
    b = np.array([[k**i for i in order_range]
                  for k in range(-half_window, half_window + 1)])
    m = np.linalg.pinv(b)[deriv] * rate**deriv * factorial(deriv)
    # pad the signal at the extremes with
    # values taken from the signal itself (mirrored about the endpoints)
    firstvals = y[0] - np.abs(y[1:half_window + 1][::-1] - y[0])
    lastvals = y[-1] + np.abs(y[-half_window - 1:-1][::-1] - y[-1])
    y = np.concatenate((firstvals, y, lastvals))
    return np.convolve(m[::-1], y, mode='valid')
######################## CLASSS #############################
| 31.469388 | 90 | 0.500778 |
from __future__ import absolute_import
window = (window_size -1) // 2
b = np.mat([[k**i for i in order_range] for k in range(-half_window, half_window+1)])
m = np.linalg.pinv(b).A[deriv] * rate**deriv * factorial(deriv)
firstvals = y[0] - np.abs( y[1:half_window+1][::-1] - y[0] )
lastvals = y[-1] + np.abs(y[-half_window-1:-1][::-1] - y[-1])
y = np.concatenate((firstvals, y, lastvals))
return np.convolve( m[::-1], y, mode='valid')
| true | true |
f72e324f5e7083cdb0caf72d507ef3b8937e4dd3 | 200 | py | Python | Pacotes/aula21.py | TonyRio/Python-Exercicios | 8a72d1b12418c6485794dae184425df0daf098bb | [
"MIT"
] | null | null | null | Pacotes/aula21.py | TonyRio/Python-Exercicios | 8a72d1b12418c6485794dae184425df0daf098bb | [
"MIT"
] | null | null | null | Pacotes/aula21.py | TonyRio/Python-Exercicios | 8a72d1b12418c6485794dae184425df0daf098bb | [
"MIT"
] | null | null | null |
def teste():
global s
print(f'na função teste S vale {s+2}')
print(f'na função teste N vale {n+1}')
s=10
n=4
print(f'no programa N vale {n}')
print(f'no programa S vale {s}')
teste()
| 14.285714 | 42 | 0.605 |
def teste():
global s
print(f'na função teste S vale {s+2}')
print(f'na função teste N vale {n+1}')
s=10
n=4
print(f'no programa N vale {n}')
print(f'no programa S vale {s}')
teste()
| true | true |
f72e334ec5fd24bbda5b8e0ef6637a8c287b6e2f | 15,343 | py | Python | corehq/apps/app_manager/views/formdesigner.py | dborowiecki/commcare-hq | f2f4fa67faec09040a98502f5657444075b63f2e | [
"BSD-3-Clause"
] | null | null | null | corehq/apps/app_manager/views/formdesigner.py | dborowiecki/commcare-hq | f2f4fa67faec09040a98502f5657444075b63f2e | [
"BSD-3-Clause"
] | null | null | null | corehq/apps/app_manager/views/formdesigner.py | dborowiecki/commcare-hq | f2f4fa67faec09040a98502f5657444075b63f2e | [
"BSD-3-Clause"
] | null | null | null | import json
import logging
from django.conf import settings
from django.contrib import messages
from django.http import Http404, HttpResponse, HttpResponseBadRequest
from django.shortcuts import render
from django.urls import reverse
from django.utils.translation import ugettext as _
from django.views.decorators.http import require_GET
from couchdbkit.exceptions import ResourceConflict
from dimagi.utils.logging import notify_exception
from corehq import privileges, toggles
from corehq.apps.accounting.utils import domain_has_privilege
from corehq.apps.analytics.tasks import (
HUBSPOT_FORM_BUILDER_FORM_ID,
send_hubspot_form,
)
from corehq.apps.app_manager import add_ons
from corehq.apps.app_manager.app_schemas.casedb_schema import get_casedb_schema
from corehq.apps.app_manager.app_schemas.session_schema import (
get_session_schema,
)
from corehq.apps.app_manager.const import (
SCHEDULE_CURRENT_VISIT_NUMBER,
SCHEDULE_GLOBAL_NEXT_VISIT_DATE,
SCHEDULE_NEXT_DUE,
SCHEDULE_UNSCHEDULED_VISIT,
)
from corehq.apps.app_manager.dbaccessors import get_app
from corehq.apps.app_manager.decorators import require_can_edit_apps
from corehq.apps.app_manager.exceptions import (
AppManagerException,
FormNotFoundException,
)
from corehq.apps.app_manager.models import Form, ModuleNotFoundException
from corehq.apps.app_manager.templatetags.xforms_extras import translate
from corehq.apps.app_manager.util import (
app_callout_templates,
is_linked_app,
is_usercase_in_use,
)
from corehq.apps.app_manager.views.apps import get_apps_base_context
from corehq.apps.app_manager.views.forms import FormHasSubmissionsView
from corehq.apps.app_manager.views.notifications import (
get_facility_for_form,
notify_form_opened,
)
from corehq.apps.app_manager.views.utils import (
back_to_main,
bail,
form_has_submissions,
set_lang_cookie,
)
from corehq.apps.cloudcare.utils import should_show_preview_app
from corehq.apps.domain.decorators import track_domain_request
from corehq.apps.fixtures.fixturegenerators import item_lists_by_domain
from corehq.apps.hqwebapp.templatetags.hq_shared_tags import cachebuster
from corehq.util.context_processors import websockets_override
logger = logging.getLogger(__name__)
@require_can_edit_apps
@track_domain_request(calculated_prop='cp_n_form_builder_entered')
def form_source(request, domain, app_id, form_unique_id):
    """Open the form builder for the form identified by ``form_unique_id``.

    Falls back to ``bail`` (a redirect with a warning) when either the
    form or its enclosing module cannot be resolved.
    """
    app = get_app(domain, app_id)
    missing = None
    form = module = None
    try:
        form = app.get_form(form_unique_id)
    except FormNotFoundException:
        missing = "form"
    else:
        try:
            module = form.get_module()
        except AttributeError:
            missing = "module"
    if missing is not None:
        return bail(request, domain, app_id, not_found=missing)
    return _get_form_designer_view(request, domain, app, module, form)
@require_can_edit_apps
def form_source_legacy(request, domain, app_id, module_id=None, form_id=None):
    """Legacy URL for opening the form builder by module/form index.

    This view has been kept around to not break any documentation on
    example apps and partner-distributed documentation on existing apps.
    PLEASE DO NOT DELETE.
    """
    app = get_app(domain, app_id)
    missing = None
    module = form = None
    try:
        module = app.get_module(module_id)
    except ModuleNotFoundException:
        missing = "module"
    else:
        try:
            form = module.get_form(form_id)
        except IndexError:
            missing = "form"
    if missing is not None:
        return bail(request, domain, app_id, not_found=missing)
    return _get_form_designer_view(request, domain, app, module, form)
def _get_form_designer_view(request, domain, app, module, form):
    """Render the Vellum form-builder page for ``form``, or redirect away.

    Redirects back to the main app page when the app is an old build,
    when the form is locked against Vellum editing (``no_vellum``), or
    when the app is a linked (downstream) app.  Otherwise assembles the
    Vellum initialisation options and renders
    ``app_manager/form_designer.html``.
    """
    # Old builds are read-only: send the user to the current version.
    if app and app.copy_of:
        messages.warning(request, _(
            "You tried to edit a form that was from a previous release, so "
            "we have directed you to the latest version of your application."
        ))
        return back_to_main(request, domain, app_id=app.id)
    # Admin-locked forms cannot be opened in the builder.
    if form.no_vellum:
        messages.warning(request, _(
            "You tried to edit this form in the Form Builder. "
            "However, your administrator has locked this form against editing "
            "in the form builder, so we have redirected you to "
            "the form's front page instead."
        ))
        return back_to_main(request, domain, app_id=app.id,
                            form_unique_id=form.unique_id)
    # Linked apps are edited upstream only.
    if is_linked_app(app):
        messages.warning(request, _(
            "You tried to edit this form in the Form Builder. "
            "However, this is a linked application and you can only make changes to the "
            "upstream version."
        ))
        return back_to_main(request, domain, app_id=app.id)
    send_hubspot_form(HUBSPOT_FORM_BUILDER_FORM_ID, request)
    def _form_too_large(_app, _form):
        # form less than 0.1MB, anything larger starts to have
        # performance issues with fullstory
        return _app.blobs['{}.xml'.format(_form.unique_id)]['content_length'] > 102400
    context = get_apps_base_context(request, domain, app)
    # WARNING: this exports EVERY local defined above (request, domain,
    # app, module, form, _form_too_large, ...) into the template context.
    # Renaming or adding locals in this function changes the context keys.
    context.update(locals())
    vellum_options = _get_base_vellum_options(request, domain, app, context['lang'])
    vellum_options['core'] = _get_vellum_core_context(request, domain, app, module, form, context['lang'])
    vellum_options['plugins'] = _get_vellum_plugins(domain, form, module)
    vellum_options['features'] = _get_vellum_features(request, domain, app)
    context['vellum_options'] = vellum_options
    context.update({
        'vellum_debug': settings.VELLUM_DEBUG,
        'nav_form': form,
        'formdesigner': True,
        # FullStory is skipped for forms over ~0.1MB (see _form_too_large)
        'include_fullstory': not _form_too_large(app, form),
        'CKEDITOR_BASEPATH': "app_manager/js/vellum/lib/ckeditor/",
        'show_live_preview': should_show_preview_app(
            request,
            app,
            request.couch_user.username,
        ),
        'show_ui_notification_to_hide_translations': (len(app.langs) > 2),
    })
    context.update(_get_requirejs_context())
    # Websocket notification options are only surfaced to superusers.
    if request.user.is_superuser:
        context.update({'notification_options': _get_notification_options(request, domain, app, form)})
    notify_form_opened(domain, request.couch_user, app.id, form.unique_id)
    response = render(request, "app_manager/form_designer.html", context)
    set_lang_cookie(response, context['lang'])
    return response
@require_GET
@require_can_edit_apps
def get_form_data_schema(request, domain, form_unique_id):
    """Return the data-source schemas for a form as a JSON array.

    The response contains the session schema, the casedb schema (when the
    form uses cases or the usercase is enabled for the domain), and the
    domain's lookup tables, sorted by name.  Append ``?pretty`` to the
    query string for indented output.

    A data source schema definition is a dictionary; for details on its
    content see https://github.com/dimagi/Vellum/blob/master/src/datasources.js
    """
    try:
        form, app = Form.get_form(form_unique_id, and_app=True)
    except ResourceConflict:
        raise Http404()
    if app.domain != domain:
        raise Http404()
    data = []
    try:
        data.append(get_session_schema(form))
        if form.requires_case() or is_usercase_in_use(domain):
            data.append(get_casedb_schema(form))
    except AppManagerException as e:
        notify_exception(request, message=str(e))
        return HttpResponseBadRequest(
            str(e) or _("There is an error in the case management of your application. "
                        "Please fix the error to see case properties in this tree")
        )
    except Exception as e:
        notify_exception(request, message=str(e))
        return HttpResponseBadRequest("schema error, see log for details")
    data.extend(
        sorted(item_lists_by_domain(domain), key=lambda x: x['name'].lower())
    )
    dump_kwargs = {"indent": 2} if "pretty" in request.GET else {}
    return HttpResponse(json.dumps(data, **dump_kwargs))
@require_GET
def ping(request):
    # Liveness endpoint polled by the form builder; it is wired up as the
    # 'activityUrl' in _get_vellum_core_context.
    return HttpResponse("pong")
def _get_base_vellum_options(request, domain, app, displayLang):
    """Build the base option dict passed to Vellum when it is initialised.

    ``displayLang`` is the display language taken from the base page
    context.
    """
    # One upload endpoint per media type, resolved from the URL names
    # "hqmedia_uploader_<type>".
    upload_urls = {
        media_type: reverse("hqmedia_uploader_%s" % media_type,
                            args=[domain, app.id])
        for media_type in ('image', 'audio', 'video', 'text')
    }
    return {
        'intents': {
            'templates': next(app_callout_templates),
        },
        'javaRosa': {
            'langs': app.langs,
            'displayLanguage': displayLang,
            'showOnlyCurrentLang': (app.smart_lang_display and (len(app.langs) > 2)),
        },
        'uploader': {
            'uploadUrls': upload_urls,
            'objectMap': app.get_object_map(),
            'sessionid': request.COOKIES.get('sessionid'),
        },
    }
def _get_vellum_core_context(request, domain, app, module, form, lang):
    """
    Returns the core context that will be passed into vellum when it is
    initialized.

    NOTE(review): ``lang`` is accepted but not used inside this function
    -- confirm whether callers rely on the signature only.
    """
    core = {
        'dataSourcesEndpoint': reverse('get_form_data_schema',
                                       kwargs={'domain': domain,
                                               'form_unique_id': form.get_unique_id()}),
        'form': form.source,
        'formId': form.get_unique_id(),
        'formName': translate(form.name, app.langs[0], app.langs),
        'saveType': 'patch',
        'saveUrl': reverse('edit_form_attr',
                           args=[domain, app.id, form.get_unique_id(),
                                 'xform']),
        'patchUrl': reverse('patch_xform',
                            args=[domain, app.id, form.get_unique_id()]),
        'hasSubmissions': form_has_submissions(domain, app.id, form.get_unique_id()),
        'hasSubmissionsUrl': reverse(FormHasSubmissionsView.urlname,
                                     args=[domain, app.id, form.get_unique_id()]),
        # xpath data-node references Vellum is allowed to use, plus any
        # visit-scheduler nodes enabled for this form
        'allowedDataNodeReferences': [
            "meta/deviceID",
            "meta/instanceID",
            "meta/username",
            "meta/userID",
            "meta/timeStart",
            "meta/timeEnd",
            "meta/location",
        ] + _get_core_context_scheduler_data_nodes(module, form),
        # polled by Vellum's activity check (the `ping` view above)
        'activityUrl': reverse('ping'),
        'sessionid': request.COOKIES.get('sessionid'),
        'externalLinks': {
            'changeSubscription': reverse("domain_subscription_view",
                                          kwargs={'domain': domain}),
        },
        'invalidCaseProperties': ['name'],
    }
    # adds defaultHelpTextTemplateId / formIconClass for the form type
    core.update(_get_core_context_help_text_context(form))
    return core
def _get_vellum_plugins(domain, form, module):
    """Return the list of Vellum plugins enabled for this domain and form,
    based on the domain's feature flags and the form's case usage."""
    plugins = ["modeliteration", "itemset", "atwho"]
    ledgers_enabled = (toggles.COMMTRACK.enabled(domain)
                       or toggles.NON_COMMTRACK_LEDGERS.enabled(domain))
    if ledgers_enabled:
        plugins.append("commtrack")
    if toggles.VELLUM_SAVE_TO_CASE.enabled(domain):
        plugins.append("saveToCase")
    # The data browser only works for basic forms that touch cases
    # (directly, or via the usercase).
    uses_case = (
        (module and module.case_type and form.requires_case())
        or is_usercase_in_use(domain)
    )
    is_basic_form = form.doc_type == 'Form'
    if uses_case and is_basic_form:
        plugins.append("databrowser")
    return plugins
def _get_vellum_features(request, domain, app):
    """Assemble the feature-flag dict handed to Vellum on initialisation:
    the user/domain toggles plus app settings and privilege checks."""
    features = toggles.toggles_dict(username=request.user.username,
                                    domain=domain)
    features.update({
        'group_in_field_list': app.enable_group_in_field_list,
        'image_resize': app.enable_image_resize,
        'markdown_in_groups': app.enable_markdown_in_groups,
        'lookup_tables': domain_has_privilege(domain, privileges.LOOKUP_TABLES),
        'templated_intents': domain_has_privilege(domain, privileges.TEMPLATED_INTENTS),
        'custom_intents': domain_has_privilege(domain, privileges.CUSTOM_INTENTS),
        'rich_text': True,
        'sorted_itemsets': app.enable_sorted_itemsets,
        'advanced_itemsets': add_ons.show("advanced_itemsets", request, app),
    })
    return features
def _get_core_context_help_text_context(form):
"""
Part of the vellum core context.
Returns the appropriate icon context for the form type and the
knockout template ID context for the correct help text
information when opening a blank form with this type.
"""
if form.get_action_type() == 'open':
default_help_text_template_id = '#fd-hq-helptext-registration'
form_icon_class = 'fcc fcc-app-createform'
elif form.get_action_type() == 'close':
default_help_text_template_id = '#fd-hq-helptext-close'
form_icon_class = 'fcc fcc-app-completeform'
elif form.get_action_type() == 'update':
default_help_text_template_id = '#fd-hq-helptext-followup'
form_icon_class = 'fcc fcc-app-updateform'
else:
default_help_text_template_id = '#fd-hq-helptext-survey'
form_icon_class = 'fa fa-file-o'
return {
'defaultHelpTextTemplateId': default_help_text_template_id,
'formIconClass': form_icon_class,
}
def _get_core_context_scheduler_data_nodes(module, form):
"""
Part of the vellum core context.
Returns a list of enabled scheduler data nodes.
"""
has_schedule = (
getattr(module, 'has_schedule', False) and
getattr(form, 'schedule', False) and form.schedule.enabled
)
scheduler_data_nodes = []
if has_schedule:
scheduler_data_nodes = [
SCHEDULE_CURRENT_VISIT_NUMBER,
SCHEDULE_NEXT_DUE,
SCHEDULE_UNSCHEDULED_VISIT,
SCHEDULE_GLOBAL_NEXT_VISIT_DATE,
]
scheduler_data_nodes.extend([
"next_{}".format(f.schedule_form_id)
for f in form.get_phase().get_forms()
if getattr(f, 'schedule', False) and f.schedule.enabled
])
return scheduler_data_nodes
def _get_notification_options(request, domain, app, form):
    """Build the websocket notification settings shown on the form
    designer page (superusers only, per the caller)."""
    options = websockets_override(request)
    # Normalise JS-style 'null'/'undefined' heartbeat strings to None.
    if options['WS4REDIS_HEARTBEAT'] in ('null', 'undefined'):
        options['WS4REDIS_HEARTBEAT'] = None
    options['notify_facility'] = get_facility_for_form(domain, app.id, form.unique_id)
    options['user_id'] = request.couch_user.get_id
    return options
def _get_requirejs_context():
    """Return the requirejs URL and cache-busting args used to load
    Vellum's sources.

    The source location depends on ``settings.VELLUM_DEBUG``: falsy uses
    the built bundle, ``"dev-min"`` the minified dev build, and any other
    truthy value the raw sources.
    """
    version_args = 'version={}{}'.format(
        cachebuster("app_manager/js/vellum/src/main-components.js"),
        cachebuster("app_manager/js/vellum/src/local-deps.js"),
    )
    if not settings.VELLUM_DEBUG:
        url = "app_manager/js/vellum/src"
    elif settings.VELLUM_DEBUG == "dev-min":
        url = "formdesigner/_build/src"
    else:
        url = "formdesigner/src"
    return {
        'requirejs_args': version_args,
        'requirejs_url': url,
    }
| 36.618138 | 106 | 0.671381 | import json
import logging
from django.conf import settings
from django.contrib import messages
from django.http import Http404, HttpResponse, HttpResponseBadRequest
from django.shortcuts import render
from django.urls import reverse
from django.utils.translation import ugettext as _
from django.views.decorators.http import require_GET
from couchdbkit.exceptions import ResourceConflict
from dimagi.utils.logging import notify_exception
from corehq import privileges, toggles
from corehq.apps.accounting.utils import domain_has_privilege
from corehq.apps.analytics.tasks import (
HUBSPOT_FORM_BUILDER_FORM_ID,
send_hubspot_form,
)
from corehq.apps.app_manager import add_ons
from corehq.apps.app_manager.app_schemas.casedb_schema import get_casedb_schema
from corehq.apps.app_manager.app_schemas.session_schema import (
get_session_schema,
)
from corehq.apps.app_manager.const import (
SCHEDULE_CURRENT_VISIT_NUMBER,
SCHEDULE_GLOBAL_NEXT_VISIT_DATE,
SCHEDULE_NEXT_DUE,
SCHEDULE_UNSCHEDULED_VISIT,
)
from corehq.apps.app_manager.dbaccessors import get_app
from corehq.apps.app_manager.decorators import require_can_edit_apps
from corehq.apps.app_manager.exceptions import (
AppManagerException,
FormNotFoundException,
)
from corehq.apps.app_manager.models import Form, ModuleNotFoundException
from corehq.apps.app_manager.templatetags.xforms_extras import translate
from corehq.apps.app_manager.util import (
app_callout_templates,
is_linked_app,
is_usercase_in_use,
)
from corehq.apps.app_manager.views.apps import get_apps_base_context
from corehq.apps.app_manager.views.forms import FormHasSubmissionsView
from corehq.apps.app_manager.views.notifications import (
get_facility_for_form,
notify_form_opened,
)
from corehq.apps.app_manager.views.utils import (
back_to_main,
bail,
form_has_submissions,
set_lang_cookie,
)
from corehq.apps.cloudcare.utils import should_show_preview_app
from corehq.apps.domain.decorators import track_domain_request
from corehq.apps.fixtures.fixturegenerators import item_lists_by_domain
from corehq.apps.hqwebapp.templatetags.hq_shared_tags import cachebuster
from corehq.util.context_processors import websockets_override
logger = logging.getLogger(__name__)
@require_can_edit_apps
@track_domain_request(calculated_prop='cp_n_form_builder_entered')
def form_source(request, domain, app_id, form_unique_id):
app = get_app(domain, app_id)
try:
form = app.get_form(form_unique_id)
except FormNotFoundException:
return bail(request, domain, app_id, not_found="form")
try:
module = form.get_module()
except AttributeError:
return bail(request, domain, app_id, not_found="module")
return _get_form_designer_view(request, domain, app, module, form)
@require_can_edit_apps
def form_source_legacy(request, domain, app_id, module_id=None, form_id=None):
app = get_app(domain, app_id)
try:
module = app.get_module(module_id)
except ModuleNotFoundException:
return bail(request, domain, app_id, not_found="module")
try:
form = module.get_form(form_id)
except IndexError:
return bail(request, domain, app_id, not_found="form")
return _get_form_designer_view(request, domain, app, module, form)
def _get_form_designer_view(request, domain, app, module, form):
if app and app.copy_of:
messages.warning(request, _(
"You tried to edit a form that was from a previous release, so "
"we have directed you to the latest version of your application."
))
return back_to_main(request, domain, app_id=app.id)
if form.no_vellum:
messages.warning(request, _(
"You tried to edit this form in the Form Builder. "
"However, your administrator has locked this form against editing "
"in the form builder, so we have redirected you to "
"the form's front page instead."
))
return back_to_main(request, domain, app_id=app.id,
form_unique_id=form.unique_id)
if is_linked_app(app):
messages.warning(request, _(
"You tried to edit this form in the Form Builder. "
"However, this is a linked application and you can only make changes to the "
"upstream version."
))
return back_to_main(request, domain, app_id=app.id)
send_hubspot_form(HUBSPOT_FORM_BUILDER_FORM_ID, request)
def _form_too_large(_app, _form):
# form less than 0.1MB, anything larger starts to have
# performance issues with fullstory
return _app.blobs['{}.xml'.format(_form.unique_id)]['content_length'] > 102400
context = get_apps_base_context(request, domain, app)
context.update(locals())
vellum_options = _get_base_vellum_options(request, domain, app, context['lang'])
vellum_options['core'] = _get_vellum_core_context(request, domain, app, module, form, context['lang'])
vellum_options['plugins'] = _get_vellum_plugins(domain, form, module)
vellum_options['features'] = _get_vellum_features(request, domain, app)
context['vellum_options'] = vellum_options
context.update({
'vellum_debug': settings.VELLUM_DEBUG,
'nav_form': form,
'formdesigner': True,
'include_fullstory': not _form_too_large(app, form),
'CKEDITOR_BASEPATH': "app_manager/js/vellum/lib/ckeditor/",
'show_live_preview': should_show_preview_app(
request,
app,
request.couch_user.username,
),
'show_ui_notification_to_hide_translations': (len(app.langs) > 2),
})
context.update(_get_requirejs_context())
if request.user.is_superuser:
context.update({'notification_options': _get_notification_options(request, domain, app, form)})
notify_form_opened(domain, request.couch_user, app.id, form.unique_id)
response = render(request, "app_manager/form_designer.html", context)
set_lang_cookie(response, context['lang'])
return response
@require_GET
@require_can_edit_apps
def get_form_data_schema(request, domain, form_unique_id):
data = []
try:
form, app = Form.get_form(form_unique_id, and_app=True)
except ResourceConflict:
raise Http404()
if app.domain != domain:
raise Http404()
try:
data.append(get_session_schema(form))
if form.requires_case() or is_usercase_in_use(domain):
data.append(get_casedb_schema(form))
except AppManagerException as e:
notify_exception(request, message=str(e))
return HttpResponseBadRequest(
str(e) or _("There is an error in the case management of your application. "
"Please fix the error to see case properties in this tree")
)
except Exception as e:
notify_exception(request, message=str(e))
return HttpResponseBadRequest("schema error, see log for details")
data.extend(
sorted(item_lists_by_domain(domain), key=lambda x: x['name'].lower())
)
kw = {}
if "pretty" in request.GET:
kw["indent"] = 2
return HttpResponse(json.dumps(data, **kw))
@require_GET
def ping(request):
return HttpResponse("pong")
def _get_base_vellum_options(request, domain, app, displayLang):
return {
'intents': {
'templates': next(app_callout_templates),
},
'javaRosa': {
'langs': app.langs,
'displayLanguage': displayLang,
'showOnlyCurrentLang': (app.smart_lang_display and (len(app.langs) > 2)),
},
'uploader': {
'uploadUrls': {
'image': reverse("hqmedia_uploader_image", args=[domain, app.id]),
'audio': reverse("hqmedia_uploader_audio", args=[domain, app.id]),
'video': reverse("hqmedia_uploader_video", args=[domain, app.id]),
'text': reverse("hqmedia_uploader_text", args=[domain, app.id]),
},
'objectMap': app.get_object_map(),
'sessionid': request.COOKIES.get('sessionid'),
},
}
def _get_vellum_core_context(request, domain, app, module, form, lang):
core = {
'dataSourcesEndpoint': reverse('get_form_data_schema',
kwargs={'domain': domain,
'form_unique_id': form.get_unique_id()}),
'form': form.source,
'formId': form.get_unique_id(),
'formName': translate(form.name, app.langs[0], app.langs),
'saveType': 'patch',
'saveUrl': reverse('edit_form_attr',
args=[domain, app.id, form.get_unique_id(),
'xform']),
'patchUrl': reverse('patch_xform',
args=[domain, app.id, form.get_unique_id()]),
'hasSubmissions': form_has_submissions(domain, app.id, form.get_unique_id()),
'hasSubmissionsUrl': reverse(FormHasSubmissionsView.urlname,
args=[domain, app.id, form.get_unique_id()]),
'allowedDataNodeReferences': [
"meta/deviceID",
"meta/instanceID",
"meta/username",
"meta/userID",
"meta/timeStart",
"meta/timeEnd",
"meta/location",
] + _get_core_context_scheduler_data_nodes(module, form),
'activityUrl': reverse('ping'),
'sessionid': request.COOKIES.get('sessionid'),
'externalLinks': {
'changeSubscription': reverse("domain_subscription_view",
kwargs={'domain': domain}),
},
'invalidCaseProperties': ['name'],
}
core.update(_get_core_context_help_text_context(form))
return core
def _get_vellum_plugins(domain, form, module):
vellum_plugins = ["modeliteration", "itemset", "atwho"]
if (toggles.COMMTRACK.enabled(domain)
or toggles.NON_COMMTRACK_LEDGERS.enabled(domain)):
vellum_plugins.append("commtrack")
if toggles.VELLUM_SAVE_TO_CASE.enabled(domain):
vellum_plugins.append("saveToCase")
form_uses_case = (
(module and module.case_type and form.requires_case()) or
is_usercase_in_use(domain)
)
form_is_basic = form.doc_type == 'Form'
if form_uses_case and form_is_basic:
vellum_plugins.append("databrowser")
return vellum_plugins
def _get_vellum_features(request, domain, app):
vellum_features = toggles.toggles_dict(username=request.user.username,
domain=domain)
vellum_features.update({
'group_in_field_list': app.enable_group_in_field_list,
'image_resize': app.enable_image_resize,
'markdown_in_groups': app.enable_markdown_in_groups,
'lookup_tables': domain_has_privilege(domain, privileges.LOOKUP_TABLES),
'templated_intents': domain_has_privilege(domain,
privileges.TEMPLATED_INTENTS),
'custom_intents': domain_has_privilege(domain,
privileges.CUSTOM_INTENTS),
'rich_text': True,
'sorted_itemsets': app.enable_sorted_itemsets,
'advanced_itemsets': add_ons.show("advanced_itemsets", request, app),
})
return vellum_features
def _get_core_context_help_text_context(form):
if form.get_action_type() == 'open':
default_help_text_template_id = '
form_icon_class = 'fcc fcc-app-createform'
elif form.get_action_type() == 'close':
default_help_text_template_id = '
form_icon_class = 'fcc fcc-app-completeform'
elif form.get_action_type() == 'update':
default_help_text_template_id = '
form_icon_class = 'fcc fcc-app-updateform'
else:
default_help_text_template_id = '
form_icon_class = 'fa fa-file-o'
return {
'defaultHelpTextTemplateId': default_help_text_template_id,
'formIconClass': form_icon_class,
}
def _get_core_context_scheduler_data_nodes(module, form):
has_schedule = (
getattr(module, 'has_schedule', False) and
getattr(form, 'schedule', False) and form.schedule.enabled
)
scheduler_data_nodes = []
if has_schedule:
scheduler_data_nodes = [
SCHEDULE_CURRENT_VISIT_NUMBER,
SCHEDULE_NEXT_DUE,
SCHEDULE_UNSCHEDULED_VISIT,
SCHEDULE_GLOBAL_NEXT_VISIT_DATE,
]
scheduler_data_nodes.extend([
"next_{}".format(f.schedule_form_id)
for f in form.get_phase().get_forms()
if getattr(f, 'schedule', False) and f.schedule.enabled
])
return scheduler_data_nodes
def _get_notification_options(request, domain, app, form):
notification_options = websockets_override(request)
if notification_options['WS4REDIS_HEARTBEAT'] in ['null', 'undefined']:
notification_options['WS4REDIS_HEARTBEAT'] = None
notification_options.update({
'notify_facility': get_facility_for_form(domain, app.id,
form.unique_id),
'user_id': request.couch_user.get_id,
})
return notification_options
def _get_requirejs_context():
requirejs = {
'requirejs_args': 'version={}{}'.format(
cachebuster("app_manager/js/vellum/src/main-components.js"),
cachebuster("app_manager/js/vellum/src/local-deps.js")
),
}
if not settings.VELLUM_DEBUG:
requirejs_url = "app_manager/js/vellum/src"
elif settings.VELLUM_DEBUG == "dev-min":
requirejs_url = "formdesigner/_build/src"
else:
requirejs_url = "formdesigner/src"
requirejs['requirejs_url'] = requirejs_url
return requirejs
| true | true |
f72e353cb0430f84180c9d81bdb77f14364cdd8e | 10,548 | py | Python | jschon/jsonpatch.py | ikonst/jschon | 4aa5c2ffce1dca831342aab232bceff9c542c137 | [
"MIT"
] | null | null | null | jschon/jsonpatch.py | ikonst/jschon | 4aa5c2ffce1dca831342aab232bceff9c542c137 | [
"MIT"
] | null | null | null | jschon/jsonpatch.py | ikonst/jschon | 4aa5c2ffce1dca831342aab232bceff9c542c137 | [
"MIT"
] | null | null | null | from __future__ import annotations
from copy import deepcopy
from enum import Enum
from typing import Dict, Iterable, List, Mapping, MutableSequence, Optional, Sequence, Union, overload
from jschon.exceptions import JSONPatchError, JSONPointerError
from jschon.json import JSON, JSONCompatible
from jschon.jsonpointer import JSONPointer
__all__ = [
'JSONPatch',
'JSONPatchOperation',
'PatchOp',
'apply_add',
'apply_remove',
'apply_replace',
'apply_move',
'apply_copy',
'apply_test',
]
class PatchOp(str, Enum):
    """The six JSON patch operations defined by RFC 6902, section 4.

    Mixes in ``str`` so members compare equal to (and serialize as) their
    lowercase operation names.
    """
    ADD = 'add'
    REMOVE = 'remove'
    REPLACE = 'replace'
    MOVE = 'move'
    COPY = 'copy'
    TEST = 'test'

    def __repr__(self) -> str:
        # e.g. PatchOp.ADD, rather than the default <PatchOp.ADD: 'add'>
        return 'PatchOp.' + self.name
class JSONPatchOperation:
    """:rfc:`6902`-conformant JSON patch operation object."""
    def __new__(
            cls,
            *,
            op: PatchOp,
            path: Union[str, JSONPointer],
            value: JSONCompatible = None,
            from_: Optional[Union[str, JSONPointer]] = None,
            **kwargs: Union[str, JSONPointer],
    ) -> JSONPatchOperation:
        """Create and return a new :class:`JSONPatchOperation` instance.
        :param op: The operation to perform. One of ``add``, ``remove``,
            ``replace``, ``move``, ``copy``, ``test``.
        :param path: A JSON pointer to the target location.
        :param value: For ``add`` and ``replace`` operations, the value
            to set at the target location. For ``test``, the value to
            compare with the target.
        :param from_: The location from which to ``move`` or ``copy``.
            An alias for `from`, which may be passed via `kwargs`.
        """
        self = object.__new__(cls)
        # PatchOp(op) validates the operation name; a bad name raises ValueError.
        self.op = PatchOp(op)
        self.path = JSONPointer(path) if isinstance(path, str) else path
        self.value = value
        if from_ is None:
            # 'from' is a reserved word in Python so it cannot be a named
            # parameter; accept it through **kwargs as an alias for from_.
            from_ = kwargs.pop('from', None)
        self.from_ = JSONPointer(from_) if isinstance(from_, str) else from_
        return self
    def apply(self, document: JSONCompatible) -> JSONCompatible:
        """Apply the patch operation to `document` and return the
        resultant document."""
        # PatchOp is a str-mixin Enum, so comparing against the bare name
        # works; every PatchOp member is covered by one branch below.
        if self.op == 'add':
            return apply_add(document, self.path, self.value)
        if self.op == 'remove':
            return apply_remove(document, self.path)
        if self.op == 'replace':
            return apply_replace(document, self.path, self.value)
        if self.op == 'move':
            return apply_move(document, self.path, self.from_)
        if self.op == 'copy':
            return apply_copy(document, self.path, self.from_)
        if self.op == 'test':
            return apply_test(document, self.path, self.value)
    def asdict(self) -> Dict[str, JSONCompatible]:
        """Return `self` as a dict."""
        result = {
            'op': self.op.value,
            'path': str(self.path),
        }
        # Only the keys meaningful for the operation are emitted, matching
        # the RFC 6902 wire format ('value' vs 'from').
        if self.op in ('add', 'replace', 'test'):
            result['value'] = self.value
        elif self.op in ('move', 'copy'):
            result['from'] = str(self.from_)
        return result
    def __eq__(self, other: Union[JSONPatchOperation, Mapping[str, JSONCompatible]]) -> bool:
        """Return `self == other`."""
        # A mapping is coerced to an operation first, so operation-dicts
        # compare equal to equivalent JSONPatchOperation objects. Values are
        # wrapped in jschon JSON for comparison (presumably structural JSON
        # equality — defined in jschon.json).
        if not isinstance(other, JSONPatchOperation):
            other = JSONPatchOperation(**other)
        return (self.op == other.op and
                self.path == other.path and
                self.from_ == other.from_ and
                JSON(self.value) == JSON(other.value))
    def __repr__(self) -> str:
        """Return `repr(self)`."""
        return f'JSONPatchOperation(op={self.op!r}, path={self.path!r}, from_={self.from_!r}, value={self.value!r})'
class JSONPatch(MutableSequence[JSONPatchOperation]):
    """:rfc:`6902`-conformant JSON Patch implementation."""
    def __init__(self, *operations: Union[JSONPatchOperation, Mapping[str, JSONCompatible]]) -> None:
        """Initialize a :class:`JSONPatch` instance from the given `operations`,
        each of which may be a :class:`JSONPatchOperation` or a JSON patch
        operation-conformant dictionary.
        """
        self._operations: List[JSONPatchOperation] = [
            operation if isinstance(operation, JSONPatchOperation) else JSONPatchOperation(**operation)
            for operation in operations
        ]
    def evaluate(self, document: JSONCompatible) -> JSONCompatible:
        """Return the result of sequentially applying all patch operations
        to `document`, as a new document. `document` itself is not modified."""
        # Deep-copy up front: the individual apply_* functions mutate the
        # working document in place.
        result = deepcopy(document)
        for operation in self._operations:
            result = operation.apply(result)
        return result
    def aslist(self) -> List[Dict[str, JSONCompatible]]:
        """Return `self` as a list of operation dicts."""
        return [
            operation.asdict()
            for operation in self._operations
        ]
    @overload
    def __getitem__(self, index: int) -> JSONPatchOperation:
        ...
    @overload
    def __getitem__(self, index: slice) -> JSONPatch:
        ...
    def __getitem__(self, index):
        """Return `self[index]`."""
        # A slice yields a new JSONPatch rather than a bare list of operations.
        if isinstance(index, int):
            return self._operations[index]
        if isinstance(index, slice):
            return JSONPatch(*self._operations[index])
        raise TypeError('Expecting int or slice')
    def __setitem__(self, index: int, operation: Union[JSONPatchOperation, Mapping[str, JSONCompatible]]) -> None:
        """Set `self[index]` to `operation`."""
        if not isinstance(operation, JSONPatchOperation):
            operation = JSONPatchOperation(**operation)
        self._operations[index] = operation
    def __delitem__(self, index: int) -> None:
        """Delete `self[index]`."""
        del self._operations[index]
    def __len__(self) -> int:
        """Return `len(self)`."""
        return len(self._operations)
    def insert(self, index: int, operation: Union[JSONPatchOperation, Mapping[str, JSONCompatible]]) -> None:
        """Insert `operation` before `index`."""
        # Together with __getitem__/__setitem__/__delitem__/__len__, this
        # completes the MutableSequence protocol (append/extend come free).
        if not isinstance(operation, JSONPatchOperation):
            operation = JSONPatchOperation(**operation)
        self._operations.insert(index, operation)
    def __eq__(self, other: Union[JSONPatch, Iterable[Union[JSONPatchOperation, Mapping[str, JSONCompatible]]]]) -> bool:
        """Return `self == other`."""
        # Any iterable of operations / operation-dicts is coerced first.
        if not isinstance(other, JSONPatch):
            other = JSONPatch(*other)
        return self._operations == other._operations
    def __repr__(self) -> str:
        """Return `repr(self)`."""
        return f'JSONPatch(*{self._operations!r})'
class NodeType(Enum):
    """How a JSON pointer's target location relates to the document."""
    ROOT = 0                  # empty pointer: the document itself
    ARRAY_ITEM = 1            # existing index within an array
    ARRAY_ITEM_NEW = 2        # '-' or one-past-the-end index: append position
    OBJECT_PROPERTY = 3       # key already present in an object
    OBJECT_PROPERTY_NEW = 4   # key not (yet) present in an object
class Node:
    """Resolve a JSON pointer against a document and classify the target.

    After construction: ``type`` is a :class:`NodeType`; ``parent`` is the
    container holding the target (None for the root); ``index`` is the
    array index or object key within that parent (None for the root).
    """
    def __init__(self, document: JSONCompatible, path: JSONPointer):
        if not path:
            # An empty pointer addresses the document itself.
            self.type = NodeType.ROOT
            self.parent = None
            self.index = None
            return
        try:
            # Evaluate all but the last pointer token to find the parent
            # container; the last token selects within it.
            self.parent = (parent := path[:-1].evaluate(document))
            key = path[-1]
        except JSONPointerError as e:
            raise JSONPatchError(f'Expecting an array or object at {path[:-1]}') from e
        # NOTE(review): str is also a Sequence; this assumes JSONPointer
        # evaluation never yields a plain string as the parent — confirm.
        if isinstance(parent, Sequence):
            try:
                # RFC 6901: '-' refers to the position after the last array
                # element; an index equal to len(parent) is treated the same.
                if key == '-' or int(key) == len(parent):
                    self.type = NodeType.ARRAY_ITEM_NEW
                    self.index = len(parent)
                elif 0 <= int(key) < len(parent):
                    self.type = NodeType.ARRAY_ITEM
                    self.index = int(key)
                else:
                    # Out-of-range index: funnel into the same error path as
                    # a non-numeric key via the except clause below.
                    raise ValueError
            except ValueError:
                raise JSONPatchError(f'Invalid array index {key}')
        elif isinstance(parent, Mapping):
            self.type = NodeType.OBJECT_PROPERTY if key in parent \
                else NodeType.OBJECT_PROPERTY_NEW
            self.index = key
        else:
            # Unreachable: the pointer evaluation above should only ever
            # land on an array or an object.
            assert False
def apply_add(document: JSONCompatible, path: JSONPointer, value: JSONCompatible) -> JSONCompatible:
    """Apply an RFC 6902 ``add``: insert a deep copy of *value* at *path*.

    Mutates *document* in place and returns it, except when *path* is the
    root, in which case the copied value replaces the whole document.
    """
    copied = deepcopy(value)
    location = Node(document, path)
    if location.type == NodeType.ROOT:
        # Adding at the root replaces the entire document.
        return copied
    if location.type in (NodeType.OBJECT_PROPERTY, NodeType.OBJECT_PROPERTY_NEW):
        location.parent[location.index] = copied
    elif location.type in (NodeType.ARRAY_ITEM, NodeType.ARRAY_ITEM_NEW):
        # Insertion shifts existing elements right; an ARRAY_ITEM_NEW index
        # is len(parent), i.e. an append.
        location.parent.insert(location.index, copied)
    return document
def apply_remove(document: JSONCompatible, path: JSONPointer) -> JSONCompatible:
    """Apply an RFC 6902 ``remove``: delete the value at *path*.

    Removing the root yields None (an empty document); otherwise the
    document is mutated in place and returned.
    """
    location = Node(document, path)
    if location.type == NodeType.ROOT:
        return None
    if location.type not in (NodeType.ARRAY_ITEM, NodeType.OBJECT_PROPERTY):
        # The target must already exist for remove to be valid.
        raise JSONPatchError(f'Cannot remove nonexistent target at {path}')
    del location.parent[location.index]
    return document
def apply_replace(document: JSONCompatible, path: JSONPointer, value: JSONCompatible) -> JSONCompatible:
    """Apply an RFC 6902 ``replace``: set a deep copy of *value* at *path*.

    Unlike ``add``, the target location must already exist.
    """
    new_value = deepcopy(value)
    location = Node(document, path)
    if location.type == NodeType.ROOT:
        # Replacing the root swaps out the whole document.
        return new_value
    if location.type not in (NodeType.ARRAY_ITEM, NodeType.OBJECT_PROPERTY):
        raise JSONPatchError(f'Cannot replace nonexistent target at {path}')
    location.parent[location.index] = new_value
    return document
def apply_move(document: JSONCompatible, path: JSONPointer, from_: JSONPointer) -> JSONCompatible:
    """Apply an RFC 6902 ``move``: remove the value at *from_* and add it at *path*.

    NOTE(review): RFC 6902 forbids *from_* being a proper prefix of *path*
    (moving a location into one of its own children); no such check is
    visible here — confirm whether callers guarantee it.
    """
    try:
        # Capture the value before removal; apply_add deep-copies it later.
        value = from_.evaluate(document)
    except JSONPointerError as e:
        raise JSONPatchError(f'Cannot move from nonexistent location {from_}') from e
    document = apply_remove(document, from_)
    return apply_add(document, path, value)
def apply_copy(document: JSONCompatible, path: JSONPointer, from_: JSONPointer) -> JSONCompatible:
    """Apply an RFC 6902 ``copy``: add the value found at *from_* to *path*."""
    try:
        source_value = from_.evaluate(document)
    except JSONPointerError as e:
        raise JSONPatchError(f'Cannot copy from nonexistent location {from_}') from e
    # apply_add deep-copies, so the original and the copy stay independent.
    return apply_add(document, path, source_value)
def apply_test(document: JSONCompatible, path: JSONPointer, value: JSONCompatible) -> JSONCompatible:
    """Apply an RFC 6902 ``test``: verify the value at *path* equals *value*.

    Returns the document unchanged on success; raises JSONPatchError if the
    target does not exist or the values differ.
    """
    target = Node(document, path)
    if target.type in (NodeType.ROOT, NodeType.ARRAY_ITEM, NodeType.OBJECT_PROPERTY):
        # The pointer is evaluated a second time here to fetch the actual
        # value; Node() above only classified the location.
        if JSON(path.evaluate(document)) != JSON(value):
            raise JSONPatchError(f'The value at {path} does not equal {value}')
    else:
        raise JSONPatchError(f'Cannot test nonexistent target at {path}')
    return document
| 34.811881 | 121 | 0.622677 | from __future__ import annotations
from copy import deepcopy
from enum import Enum
from typing import Dict, Iterable, List, Mapping, MutableSequence, Optional, Sequence, Union, overload
from jschon.exceptions import JSONPatchError, JSONPointerError
from jschon.json import JSON, JSONCompatible
from jschon.jsonpointer import JSONPointer
__all__ = [
'JSONPatch',
'JSONPatchOperation',
'PatchOp',
'apply_add',
'apply_remove',
'apply_replace',
'apply_move',
'apply_copy',
'apply_test',
]
class PatchOp(str, Enum):
    """The six JSON patch operation names defined by :rfc:`6902`.

    Subclasses str so members compare equal to the plain name strings.
    """
    ADD = 'add'
    REMOVE = 'remove'
    REPLACE = 'replace'
    MOVE = 'move'
    COPY = 'copy'
    TEST = 'test'
    def __repr__(self) -> str:
        # e.g. ``PatchOp.ADD`` instead of the str/Enum default repr.
        return f'PatchOp.{self.name}'
class JSONPatchOperation:
    """A single :rfc:`6902` JSON patch operation."""
    def __new__(
            cls,
            *,
            op: PatchOp,
            path: Union[str, JSONPointer],
            value: JSONCompatible = None,
            from_: Optional[Union[str, JSONPointer]] = None,
            **kwargs: Union[str, JSONPointer],
    ) -> JSONPatchOperation:
        """Create a patch operation.

        :param op: one of ``add``/``remove``/``replace``/``move``/``copy``/``test``.
        :param path: JSON pointer to the target location (str or JSONPointer).
        :param value: value for ``add``/``replace``/``test``.
        :param from_: source location for ``move``/``copy``; since 'from' is
            a Python keyword, it may also be passed as ``**{'from': ...}``.
        """
        self = object.__new__(cls)
        # PatchOp(op) validates the operation name (ValueError if unknown).
        self.op = PatchOp(op)
        self.path = JSONPointer(path) if isinstance(path, str) else path
        self.value = value
        if from_ is None:
            # Accept the RFC wire-format key 'from' via **kwargs.
            from_ = kwargs.pop('from', None)
        self.from_ = JSONPointer(from_) if isinstance(from_, str) else from_
        return self
    def apply(self, document: JSONCompatible) -> JSONCompatible:
        """Apply this operation to `document` and return the result.

        PatchOp is a str-mixin Enum, so comparison against the bare
        operation name works; every member has a branch below.
        """
        if self.op == 'add':
            return apply_add(document, self.path, self.value)
        if self.op == 'remove':
            return apply_remove(document, self.path)
        if self.op == 'replace':
            return apply_replace(document, self.path, self.value)
        if self.op == 'move':
            return apply_move(document, self.path, self.from_)
        if self.op == 'copy':
            return apply_copy(document, self.path, self.from_)
        if self.op == 'test':
            return apply_test(document, self.path, self.value)
    def asdict(self) -> Dict[str, JSONCompatible]:
        """Return this operation as an RFC 6902-format dict."""
        result = {
            'op': self.op.value,
            'path': str(self.path),
        }
        # Emit only the keys meaningful for this op ('value' vs 'from').
        if self.op in ('add', 'replace', 'test'):
            result['value'] = self.value
        elif self.op in ('move', 'copy'):
            result['from'] = str(self.from_)
        return result
    def __eq__(self, other: Union[JSONPatchOperation, Mapping[str, JSONCompatible]]) -> bool:
        """Return `self == other`; operation-dicts are coerced first.

        Values are wrapped in jschon JSON for comparison (presumably
        structural JSON equality — defined in jschon.json).
        """
        if not isinstance(other, JSONPatchOperation):
            other = JSONPatchOperation(**other)
        return (self.op == other.op and
                self.path == other.path and
                self.from_ == other.from_ and
                JSON(self.value) == JSON(other.value))
    def __repr__(self) -> str:
        """Return `repr(self)`."""
        return f'JSONPatchOperation(op={self.op!r}, path={self.path!r}, from_={self.from_!r}, value={self.value!r})'
class JSONPatch(MutableSequence[JSONPatchOperation]):
    """An :rfc:`6902` JSON Patch: a mutable sequence of patch operations."""
    def __init__(self, *operations: Union[JSONPatchOperation, Mapping[str, JSONCompatible]]) -> None:
        """Initialize from operations, each a JSONPatchOperation or an
        operation-conformant dict."""
        self._operations: List[JSONPatchOperation] = [
            operation if isinstance(operation, JSONPatchOperation) else JSONPatchOperation(**operation)
            for operation in operations
        ]
    def evaluate(self, document: JSONCompatible) -> JSONCompatible:
        """Apply all operations in order to a deep copy of `document` and
        return the result; `document` itself is not modified."""
        result = deepcopy(document)
        for operation in self._operations:
            result = operation.apply(result)
        return result
    def aslist(self) -> List[Dict[str, JSONCompatible]]:
        """Return the patch as a list of RFC 6902 operation dicts."""
        return [
            operation.asdict()
            for operation in self._operations
        ]
    @overload
    def __getitem__(self, index: int) -> JSONPatchOperation:
        ...
    @overload
    def __getitem__(self, index: slice) -> JSONPatch:
        ...
    def __getitem__(self, index):
        """Return `self[index]`; a slice yields a new JSONPatch."""
        if isinstance(index, int):
            return self._operations[index]
        if isinstance(index, slice):
            return JSONPatch(*self._operations[index])
        raise TypeError('Expecting int or slice')
    def __setitem__(self, index: int, operation: Union[JSONPatchOperation, Mapping[str, JSONCompatible]]) -> None:
        """Set `self[index]`, coercing an operation-dict if necessary."""
        if not isinstance(operation, JSONPatchOperation):
            operation = JSONPatchOperation(**operation)
        self._operations[index] = operation
    def __delitem__(self, index: int) -> None:
        """Delete `self[index]`."""
        del self._operations[index]
    def __len__(self) -> int:
        """Return the number of operations."""
        return len(self._operations)
    def insert(self, index: int, operation: Union[JSONPatchOperation, Mapping[str, JSONCompatible]]) -> None:
        """Insert `operation` before `index` (completes the MutableSequence
        protocol; append/extend are mixed in)."""
        if not isinstance(operation, JSONPatchOperation):
            operation = JSONPatchOperation(**operation)
        self._operations.insert(index, operation)
    def __eq__(self, other: Union[JSONPatch, Iterable[Union[JSONPatchOperation, Mapping[str, JSONCompatible]]]]) -> bool:
        """Return `self == other`; any iterable of operations is coerced."""
        if not isinstance(other, JSONPatch):
            other = JSONPatch(*other)
        return self._operations == other._operations
    def __repr__(self) -> str:
        """Return `repr(self)`."""
        return f'JSONPatch(*{self._operations!r})'
class NodeType(Enum):
    """How a JSON pointer's target location relates to the document."""
    ROOT = 0                  # empty pointer: the document itself
    ARRAY_ITEM = 1            # existing index within an array
    ARRAY_ITEM_NEW = 2        # '-' or one-past-the-end index: append position
    OBJECT_PROPERTY = 3       # key already present in an object
    OBJECT_PROPERTY_NEW = 4   # key not (yet) present in an object
class Node:
    """Resolve a JSON pointer against a document and classify the target.

    Afterwards ``type`` is a NodeType, ``parent`` the container holding the
    target (None at root) and ``index`` the array index or object key.
    """
    def __init__(self, document: JSONCompatible, path: JSONPointer):
        if not path:
            # An empty pointer addresses the document itself.
            self.type = NodeType.ROOT
            self.parent = None
            self.index = None
            return
        try:
            # All but the last token locate the parent container; the last
            # token selects within it.
            self.parent = (parent := path[:-1].evaluate(document))
            key = path[-1]
        except JSONPointerError as e:
            raise JSONPatchError(f'Expecting an array or object at {path[:-1]}') from e
        # NOTE(review): str is also a Sequence; assumes pointer evaluation
        # never yields a plain string as the parent — confirm.
        if isinstance(parent, Sequence):
            try:
                # RFC 6901 '-' (or index == len) means the append position.
                if key == '-' or int(key) == len(parent):
                    self.type = NodeType.ARRAY_ITEM_NEW
                    self.index = len(parent)
                elif 0 <= int(key) < len(parent):
                    self.type = NodeType.ARRAY_ITEM
                    self.index = int(key)
                else:
                    # Out-of-range index: reuse the non-numeric error path.
                    raise ValueError
            except ValueError:
                raise JSONPatchError(f'Invalid array index {key}')
        elif isinstance(parent, Mapping):
            self.type = NodeType.OBJECT_PROPERTY if key in parent \
                else NodeType.OBJECT_PROPERTY_NEW
            self.index = key
        else:
            # Unreachable: the parent must be an array or an object.
            assert False
def apply_add(document: JSONCompatible, path: JSONPointer, value: JSONCompatible) -> JSONCompatible:
    """RFC 6902 ``add``: insert a deep copy of *value* at *path* (in place);
    adding at the root returns the copied value as the new document."""
    target = Node(document, path)
    value = deepcopy(value)
    if target.type == NodeType.ROOT:
        return value
    if target.type in (NodeType.ARRAY_ITEM, NodeType.ARRAY_ITEM_NEW):
        # Array insertion shifts elements right; ARRAY_ITEM_NEW appends.
        target.parent.insert(target.index, value)
    elif target.type in (NodeType.OBJECT_PROPERTY, NodeType.OBJECT_PROPERTY_NEW):
        target.parent[target.index] = value
    return document
def apply_remove(document: JSONCompatible, path: JSONPointer) -> JSONCompatible:
    """RFC 6902 ``remove``: delete the (existing) value at *path*;
    removing the root yields None."""
    target = Node(document, path)
    if target.type == NodeType.ROOT:
        return None
    if target.type in (NodeType.ARRAY_ITEM, NodeType.OBJECT_PROPERTY):
        del target.parent[target.index]
    else:
        raise JSONPatchError(f'Cannot remove nonexistent target at {path}')
    return document
def apply_replace(document: JSONCompatible, path: JSONPointer, value: JSONCompatible) -> JSONCompatible:
    """RFC 6902 ``replace``: set a deep copy of *value* at *path*;
    unlike ``add`` the target must already exist."""
    target = Node(document, path)
    value = deepcopy(value)
    if target.type == NodeType.ROOT:
        return value
    if target.type in (NodeType.ARRAY_ITEM, NodeType.OBJECT_PROPERTY):
        target.parent[target.index] = value
    else:
        raise JSONPatchError(f'Cannot replace nonexistent target at {path}')
    return document
def apply_move(document: JSONCompatible, path: JSONPointer, from_: JSONPointer) -> JSONCompatible:
    """RFC 6902 ``move``: remove the value at *from_* and add it at *path*.

    NOTE(review): the RFC forbids *from_* being a proper prefix of *path*;
    no such check is visible here — confirm callers guarantee it."""
    try:
        # Capture the value before removal; apply_add deep-copies it later.
        value = from_.evaluate(document)
    except JSONPointerError as e:
        raise JSONPatchError(f'Cannot move from nonexistent location {from_}') from e
    document = apply_remove(document, from_)
    return apply_add(document, path, value)
def apply_copy(document: JSONCompatible, path: JSONPointer, from_: JSONPointer) -> JSONCompatible:
    """RFC 6902 ``copy``: add the value found at *from_* to *path*."""
    try:
        value = from_.evaluate(document)
    except JSONPointerError as e:
        raise JSONPatchError(f'Cannot copy from nonexistent location {from_}') from e
    return apply_add(document, path, value)
def apply_test(document: JSONCompatible, path: JSONPointer, value: JSONCompatible) -> JSONCompatible:
    """RFC 6902 ``test``: verify the value at *path* equals *value*;
    returns the document unchanged or raises JSONPatchError."""
    target = Node(document, path)
    if target.type in (NodeType.ROOT, NodeType.ARRAY_ITEM, NodeType.OBJECT_PROPERTY):
        # The pointer is evaluated again to fetch the actual value; Node()
        # above only classified the location.
        if JSON(path.evaluate(document)) != JSON(value):
            raise JSONPatchError(f'The value at {path} does not equal {value}')
    else:
        raise JSONPatchError(f'Cannot test nonexistent target at {path}')
    return document
| true | true |
f72e35604f795a4e13916642b9ae4c94a24be9b3 | 139 | py | Python | manager.py | HenryChenV/Spiritline | 2fcea54886ba3945c3359ce9fa1a3f20257fa8b1 | [
"MIT"
] | null | null | null | manager.py | HenryChenV/Spiritline | 2fcea54886ba3945c3359ce9fa1a3f20257fa8b1 | [
"MIT"
] | null | null | null | manager.py | HenryChenV/Spiritline | 2fcea54886ba3945c3359ce9fa1a3f20257fa8b1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding=utf-8 -*-
"""
Manager
~~~~~~~
"""
from cmds import cmds
if __name__ == '__main__':
    # Script entry point: run the command dispatcher imported from the
    # local cmds module.
    cmds()
| 11.583333 | 26 | 0.517986 |
from cmds import cmds
if __name__ == '__main__':
    # Script entry point: run the command dispatcher imported from the
    # local cmds module.
    cmds()
| true | true |
f72e3621aa8551368313d955fdd5e69ddb1b2246 | 23,667 | py | Python | rift/engine.py | kennethhuang123/rift-python | f4c208fe39cb14535573708637fa2345c919666b | [
"Apache-2.0"
] | 43 | 2018-07-19T17:41:35.000Z | 2022-03-16T04:04:09.000Z | rift/engine.py | kennethhuang123/rift-python | f4c208fe39cb14535573708637fa2345c919666b | [
"Apache-2.0"
] | 96 | 2018-07-19T11:06:08.000Z | 2021-07-27T10:52:09.000Z | rift/engine.py | kennethhuang123/rift-python | f4c208fe39cb14535573708637fa2345c919666b | [
"Apache-2.0"
] | 29 | 2018-07-24T22:01:20.000Z | 2022-02-13T21:28:18.000Z | import atexit
import logging
import random
import os
import sys
import termios
import sortedcontainers
import netifaces
import cli_listen_handler
import cli_session_handler
import constants
import interface
import key
import node
import scheduler
import stats
import table
OLD_TERMINAL_SETTINGS = None
# pylint:disable=global-statement
def make_terminal_unbuffered():
    """Put stdin into no-echo, non-canonical, non-blocking mode for the CLI.

    The previous terminal attributes are saved in OLD_TERMINAL_SETTINGS so
    restore_terminal() can undo this at exit.
    """
    # Based on https://stackoverflow.com/questions/21791621/taking-input-from-sys-stdin-non-blocking
    global OLD_TERMINAL_SETTINGS
    OLD_TERMINAL_SETTINGS = termios.tcgetattr(sys.stdin)
    new_settings = termios.tcgetattr(sys.stdin)
    # Index 3 is lflag: turn off echo and canonical (line-buffered) input.
    new_settings[3] = new_settings[3] & ~(termios.ECHO | termios.ICANON)
    # Index 6 is the cc array: VMIN=0/VTIME=0 makes reads return immediately
    # with whatever bytes (possibly none) are available.
    new_settings[6][termios.VMIN] = 0
    new_settings[6][termios.VTIME] = 0
    termios.tcsetattr(sys.stdin, termios.TCSADRAIN, new_settings)
@atexit.register
def restore_terminal():
    """Restore the terminal attributes saved by make_terminal_unbuffered().

    Registered with atexit so the user's terminal is not left in
    no-echo/non-canonical mode when the process exits.
    """
    global OLD_TERMINAL_SETTINGS
    if OLD_TERMINAL_SETTINGS:
        termios.tcsetattr(sys.stdin, termios.TCSADRAIN, OLD_TERMINAL_SETTINGS)
        # Clear the saved settings so a second call is a no-op.
        OLD_TERMINAL_SETTINGS = None
class Engine:
    def __init__(self, passive_nodes, run_which_nodes, interactive, telnet_port_file,
                 ipv4_multicast_loopback, ipv6_multicast_loopback, log_level, config):
        """Build a RIFT engine from parsed configuration.

        Sets up logging, global and flooding-reduction settings, engine-wide
        stats groups, authentication keys, all configured nodes, and either
        an interactive (stdin/stdout) or a telnet-based CLI.
        """
        # pylint:disable=too-many-statements
        log_file_name = "rift.log" # TODO: Make this configurable
        if "RIFT_TEST_RESULTS_DIR" in os.environ:
            log_file_name = os.environ["RIFT_TEST_RESULTS_DIR"] + "/" + log_file_name
        logging.basicConfig(
            filename=log_file_name,
            format='%(asctime)s:%(levelname)s:%(name)s:%(message)s',
            level=log_level)
        self._run_which_nodes = run_which_nodes
        self._interactive = interactive
        self._telnet_port_file = telnet_port_file
        self.ipv4_multicast_loopback = ipv4_multicast_loopback
        self.ipv6_multicast_loopback = ipv6_multicast_loopback
        self._config = config
        # Multi-node (topology) configs run all nodes on simulated interfaces
        # multiplexed over one real interface; a single node uses real ones.
        if self.nr_nodes() > 1:
            self._stand_alone = False
            self.simulated_interfaces = True
            self.physical_interface_name = self.default_physical_interface()
        else:
            self._stand_alone = True
            self.simulated_interfaces = False
            self.physical_interface_name = None
        self.tx_src_address = self.read_global_configuration(config, 'tx_src_address', '')
        self.floodred_enabled = self.read_global_configuration(config, 'flooding_reduction', True)
        self.floodred_redundancy = self.read_global_configuration(
            config,
            'flooding_reduction_redundancy',
            constants.DEFAULT_FLOODING_REDUCTION_REDUNDANCY)
        self.floodred_similarity = self.read_global_configuration(
            config,
            'flooding_reduction_similarity',
            constants.DEFAULT_FLOODING_REDUCTION_SIMILARITY)
        # 64-bit random value used by the flooding-reduction election.
        self.floodred_system_random = random.randint(0, 0xffffffffffffffff)
        # Engine-wide stats groups that aggregate across all interfaces/nodes.
        self.intf_traffic_stats_group = stats.Group()
        self.intf_security_stats_group = stats.Group()
        self.intf_lie_fsm_stats_group = stats.Group()
        self.node_ztp_fsm_stats_group = stats.Group()
        self.keys = {} # Indexed by key-id
        # Key-id 0 is the reserved null key (no authentication).
        self.keys[0] = key.Key(key_id=0, algorithm="null", secret="")
        self._nodes = sortedcontainers.SortedDict()
        self.create_configuration(passive_nodes)
        cli_log = logging.getLogger('cli')
        # The CLI starts out focused on the first node (by sorted name).
        if self._nodes:
            first_node = self._nodes.peekitem(0)[1]
        else:
            first_node = None
        if self._interactive:
            make_terminal_unbuffered()
            self._cli_listen_handler = None
            self._interactive_cli_session_handler = cli_session_handler.CliSessionHandler(
                sock=None,
                rx_fd=sys.stdin.fileno(),
                tx_fd=sys.stdout.fileno(),
                parse_tree=self.parse_tree,
                command_handler=self,
                log=cli_log,
                node=first_node)
        else:
            self._cli_listen_handler = cli_listen_handler.CliListenHandler(
                command_tree=self.parse_tree,
                command_handler=self,
                log=cli_log,
                default_node=first_node)
            self._interactive_cli_session_handler = None
            # Announce the (ephemeral) telnet port, either on stdout or in a
            # file so test harnesses can find it.
            if self._telnet_port_file is None:
                print("Command Line Interface (CLI) available on port {}"
                      .format(self._cli_listen_handler.port))
            else:
                try:
                    with open(self._telnet_port_file, 'w') as file:
                        print(self._cli_listen_handler.port, file=file)
                except IOError:
                    pass # TODO: Log an error
    def default_physical_interface(self):
        """Pick the real interface that carries all simulated interfaces.

        Returns the name of the first interface that has an IPv4 address
        with a broadcast address; exits the process if none exists.
        """
        # When simulated interfaces are disabled, the interface names on nodes correspond to real
        # interfaces on the host platform.
        # When simulated interface are enabled, the interface names on nodes are "fake" i.e. they do
        # not correspond to real interfaces on the host platform. All these simulated interfaces
        # actually run on a single real interface, referred to as the physical interface. Traffic to
        # and from different simulated interfaces are distinguished by using different multicast
        # addresses and port numbers for each simulated interface.
        # Pick the first interface with a broadcast IPv4 address (if any) as the default.
        for intf_name in netifaces.interfaces():
            addresses = netifaces.ifaddresses(intf_name)
            if netifaces.AF_INET in addresses:
                ipv4_addresses = addresses[netifaces.AF_INET]
                for ipv4_address in ipv4_addresses:
                    if 'broadcast' in ipv4_address:
                        return intf_name
        # Fatal: multi-node topologies cannot run without such an interface.
        print("Cannot pick default physical interface: no broadcast interface found")
        sys.exit(1)
def nr_nodes(self):
total_nr_nodes = 0
if 'shards' in self._config:
for shard_config in self._config['shards']:
if 'nodes' in shard_config:
total_nr_nodes += len(shard_config['nodes'])
return total_nr_nodes
def read_global_configuration(self, config, attribute, default):
# TODO: Get rid of const
if ('const' in config) and (config['const'] is not None) and (attribute in config['const']):
return config['const'][attribute]
if attribute in config:
return config[attribute]
return default
    def create_configuration(self, passive_nodes):
        """Instantiate authentication keys and nodes from the parsed config.

        :param passive_nodes: names of nodes that must be created passive.
        """
        if 'authentication_keys' in self._config:
            for key_config in self._config['authentication_keys']:
                self.create_key(key_config)
        if 'shards' in self._config:
            for shard_config in self._config['shards']:
                self.create_shard(shard_config, passive_nodes)
def create_key(self, key_config):
key_id = key_config["id"]
algorithm = key_config["algorithm"]
secret = key_config["secret"]
self.keys[key_id] = key.Key(key_id, algorithm, secret)
def key_id_to_key(self, key_id):
if key_id is None:
return None
if key_id not in self.keys:
return None
return self.keys[key_id]
def key_ids_to_keys(self, key_ids):
if key_ids is None:
return []
return [self.key_id_to_key(key_id) for key_id in key_ids]
    def create_shard(self, shard_config, passive_nodes):
        """Create every node in one shard; nodes whose name appears in
        passive_nodes are forced to be passive."""
        if 'nodes' in shard_config:
            for node_config in shard_config['nodes']:
                if 'name' in node_config:
                    force_passive = node_config['name'] in passive_nodes
                else:
                    force_passive = False
                self.create_node(node_config, force_passive)
    def create_node(self, node_config, force_passive):
        """Instantiate a node.Node and register it under its name."""
        new_node = node.Node(node_config, self, force_passive, self._stand_alone)
        self._nodes[new_node.name] = new_node
    def run(self):
        """Run the engine by delegating to the shared scheduler.SCHEDULER loop."""
        scheduler.SCHEDULER.run()
    def command_clear_engine_stats(self, _cli_session):
        """CLI 'clear engine statistics': reset all engine-wide stats groups
        and the scheduler's slip/processing-time counters."""
        self.intf_traffic_stats_group.clear()
        self.intf_security_stats_group.clear()
        self.intf_lie_fsm_stats_group.clear()
        self.node_ztp_fsm_stats_group.clear()
        scheduler.SCHEDULER.slip_count_10ms = 0
        scheduler.SCHEDULER.slip_count_100ms = 0
        scheduler.SCHEDULER.slip_count_1000ms = 0
        scheduler.SCHEDULER.max_pending_events_proc_time = 0.0
        scheduler.SCHEDULER.max_expired_timers_proc_time = 0.0
        scheduler.SCHEDULER.max_select_proc_time = 0.0
        scheduler.SCHEDULER.max_ready_to_read_proc_time = 0.0
    def command_clear_intf_stats(self, cli_session, parameters):
        # Delegates to the CLI session's currently selected node.
        cli_session.current_node.command_clear_intf_stats(cli_session, parameters)
    def command_clear_node_stats(self, cli_session):
        # Delegates to the CLI session's currently selected node.
        cli_session.current_node.command_clear_node_stats(cli_session)
    def command_show_engine(self, cli_session):
        """CLI 'show engine': print global engine settings and scheduler
        health counters as a two-column table."""
        tab = table.Table(separators=False)
        tab.add_row(["Stand-alone", self._stand_alone])
        tab.add_row(["Interactive", self._interactive])
        tab.add_row(["Simulated Interfaces", self.simulated_interfaces])
        tab.add_row(["Physical Interface", self.physical_interface_name])
        tab.add_row(["Telnet Port File", self._telnet_port_file])
        tab.add_row(["IPv4 Multicast Loopback", self.ipv4_multicast_loopback])
        tab.add_row(["IPv6 Multicast Loopback", self.ipv6_multicast_loopback])
        tab.add_row(["Number of Nodes", self.nr_nodes()])
        tab.add_row(["Transmit Source Address", self.tx_src_address])
        tab.add_row(["Flooding Reduction Enabled", self.floodred_enabled])
        tab.add_row(["Flooding Reduction Redundancy", self.floodred_redundancy])
        tab.add_row(["Flooding Reduction Similarity", self.floodred_similarity])
        tab.add_row(["Flooding Reduction System Random", self.floodred_system_random])
        # Scheduler slip and processing-time counters help diagnose an
        # overloaded event loop (timers firing late, slow handlers).
        tab.add_row(["Timer slips > 10ms", scheduler.SCHEDULER.slip_count_10ms])
        tab.add_row(["Timer slips > 100ms", scheduler.SCHEDULER.slip_count_100ms])
        tab.add_row(["Timer slips > 1000ms", scheduler.SCHEDULER.slip_count_1000ms])
        tab.add_row(["Max pending events processing time",
                     "{:06f}".format(scheduler.SCHEDULER.max_pending_events_proc_time)])
        tab.add_row(["Max expired timers processing time",
                     "{:06f}".format(scheduler.SCHEDULER.max_expired_timers_proc_time)])
        tab.add_row(["Max select processing time",
                     "{:06f}".format(scheduler.SCHEDULER.max_select_proc_time)])
        tab.add_row(["Max ready-to-read processing time",
                     "{:06f}".format(scheduler.SCHEDULER.max_ready_to_read_proc_time)])
        cli_session.print(tab.to_string())
    def command_show_engine_stats(self, cli_session, exclude_zero=False):
        """CLI 'show engine statistics': print the four engine-wide stats
        groups (aggregated over all nodes and interfaces)."""
        cli_session.print("All Node ZTP FSMs:")
        tab = self.node_ztp_fsm_stats_group.table(exclude_zero)
        cli_session.print(tab.to_string())
        cli_session.print("All Interfaces Traffic:")
        tab = self.intf_traffic_stats_group.table(exclude_zero)
        cli_session.print(tab.to_string())
        cli_session.print("All Interfaces Security:")
        tab = self.intf_security_stats_group.table(exclude_zero)
        cli_session.print(tab.to_string())
        cli_session.print("All Interface LIE FSMs:")
        tab = self.intf_lie_fsm_stats_group.table(exclude_zero)
        cli_session.print(tab.to_string())
    def command_show_eng_stats_ex_zero(self, cli_session):
        """CLI 'show engine statistics exclude-zero': same, hiding zero counters."""
        self.command_show_engine_stats(cli_session, True)
    # The command_* methods below are thin CLI dispatch shims: each forwards
    # to the corresponding method on the CLI session's currently selected
    # node (or on a class-level FSM definition). They are referenced from
    # the parse_tree command table rather than called directly.
    def command_show_flooding_reduction(self, cli_session):
        cli_session.current_node.command_show_flooding_reduction(cli_session)
    def command_show_intf_fsm_nvhis(self, cli_session, parameters):
        cli_session.current_node.command_show_intf_fsm_hist(cli_session, parameters, False)
    def command_show_intf_fsm_vhis(self, cli_session, parameters):
        cli_session.current_node.command_show_intf_fsm_hist(cli_session, parameters, True)
    def command_show_intf_packets(self, cli_session, parameters):
        cli_session.current_node.command_show_intf_packets(cli_session, parameters)
    def command_show_intf_queues(self, cli_session, parameters):
        cli_session.current_node.command_show_intf_queues(cli_session, parameters)
    def command_show_intf_security(self, cli_session, parameters):
        cli_session.current_node.command_show_intf_security(cli_session, parameters)
    def command_show_intf_sockets(self, cli_session, parameters):
        cli_session.current_node.command_show_intf_sockets(cli_session, parameters)
    def command_show_intf_stats(self, cli_session, parameters):
        cli_session.current_node.command_show_intf_stats(cli_session, parameters, False)
    def command_show_intf_stats_ex_zero(self, cli_session, parameters):
        cli_session.current_node.command_show_intf_stats(cli_session, parameters, True)
    def command_show_intf_tides(self, cli_session, parameters):
        cli_session.current_node.command_show_intf_tides(cli_session, parameters)
    def command_show_interface(self, cli_session, parameters):
        cli_session.current_node.command_show_interface(cli_session, parameters)
    def command_set_interface_failure(self, cli_session, parameters):
        cli_session.current_node.command_set_interface_failure(cli_session, parameters)
    def command_show_interfaces(self, cli_session):
        cli_session.current_node.command_show_interfaces(cli_session)
    def command_show_neighbors(self, cli_session):
        cli_session.current_node.command_show_neighbors(cli_session)
    def command_show_bw_balancing(self, cli_session):
        cli_session.current_node.command_show_bw_balancing(cli_session)
    def command_show_kernel_addresses(self, cli_session):
        cli_session.current_node.command_show_kernel_addresses(cli_session)
    def command_show_kernel_links(self, cli_session):
        cli_session.current_node.command_show_kernel_links(cli_session)
    def command_show_kernel_routes(self, cli_session):
        cli_session.current_node.command_show_kernel_routes(cli_session)
    def command_show_kernel_routes_tab(self, cli_session, parameters):
        cli_session.current_node.command_show_kernel_routes_tab(cli_session, parameters)
    def command_show_kernel_routes_pref(self, cli_session, parameters):
        cli_session.current_node.command_show_kernel_routes_pref(cli_session, parameters)
    def command_show_lie_fsm(self, cli_session):
        # The LIE FSM definition is class-level, not per-node.
        interface.Interface.fsm_definition.command_show_fsm(cli_session)
    def command_show_node(self, cli_session):
        cli_session.current_node.command_show_node(cli_session)
    def command_show_node_fsm_nvhis(self, cli_session):
        cli_session.current_node.command_show_node_fsm_history(cli_session, False)
    def command_show_node_fsm_vhis(self, cli_session):
        cli_session.current_node.command_show_node_fsm_history(cli_session, True)
    def command_show_node_stats(self, cli_session):
        cli_session.current_node.command_show_node_stats(cli_session, False)
    def command_show_node_stats_ex_zero(self, cli_session):
        cli_session.current_node.command_show_node_stats(cli_session, True)
    def command_show_nodes(self, cli_session):
        # Summary table of all nodes in this engine (not just the current one).
        tab = table.Table()
        tab.add_row(node.Node.cli_summary_headers())
        for nod in self._nodes.values():
            tab.add_row(nod.cli_summary_attributes())
        cli_session.print(tab.to_string())
    def command_show_nodes_level(self, cli_session):
        # Per-node level information for all nodes in this engine.
        tab = table.Table()
        tab.add_row(node.Node.cli_level_headers())
        for nod in self._nodes.values():
            tab.add_row(nod.cli_level_attributes())
        cli_session.print(tab.to_string())
    def command_show_route_prefix(self, cli_session, parameters):
        cli_session.current_node.command_show_route_prefix(cli_session, parameters)
    def command_show_route_prefix_owner(self, cli_session, parameters):
        cli_session.current_node.command_show_route_prefix_owner(cli_session, parameters)
    def command_show_routes(self, cli_session):
        cli_session.current_node.command_show_routes(cli_session)
    def command_show_routes_family(self, cli_session, parameters):
        cli_session.current_node.command_show_routes_family(cli_session, parameters)
    def command_show_forwarding(self, cli_session):
        cli_session.current_node.command_show_forwarding(cli_session)
    def command_show_forwarding_prefix(self, cli_session, parameters):
        cli_session.current_node.command_show_forwarding_prefix(cli_session, parameters)
    def command_show_forwarding_family(self, cli_session, parameters):
        cli_session.current_node.command_show_forwarding_family(cli_session, parameters)
    def command_show_disaggregation(self, cli_session):
        cli_session.current_node.command_show_disaggregation(cli_session)
    def command_show_security(self, cli_session):
        cli_session.current_node.command_show_security(cli_session)
    def command_show_spf(self, cli_session):
        cli_session.current_node.command_show_spf(cli_session)
    def command_show_spf_dir(self, cli_session, parameters):
        cli_session.current_node.command_show_spf_dir(cli_session, parameters)
    def command_show_spf_dir_dest(self, cli_session, parameters):
        cli_session.current_node.command_show_spf_dir_dest(cli_session, parameters)
    def command_show_tie_db(self, cli_session):
        cli_session.current_node.command_show_tie_db(cli_session)
    def command_show_tie_db_dir(self, cli_session, parameters):
        cli_session.current_node.command_show_tie_db_dir(cli_session, parameters)
    def command_show_tie_db_dir_orig(self, cli_session, parameters):
        cli_session.current_node.command_show_tie_db_dir_orig(cli_session, parameters)
    def command_show_tie_db_dir_orig_type(self, cli_session, parameters):
        # pylint:disable=invalid-name
        cli_session.current_node.command_show_tie_db_dir_orig_type(cli_session, parameters)
    def command_show_ztp_fsm(self, cli_session):
        # The ZTP FSM definition is class-level, not per-node.
        node.Node.fsm_definition.command_show_fsm(cli_session)
def command_set_node(self, cli_session, parameters):
node_name = parameters['node']
if node_name in self._nodes:
cli_session.set_current_node(self._nodes[node_name])
else:
cli_session.print("Node {} does not exist".format(node_name))
    def command_set_level(self, cli_session, parameters):
        """Change the configured RIFT level of the session's current node.

        The symbol is validated with Node.parse_level_symbol first; on invalid
        input an error is printed and no event is pushed.
        """
        level_symbol = parameters['level'].lower()
        # parsed_level is only used for validation here: the event below
        # deliberately carries the raw symbol, not the parsed value
        # (presumably the FSM handler re-parses it -- confirm in node.py).
        parsed_level = node.Node.parse_level_symbol(level_symbol)
        if parsed_level is None:
            cli_session.print("Invalid level value (expected undefined, leaf, leaf-to-leaf, "
                              "top-of-fabric, or number)")
            return
        cli_session.current_node.fsm.push_event(node.Node.Event.CHANGE_LOCAL_CONFIGURED_LEVEL,
                                                level_symbol)
    def command_exit(self, cli_session):
        """Close this CLI session only; the engine process keeps running."""
        cli_session.close()
    def command_help(self, cli_session):
        """Print the CLI help text for this session."""
        cli_session.help()
    def command_stop(self, cli_session):
        """Close the CLI session and terminate the entire engine process."""
        cli_session.close()
        sys.exit(0)
parse_tree = {
"clear": {
"engine": {
"statistics": command_clear_engine_stats
},
"$interface": {
"statistics": command_clear_intf_stats
},
"node": {
"statistics": command_clear_node_stats
}
},
"exit": command_exit,
"help": command_help,
"set": {
"$interface": {
"$failure": command_set_interface_failure
},
"$node": command_set_node,
"$level": command_set_level
},
"show": {
"bandwidth-balancing": command_show_bw_balancing,
"disaggregation": command_show_disaggregation,
"engine": {
"": command_show_engine,
"statistics": {
"": command_show_engine_stats,
"exclude-zero": command_show_eng_stats_ex_zero
}
},
"flooding-reduction": command_show_flooding_reduction,
"forwarding": {
"": command_show_forwarding,
"$prefix": command_show_forwarding_prefix,
"$family": command_show_forwarding_family,
},
"fsm": {
"lie": command_show_lie_fsm,
"ztp": command_show_ztp_fsm,
},
"$interface": {
"": command_show_interface,
"fsm": {
"history": command_show_intf_fsm_nvhis,
"verbose-history": command_show_intf_fsm_vhis,
},
"packets": command_show_intf_packets,
"queues": command_show_intf_queues,
"security": command_show_intf_security,
"sockets": command_show_intf_sockets,
"statistics": {
"": command_show_intf_stats,
"exclude-zero": command_show_intf_stats_ex_zero
},
"tides": command_show_intf_tides
},
"interfaces": command_show_interfaces,
"kernel": {
"addresses": command_show_kernel_addresses,
"links": command_show_kernel_links,
"routes": {
"": command_show_kernel_routes,
"$table": {
"": command_show_kernel_routes_tab,
"$prefix": command_show_kernel_routes_pref
},
},
},
"neighbors": command_show_neighbors,
"node": {
"": command_show_node,
"fsm": {
"history": command_show_node_fsm_nvhis,
"verbose-history": command_show_node_fsm_vhis,
},
"statistics": {
"": command_show_node_stats,
"exclude-zero": command_show_node_stats_ex_zero
}
},
"nodes": {
"": command_show_nodes,
"level": command_show_nodes_level,
},
"routes": {
"": command_show_routes,
"$prefix": {
"": command_show_route_prefix,
"$owner": command_show_route_prefix_owner,
},
"$family": command_show_routes_family,
},
"security": command_show_security,
"spf": {
"": command_show_spf,
"$direction" : {
"": command_show_spf_dir,
"$destination": command_show_spf_dir_dest
},
},
"tie-db": {
"": command_show_tie_db,
"$direction": {
"": command_show_tie_db_dir,
"$originator": {
"": command_show_tie_db_dir_orig,
"$tie-type": command_show_tie_db_dir_orig_type
}
}
}
},
"stop": command_stop,
}
    @property
    def active_nodes(self):
        """The nodes this engine was asked to run, as passed at construction."""
        return self._run_which_nodes
| 42.952813 | 100 | 0.662568 | import atexit
import logging
import random
import os
import sys
import termios
import sortedcontainers
import netifaces
import cli_listen_handler
import cli_session_handler
import constants
import interface
import key
import node
import scheduler
import stats
import table
OLD_TERMINAL_SETTINGS = None
def make_terminal_unbuffered():
global OLD_TERMINAL_SETTINGS
OLD_TERMINAL_SETTINGS = termios.tcgetattr(sys.stdin)
new_settings = termios.tcgetattr(sys.stdin)
new_settings[3] = new_settings[3] & ~(termios.ECHO | termios.ICANON)
new_settings[6][termios.VMIN] = 0
new_settings[6][termios.VTIME] = 0
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, new_settings)
@atexit.register
def restore_terminal():
global OLD_TERMINAL_SETTINGS
if OLD_TERMINAL_SETTINGS:
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, OLD_TERMINAL_SETTINGS)
OLD_TERMINAL_SETTINGS = None
class Engine:
def __init__(self, passive_nodes, run_which_nodes, interactive, telnet_port_file,
ipv4_multicast_loopback, ipv6_multicast_loopback, log_level, config):
log_file_name = "rift.log"
if "RIFT_TEST_RESULTS_DIR" in os.environ:
log_file_name = os.environ["RIFT_TEST_RESULTS_DIR"] + "/" + log_file_name
logging.basicConfig(
filename=log_file_name,
format='%(asctime)s:%(levelname)s:%(name)s:%(message)s',
level=log_level)
self._run_which_nodes = run_which_nodes
self._interactive = interactive
self._telnet_port_file = telnet_port_file
self.ipv4_multicast_loopback = ipv4_multicast_loopback
self.ipv6_multicast_loopback = ipv6_multicast_loopback
self._config = config
if self.nr_nodes() > 1:
self._stand_alone = False
self.simulated_interfaces = True
self.physical_interface_name = self.default_physical_interface()
else:
self._stand_alone = True
self.simulated_interfaces = False
self.physical_interface_name = None
self.tx_src_address = self.read_global_configuration(config, 'tx_src_address', '')
self.floodred_enabled = self.read_global_configuration(config, 'flooding_reduction', True)
self.floodred_redundancy = self.read_global_configuration(
config,
'flooding_reduction_redundancy',
constants.DEFAULT_FLOODING_REDUCTION_REDUNDANCY)
self.floodred_similarity = self.read_global_configuration(
config,
'flooding_reduction_similarity',
constants.DEFAULT_FLOODING_REDUCTION_SIMILARITY)
self.floodred_system_random = random.randint(0, 0xffffffffffffffff)
self.intf_traffic_stats_group = stats.Group()
self.intf_security_stats_group = stats.Group()
self.intf_lie_fsm_stats_group = stats.Group()
self.node_ztp_fsm_stats_group = stats.Group()
self.keys = {}
self.keys[0] = key.Key(key_id=0, algorithm="null", secret="")
self._nodes = sortedcontainers.SortedDict()
self.create_configuration(passive_nodes)
cli_log = logging.getLogger('cli')
if self._nodes:
first_node = self._nodes.peekitem(0)[1]
else:
first_node = None
if self._interactive:
make_terminal_unbuffered()
self._cli_listen_handler = None
self._interactive_cli_session_handler = cli_session_handler.CliSessionHandler(
sock=None,
rx_fd=sys.stdin.fileno(),
tx_fd=sys.stdout.fileno(),
parse_tree=self.parse_tree,
command_handler=self,
log=cli_log,
node=first_node)
else:
self._cli_listen_handler = cli_listen_handler.CliListenHandler(
command_tree=self.parse_tree,
command_handler=self,
log=cli_log,
default_node=first_node)
self._interactive_cli_session_handler = None
if self._telnet_port_file is None:
print("Command Line Interface (CLI) available on port {}"
.format(self._cli_listen_handler.port))
else:
try:
with open(self._telnet_port_file, 'w') as file:
print(self._cli_listen_handler.port, file=file)
except IOError:
pass
def default_physical_interface(self):
for intf_name in netifaces.interfaces():
addresses = netifaces.ifaddresses(intf_name)
if netifaces.AF_INET in addresses:
ipv4_addresses = addresses[netifaces.AF_INET]
for ipv4_address in ipv4_addresses:
if 'broadcast' in ipv4_address:
return intf_name
print("Cannot pick default physical interface: no broadcast interface found")
sys.exit(1)
def nr_nodes(self):
total_nr_nodes = 0
if 'shards' in self._config:
for shard_config in self._config['shards']:
if 'nodes' in shard_config:
total_nr_nodes += len(shard_config['nodes'])
return total_nr_nodes
def read_global_configuration(self, config, attribute, default):
if ('const' in config) and (config['const'] is not None) and (attribute in config['const']):
return config['const'][attribute]
if attribute in config:
return config[attribute]
return default
def create_configuration(self, passive_nodes):
if 'authentication_keys' in self._config:
for key_config in self._config['authentication_keys']:
self.create_key(key_config)
if 'shards' in self._config:
for shard_config in self._config['shards']:
self.create_shard(shard_config, passive_nodes)
def create_key(self, key_config):
key_id = key_config["id"]
algorithm = key_config["algorithm"]
secret = key_config["secret"]
self.keys[key_id] = key.Key(key_id, algorithm, secret)
def key_id_to_key(self, key_id):
if key_id is None:
return None
if key_id not in self.keys:
return None
return self.keys[key_id]
def key_ids_to_keys(self, key_ids):
if key_ids is None:
return []
return [self.key_id_to_key(key_id) for key_id in key_ids]
def create_shard(self, shard_config, passive_nodes):
if 'nodes' in shard_config:
for node_config in shard_config['nodes']:
if 'name' in node_config:
force_passive = node_config['name'] in passive_nodes
else:
force_passive = False
self.create_node(node_config, force_passive)
def create_node(self, node_config, force_passive):
new_node = node.Node(node_config, self, force_passive, self._stand_alone)
self._nodes[new_node.name] = new_node
def run(self):
scheduler.SCHEDULER.run()
def command_clear_engine_stats(self, _cli_session):
self.intf_traffic_stats_group.clear()
self.intf_security_stats_group.clear()
self.intf_lie_fsm_stats_group.clear()
self.node_ztp_fsm_stats_group.clear()
scheduler.SCHEDULER.slip_count_10ms = 0
scheduler.SCHEDULER.slip_count_100ms = 0
scheduler.SCHEDULER.slip_count_1000ms = 0
scheduler.SCHEDULER.max_pending_events_proc_time = 0.0
scheduler.SCHEDULER.max_expired_timers_proc_time = 0.0
scheduler.SCHEDULER.max_select_proc_time = 0.0
scheduler.SCHEDULER.max_ready_to_read_proc_time = 0.0
def command_clear_intf_stats(self, cli_session, parameters):
cli_session.current_node.command_clear_intf_stats(cli_session, parameters)
def command_clear_node_stats(self, cli_session):
cli_session.current_node.command_clear_node_stats(cli_session)
def command_show_engine(self, cli_session):
tab = table.Table(separators=False)
tab.add_row(["Stand-alone", self._stand_alone])
tab.add_row(["Interactive", self._interactive])
tab.add_row(["Simulated Interfaces", self.simulated_interfaces])
tab.add_row(["Physical Interface", self.physical_interface_name])
tab.add_row(["Telnet Port File", self._telnet_port_file])
tab.add_row(["IPv4 Multicast Loopback", self.ipv4_multicast_loopback])
tab.add_row(["IPv6 Multicast Loopback", self.ipv6_multicast_loopback])
tab.add_row(["Number of Nodes", self.nr_nodes()])
tab.add_row(["Transmit Source Address", self.tx_src_address])
tab.add_row(["Flooding Reduction Enabled", self.floodred_enabled])
tab.add_row(["Flooding Reduction Redundancy", self.floodred_redundancy])
tab.add_row(["Flooding Reduction Similarity", self.floodred_similarity])
tab.add_row(["Flooding Reduction System Random", self.floodred_system_random])
tab.add_row(["Timer slips > 10ms", scheduler.SCHEDULER.slip_count_10ms])
tab.add_row(["Timer slips > 100ms", scheduler.SCHEDULER.slip_count_100ms])
tab.add_row(["Timer slips > 1000ms", scheduler.SCHEDULER.slip_count_1000ms])
tab.add_row(["Max pending events processing time",
"{:06f}".format(scheduler.SCHEDULER.max_pending_events_proc_time)])
tab.add_row(["Max expired timers processing time",
"{:06f}".format(scheduler.SCHEDULER.max_expired_timers_proc_time)])
tab.add_row(["Max select processing time",
"{:06f}".format(scheduler.SCHEDULER.max_select_proc_time)])
tab.add_row(["Max ready-to-read processing time",
"{:06f}".format(scheduler.SCHEDULER.max_ready_to_read_proc_time)])
cli_session.print(tab.to_string())
def command_show_engine_stats(self, cli_session, exclude_zero=False):
cli_session.print("All Node ZTP FSMs:")
tab = self.node_ztp_fsm_stats_group.table(exclude_zero)
cli_session.print(tab.to_string())
cli_session.print("All Interfaces Traffic:")
tab = self.intf_traffic_stats_group.table(exclude_zero)
cli_session.print(tab.to_string())
cli_session.print("All Interfaces Security:")
tab = self.intf_security_stats_group.table(exclude_zero)
cli_session.print(tab.to_string())
cli_session.print("All Interface LIE FSMs:")
tab = self.intf_lie_fsm_stats_group.table(exclude_zero)
cli_session.print(tab.to_string())
def command_show_eng_stats_ex_zero(self, cli_session):
self.command_show_engine_stats(cli_session, True)
def command_show_flooding_reduction(self, cli_session):
cli_session.current_node.command_show_flooding_reduction(cli_session)
def command_show_intf_fsm_nvhis(self, cli_session, parameters):
cli_session.current_node.command_show_intf_fsm_hist(cli_session, parameters, False)
def command_show_intf_fsm_vhis(self, cli_session, parameters):
cli_session.current_node.command_show_intf_fsm_hist(cli_session, parameters, True)
def command_show_intf_packets(self, cli_session, parameters):
cli_session.current_node.command_show_intf_packets(cli_session, parameters)
def command_show_intf_queues(self, cli_session, parameters):
cli_session.current_node.command_show_intf_queues(cli_session, parameters)
def command_show_intf_security(self, cli_session, parameters):
cli_session.current_node.command_show_intf_security(cli_session, parameters)
def command_show_intf_sockets(self, cli_session, parameters):
cli_session.current_node.command_show_intf_sockets(cli_session, parameters)
def command_show_intf_stats(self, cli_session, parameters):
cli_session.current_node.command_show_intf_stats(cli_session, parameters, False)
def command_show_intf_stats_ex_zero(self, cli_session, parameters):
cli_session.current_node.command_show_intf_stats(cli_session, parameters, True)
def command_show_intf_tides(self, cli_session, parameters):
cli_session.current_node.command_show_intf_tides(cli_session, parameters)
def command_show_interface(self, cli_session, parameters):
cli_session.current_node.command_show_interface(cli_session, parameters)
def command_set_interface_failure(self, cli_session, parameters):
cli_session.current_node.command_set_interface_failure(cli_session, parameters)
def command_show_interfaces(self, cli_session):
cli_session.current_node.command_show_interfaces(cli_session)
def command_show_neighbors(self, cli_session):
cli_session.current_node.command_show_neighbors(cli_session)
def command_show_bw_balancing(self, cli_session):
cli_session.current_node.command_show_bw_balancing(cli_session)
def command_show_kernel_addresses(self, cli_session):
cli_session.current_node.command_show_kernel_addresses(cli_session)
def command_show_kernel_links(self, cli_session):
cli_session.current_node.command_show_kernel_links(cli_session)
def command_show_kernel_routes(self, cli_session):
cli_session.current_node.command_show_kernel_routes(cli_session)
def command_show_kernel_routes_tab(self, cli_session, parameters):
cli_session.current_node.command_show_kernel_routes_tab(cli_session, parameters)
def command_show_kernel_routes_pref(self, cli_session, parameters):
cli_session.current_node.command_show_kernel_routes_pref(cli_session, parameters)
def command_show_lie_fsm(self, cli_session):
interface.Interface.fsm_definition.command_show_fsm(cli_session)
def command_show_node(self, cli_session):
cli_session.current_node.command_show_node(cli_session)
def command_show_node_fsm_nvhis(self, cli_session):
cli_session.current_node.command_show_node_fsm_history(cli_session, False)
def command_show_node_fsm_vhis(self, cli_session):
cli_session.current_node.command_show_node_fsm_history(cli_session, True)
def command_show_node_stats(self, cli_session):
cli_session.current_node.command_show_node_stats(cli_session, False)
def command_show_node_stats_ex_zero(self, cli_session):
cli_session.current_node.command_show_node_stats(cli_session, True)
def command_show_nodes(self, cli_session):
tab = table.Table()
tab.add_row(node.Node.cli_summary_headers())
for nod in self._nodes.values():
tab.add_row(nod.cli_summary_attributes())
cli_session.print(tab.to_string())
def command_show_nodes_level(self, cli_session):
tab = table.Table()
tab.add_row(node.Node.cli_level_headers())
for nod in self._nodes.values():
tab.add_row(nod.cli_level_attributes())
cli_session.print(tab.to_string())
def command_show_route_prefix(self, cli_session, parameters):
cli_session.current_node.command_show_route_prefix(cli_session, parameters)
def command_show_route_prefix_owner(self, cli_session, parameters):
cli_session.current_node.command_show_route_prefix_owner(cli_session, parameters)
def command_show_routes(self, cli_session):
cli_session.current_node.command_show_routes(cli_session)
def command_show_routes_family(self, cli_session, parameters):
cli_session.current_node.command_show_routes_family(cli_session, parameters)
def command_show_forwarding(self, cli_session):
cli_session.current_node.command_show_forwarding(cli_session)
def command_show_forwarding_prefix(self, cli_session, parameters):
cli_session.current_node.command_show_forwarding_prefix(cli_session, parameters)
def command_show_forwarding_family(self, cli_session, parameters):
cli_session.current_node.command_show_forwarding_family(cli_session, parameters)
def command_show_disaggregation(self, cli_session):
cli_session.current_node.command_show_disaggregation(cli_session)
def command_show_security(self, cli_session):
cli_session.current_node.command_show_security(cli_session)
def command_show_spf(self, cli_session):
cli_session.current_node.command_show_spf(cli_session)
def command_show_spf_dir(self, cli_session, parameters):
cli_session.current_node.command_show_spf_dir(cli_session, parameters)
def command_show_spf_dir_dest(self, cli_session, parameters):
cli_session.current_node.command_show_spf_dir_dest(cli_session, parameters)
def command_show_tie_db(self, cli_session):
cli_session.current_node.command_show_tie_db(cli_session)
def command_show_tie_db_dir(self, cli_session, parameters):
cli_session.current_node.command_show_tie_db_dir(cli_session, parameters)
def command_show_tie_db_dir_orig(self, cli_session, parameters):
cli_session.current_node.command_show_tie_db_dir_orig(cli_session, parameters)
def command_show_tie_db_dir_orig_type(self, cli_session, parameters):
cli_session.current_node.command_show_tie_db_dir_orig_type(cli_session, parameters)
def command_show_ztp_fsm(self, cli_session):
node.Node.fsm_definition.command_show_fsm(cli_session)
def command_set_node(self, cli_session, parameters):
node_name = parameters['node']
if node_name in self._nodes:
cli_session.set_current_node(self._nodes[node_name])
else:
cli_session.print("Node {} does not exist".format(node_name))
def command_set_level(self, cli_session, parameters):
level_symbol = parameters['level'].lower()
parsed_level = node.Node.parse_level_symbol(level_symbol)
if parsed_level is None:
cli_session.print("Invalid level value (expected undefined, leaf, leaf-to-leaf, "
"top-of-fabric, or number)")
return
cli_session.current_node.fsm.push_event(node.Node.Event.CHANGE_LOCAL_CONFIGURED_LEVEL,
level_symbol)
def command_exit(self, cli_session):
cli_session.close()
def command_help(self, cli_session):
cli_session.help()
def command_stop(self, cli_session):
cli_session.close()
sys.exit(0)
parse_tree = {
"clear": {
"engine": {
"statistics": command_clear_engine_stats
},
"$interface": {
"statistics": command_clear_intf_stats
},
"node": {
"statistics": command_clear_node_stats
}
},
"exit": command_exit,
"help": command_help,
"set": {
"$interface": {
"$failure": command_set_interface_failure
},
"$node": command_set_node,
"$level": command_set_level
},
"show": {
"bandwidth-balancing": command_show_bw_balancing,
"disaggregation": command_show_disaggregation,
"engine": {
"": command_show_engine,
"statistics": {
"": command_show_engine_stats,
"exclude-zero": command_show_eng_stats_ex_zero
}
},
"flooding-reduction": command_show_flooding_reduction,
"forwarding": {
"": command_show_forwarding,
"$prefix": command_show_forwarding_prefix,
"$family": command_show_forwarding_family,
},
"fsm": {
"lie": command_show_lie_fsm,
"ztp": command_show_ztp_fsm,
},
"$interface": {
"": command_show_interface,
"fsm": {
"history": command_show_intf_fsm_nvhis,
"verbose-history": command_show_intf_fsm_vhis,
},
"packets": command_show_intf_packets,
"queues": command_show_intf_queues,
"security": command_show_intf_security,
"sockets": command_show_intf_sockets,
"statistics": {
"": command_show_intf_stats,
"exclude-zero": command_show_intf_stats_ex_zero
},
"tides": command_show_intf_tides
},
"interfaces": command_show_interfaces,
"kernel": {
"addresses": command_show_kernel_addresses,
"links": command_show_kernel_links,
"routes": {
"": command_show_kernel_routes,
"$table": {
"": command_show_kernel_routes_tab,
"$prefix": command_show_kernel_routes_pref
},
},
},
"neighbors": command_show_neighbors,
"node": {
"": command_show_node,
"fsm": {
"history": command_show_node_fsm_nvhis,
"verbose-history": command_show_node_fsm_vhis,
},
"statistics": {
"": command_show_node_stats,
"exclude-zero": command_show_node_stats_ex_zero
}
},
"nodes": {
"": command_show_nodes,
"level": command_show_nodes_level,
},
"routes": {
"": command_show_routes,
"$prefix": {
"": command_show_route_prefix,
"$owner": command_show_route_prefix_owner,
},
"$family": command_show_routes_family,
},
"security": command_show_security,
"spf": {
"": command_show_spf,
"$direction" : {
"": command_show_spf_dir,
"$destination": command_show_spf_dir_dest
},
},
"tie-db": {
"": command_show_tie_db,
"$direction": {
"": command_show_tie_db_dir,
"$originator": {
"": command_show_tie_db_dir_orig,
"$tie-type": command_show_tie_db_dir_orig_type
}
}
}
},
"stop": command_stop,
}
@property
def active_nodes(self):
return self._run_which_nodes
| true | true |
f72e364f5022fc04b93bc2dd298a9bd20a6cf030 | 1,337 | py | Python | openpype/hosts/blender/plugins/create/create_layout.py | dangerstudios/OpenPype | 10ddcc4699137888616eec57cd7fac9648189714 | [
"MIT"
] | null | null | null | openpype/hosts/blender/plugins/create/create_layout.py | dangerstudios/OpenPype | 10ddcc4699137888616eec57cd7fac9648189714 | [
"MIT"
] | null | null | null | openpype/hosts/blender/plugins/create/create_layout.py | dangerstudios/OpenPype | 10ddcc4699137888616eec57cd7fac9648189714 | [
"MIT"
] | null | null | null | """Create a layout asset."""
import bpy
from avalon import api
from avalon.blender import lib
import openpype.hosts.blender.api.plugin
class CreateLayout(openpype.hosts.blender.api.plugin.Creator):
"""Layout output for character rigs"""
name = "layoutMain"
label = "Layout"
family = "layout"
icon = "cubes"
def process(self):
asset = self.data["asset"]
subset = self.data["subset"]
name = openpype.hosts.blender.api.plugin.asset_name(asset, subset)
collection = bpy.data.collections.new(name=name)
bpy.context.scene.collection.children.link(collection)
self.data['task'] = api.Session.get('AVALON_TASK')
lib.imprint(collection, self.data)
# Add the rig object and all the children meshes to
# a set and link them all at the end to avoid duplicates.
# Blender crashes if trying to link an object that is already linked.
# This links automatically the children meshes if they were not
# selected, and doesn't link them twice if they, insted,
# were manually selected by the user.
objects_to_link = set()
if (self.options or {}).get("useSelection"):
for obj in lib.get_selection():
collection.children.link(obj.users_collection[0])
return collection
| 32.609756 | 77 | 0.658938 |
import bpy
from avalon import api
from avalon.blender import lib
import openpype.hosts.blender.api.plugin
class CreateLayout(openpype.hosts.blender.api.plugin.Creator):
name = "layoutMain"
label = "Layout"
family = "layout"
icon = "cubes"
def process(self):
asset = self.data["asset"]
subset = self.data["subset"]
name = openpype.hosts.blender.api.plugin.asset_name(asset, subset)
collection = bpy.data.collections.new(name=name)
bpy.context.scene.collection.children.link(collection)
self.data['task'] = api.Session.get('AVALON_TASK')
lib.imprint(collection, self.data)
# were manually selected by the user.
objects_to_link = set()
if (self.options or {}).get("useSelection"):
for obj in lib.get_selection():
collection.children.link(obj.users_collection[0])
return collection
| true | true |
f72e36d55617fb3f480bdfd74ae5f0ca81d6dede | 2,358 | py | Python | tools/progressive_iile_render.py | mistajuliax/pbrt-v3-IILE | afda605d92517d2396e494d81465ead22d0c25e1 | [
"BSD-2-Clause"
] | 16 | 2018-10-12T15:29:22.000Z | 2022-03-16T11:24:10.000Z | tools/progressive_iile_render.py | mistajuliax/pbrt-v3-IILE | afda605d92517d2396e494d81465ead22d0c25e1 | [
"BSD-2-Clause"
] | 16 | 2018-02-02T11:49:36.000Z | 2018-04-21T09:07:08.000Z | tools/progressive_iile_render.py | giuliojiang/pbrt-v3-IISPT | b9be01096293ab0f50b14b9043556c93ff9e07ec | [
"BSD-2-Clause"
] | 2 | 2018-12-12T08:49:43.000Z | 2019-12-03T12:20:04.000Z | import os
import subprocess
import time
# =============================================================================
# Constants and settings
# Each has:
# - filepath
# - directSpp
inputFiles = [
# ["/home/gj/git/pbrt-v3-scenes/white-room/whiteroom-daytime.pbrt", 16],
["/home/gj/git/pbrt-v3-scenes-extra/veach-ajar/scene.pbrt", 2],
# ["/home/gj/git/pbrt-v3-custom-scenes/mbed1/scene.pbrt", 64]
]
outputDir = "/home/gj/git/pbrt-v3-IISPT/tmpiile"
maxSpp = 256
# =============================================================================
# Directories configuration
toolsDir = os.path.abspath(os.path.dirname(__file__))
rootDir = os.path.dirname(toolsDir)
binDir = os.path.join(rootDir, "bin")
pbrtPath = os.path.join(binDir, "pbrt")
# =============================================================================
# Function definitions
def runProcess(cmd):
print(">>> {}".format(cmd))
subprocess.call(cmd, shell=False)
def processFileAtQuality(fdata, spp):
fpath, directSpp = fdata
# Generate output file name
fdir = os.path.dirname(fpath)
sceneName = os.path.basename(fdir)
outFileName = "{}_{}.pfm".format(sceneName, spp)
outFilePath = os.path.join(outputDir, outFileName)
statFileName = "{}_{}.txt".format(sceneName, spp)
statFilePath = os.path.join(outputDir, statFileName)
# Skip if already processed
if os.path.exists(statFilePath):
return
# Change working directory
os.chdir(fdir)
# Start timer
timeStart = time.time()
# Start process
cmd = []
cmd.append(pbrtPath)
cmd.append(fpath)
cmd.append(outFilePath)
cmd.append("--iileIndirect={}".format(spp))
cmd.append("--iileDirect={}".format(directSpp))
runProcess(cmd)
# End timer
timeEnd = time.time()
secondsElapsed = timeEnd - timeStart
secondsElapsed = int(secondsElapsed)
# Record on file
statFile = open(statFilePath, "w")
statFile.write("{}\n".format(secondsElapsed))
statFile.close()
def processFile(fdata):
spp = 0
while spp <= maxSpp:
processFileAtQuality(fdata, spp)
if spp == 0:
spp = 1
else:
spp *= 2
def main():
for fdata in inputFiles:
processFile(fdata)
# =============================================================================
# Main
main() | 25.354839 | 79 | 0.563189 | import os
import subprocess
import time
inputFiles = [
["/home/gj/git/pbrt-v3-scenes-extra/veach-ajar/scene.pbrt", 2],
]
outputDir = "/home/gj/git/pbrt-v3-IISPT/tmpiile"
maxSpp = 256
toolsDir = os.path.abspath(os.path.dirname(__file__))
rootDir = os.path.dirname(toolsDir)
binDir = os.path.join(rootDir, "bin")
pbrtPath = os.path.join(binDir, "pbrt")
def runProcess(cmd):
print(">>> {}".format(cmd))
subprocess.call(cmd, shell=False)
def processFileAtQuality(fdata, spp):
fpath, directSpp = fdata
fdir = os.path.dirname(fpath)
sceneName = os.path.basename(fdir)
outFileName = "{}_{}.pfm".format(sceneName, spp)
outFilePath = os.path.join(outputDir, outFileName)
statFileName = "{}_{}.txt".format(sceneName, spp)
statFilePath = os.path.join(outputDir, statFileName)
if os.path.exists(statFilePath):
return
os.chdir(fdir)
timeStart = time.time()
cmd = []
cmd.append(pbrtPath)
cmd.append(fpath)
cmd.append(outFilePath)
cmd.append("--iileIndirect={}".format(spp))
cmd.append("--iileDirect={}".format(directSpp))
runProcess(cmd)
timeEnd = time.time()
secondsElapsed = timeEnd - timeStart
secondsElapsed = int(secondsElapsed)
statFile = open(statFilePath, "w")
statFile.write("{}\n".format(secondsElapsed))
statFile.close()
def processFile(fdata):
spp = 0
while spp <= maxSpp:
processFileAtQuality(fdata, spp)
if spp == 0:
spp = 1
else:
spp *= 2
def main():
for fdata in inputFiles:
processFile(fdata)
main() | true | true |
f72e375f818d6fd833d1ecb3aad6edc6f7e61f69 | 3,565 | py | Python | nvtabular/io/csv.py | miguelusque/NVTabular | e58d318a64d8c1607e91c10b9b5d4a8b48bcab69 | [
"Apache-2.0"
] | 1 | 2021-09-06T10:38:03.000Z | 2021-09-06T10:38:03.000Z | nvtabular/io/csv.py | ksalama/NVTabular | 76e63d9df7b90433d552606e9cf87bd61d7eee3b | [
"Apache-2.0"
] | null | null | null | nvtabular/io/csv.py | ksalama/NVTabular | 76e63d9df7b90433d552606e9cf87bd61d7eee3b | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import functools
import dask.dataframe as dd
import dask_cudf
import numpy as np
from dask.bytes import read_bytes
from dask.utils import parse_bytes
from fsspec.core import get_fs_token_paths
from fsspec.utils import infer_compression
from .dataset_engine import DatasetEngine
class CSVDatasetEngine(DatasetEngine):
"""CSVDatasetEngine
Thin wrapper around dask_cudf.read_csv.
"""
def __init__(self, paths, part_size, storage_options=None, cpu=False, **kwargs):
super().__init__(paths, part_size, cpu=cpu, storage_options=storage_options)
self._meta = {}
self.csv_kwargs = kwargs
self.csv_kwargs["storage_options"] = storage_options
# CSV reader needs a list of files
# (Assume flat directory structure if this is a dir)
if len(self.paths) == 1 and self.fs.isdir(self.paths[0]):
self.paths = self.fs.glob(self.fs.sep.join([self.paths[0], "*"]))
def to_ddf(self, columns=None, cpu=None):
# Check if we are using cpu
cpu = self.cpu if cpu is None else cpu
if cpu:
ddf = dd.read_csv(self.paths, blocksize=self.part_size, **self.csv_kwargs)
else:
ddf = dask_cudf.read_csv(self.paths, chunksize=self.part_size, **self.csv_kwargs)
if columns:
ddf = ddf[columns]
return ddf
@property
@functools.lru_cache(1)
def _file_partition_map(self):
ind = 0
_pp_map = {}
for path, blocks in zip(
*_byte_block_counts(
self.paths,
self.part_size,
**self.csv_kwargs,
)
):
_pp_map[path.split(self.fs.sep)[-1]] = np.arange(ind, ind + blocks)
ind += blocks
return _pp_map
def to_cpu(self):
self.cpu = True
def to_gpu(self):
self.cpu = False
def _byte_block_counts(
urlpath,
blocksize,
lineterminator=None,
compression="infer",
storage_options=None,
**kwargs,
):
"""Return a list of paths and block counts.
Logic copied from dask.bytes.read_bytes
"""
if lineterminator is not None and len(lineterminator) == 1:
kwargs["lineterminator"] = lineterminator
else:
lineterminator = "\n"
if compression == "infer":
paths = get_fs_token_paths(urlpath, mode="rb", storage_options=storage_options)[2]
compression = infer_compression(paths[0])
if isinstance(blocksize, str):
blocksize = parse_bytes(blocksize)
if blocksize and compression:
blocksize = None
b_out = read_bytes(
urlpath,
delimiter=lineterminator.encode(),
blocksize=blocksize,
sample=False,
compression=compression,
include_path=True,
**(storage_options or {}),
)
_, values, paths = b_out
if not isinstance(values[0], (tuple, list)):
values = [values]
return paths, [len(v) for v in values]
| 28.98374 | 93 | 0.646283 |
import functools
import dask.dataframe as dd
import dask_cudf
import numpy as np
from dask.bytes import read_bytes
from dask.utils import parse_bytes
from fsspec.core import get_fs_token_paths
from fsspec.utils import infer_compression
from .dataset_engine import DatasetEngine
class CSVDatasetEngine(DatasetEngine):
def __init__(self, paths, part_size, storage_options=None, cpu=False, **kwargs):
super().__init__(paths, part_size, cpu=cpu, storage_options=storage_options)
self._meta = {}
self.csv_kwargs = kwargs
self.csv_kwargs["storage_options"] = storage_options
if len(self.paths) == 1 and self.fs.isdir(self.paths[0]):
self.paths = self.fs.glob(self.fs.sep.join([self.paths[0], "*"]))
def to_ddf(self, columns=None, cpu=None):
cpu = self.cpu if cpu is None else cpu
if cpu:
ddf = dd.read_csv(self.paths, blocksize=self.part_size, **self.csv_kwargs)
else:
ddf = dask_cudf.read_csv(self.paths, chunksize=self.part_size, **self.csv_kwargs)
if columns:
ddf = ddf[columns]
return ddf
@property
@functools.lru_cache(1)
def _file_partition_map(self):
ind = 0
_pp_map = {}
for path, blocks in zip(
*_byte_block_counts(
self.paths,
self.part_size,
**self.csv_kwargs,
)
):
_pp_map[path.split(self.fs.sep)[-1]] = np.arange(ind, ind + blocks)
ind += blocks
return _pp_map
def to_cpu(self):
self.cpu = True
def to_gpu(self):
self.cpu = False
def _byte_block_counts(
urlpath,
blocksize,
lineterminator=None,
compression="infer",
storage_options=None,
**kwargs,
):
if lineterminator is not None and len(lineterminator) == 1:
kwargs["lineterminator"] = lineterminator
else:
lineterminator = "\n"
if compression == "infer":
paths = get_fs_token_paths(urlpath, mode="rb", storage_options=storage_options)[2]
compression = infer_compression(paths[0])
if isinstance(blocksize, str):
blocksize = parse_bytes(blocksize)
if blocksize and compression:
blocksize = None
b_out = read_bytes(
urlpath,
delimiter=lineterminator.encode(),
blocksize=blocksize,
sample=False,
compression=compression,
include_path=True,
**(storage_options or {}),
)
_, values, paths = b_out
if not isinstance(values[0], (tuple, list)):
values = [values]
return paths, [len(v) for v in values]
| true | true |
f72e37b9daa06a19c0e8a61a1f48c9b55ecc8390 | 1,883 | py | Python | python/bifrost/quantize.py | MilesCranmer/bifrost | 951dd4a449850d22cfd74f4db13ecf806fe5cc30 | [
"BSD-3-Clause"
] | 1 | 2017-06-27T10:12:44.000Z | 2017-06-27T10:12:44.000Z | python/bifrost/quantize.py | MilesCranmer/bifrost | 951dd4a449850d22cfd74f4db13ecf806fe5cc30 | [
"BSD-3-Clause"
] | null | null | null | python/bifrost/quantize.py | MilesCranmer/bifrost | 951dd4a449850d22cfd74f4db13ecf806fe5cc30 | [
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2016, The Bifrost Authors. All rights reserved.
# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Bifrost Authors nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from libbifrost import _bf, _check, _get, _fast_call
from ndarray import asarray
def quantize(src, dst, scale=1.):
src_bf = asarray(src).as_BFarray()
dst_bf = asarray(dst).as_BFarray()
_fast_call(_bf.Quantize, src_bf,
dst_bf,
scale)
return dst
| 48.282051 | 72 | 0.762082 |
from libbifrost import _bf, _check, _get, _fast_call
from ndarray import asarray
def quantize(src, dst, scale=1.):
src_bf = asarray(src).as_BFarray()
dst_bf = asarray(dst).as_BFarray()
_fast_call(_bf.Quantize, src_bf,
dst_bf,
scale)
return dst
| true | true |
f72e39070a3b805ce41684fe891aaff8bfa78820 | 635 | py | Python | torrent/torrent_tracker/manage.py | projectpai/paipass | 8b8e70b6808bf026cf957e240c7eed7bfcf4c55d | [
"MIT"
] | 3 | 2021-04-17T10:20:26.000Z | 2022-03-08T07:36:13.000Z | torrent/torrent_tracker/manage.py | projectpai/paipass | 8b8e70b6808bf026cf957e240c7eed7bfcf4c55d | [
"MIT"
] | null | null | null | torrent/torrent_tracker/manage.py | projectpai/paipass | 8b8e70b6808bf026cf957e240c7eed7bfcf4c55d | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tracker_backend.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 28.863636 | 79 | 0.686614 |
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tracker_backend.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| true | true |
f72e39b760182363d792931dc557e983045dec1f | 718 | py | Python | leonardo_form_pegastudio/migrations/0019_document.py | dresl/leonardo-form-pegastudio | 915d6328a8ceef2217c896e3c3f0257092f08a16 | [
"BSD-3-Clause"
] | null | null | null | leonardo_form_pegastudio/migrations/0019_document.py | dresl/leonardo-form-pegastudio | 915d6328a8ceef2217c896e3c3f0257092f08a16 | [
"BSD-3-Clause"
] | null | null | null | leonardo_form_pegastudio/migrations/0019_document.py | dresl/leonardo-form-pegastudio | 915d6328a8ceef2217c896e3c3f0257092f08a16 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('leonardo_form_pegastudio', '0018_pegastudioorders_file'),
]
operations = [
migrations.CreateModel(
name='Document',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('description', models.CharField(max_length=255, blank=True)),
('document', models.FileField(upload_to=b'documents/')),
('uploaded_at', models.DateTimeField(auto_now_add=True)),
],
),
]
| 29.916667 | 114 | 0.607242 |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('leonardo_form_pegastudio', '0018_pegastudioorders_file'),
]
operations = [
migrations.CreateModel(
name='Document',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('description', models.CharField(max_length=255, blank=True)),
('document', models.FileField(upload_to=b'documents/')),
('uploaded_at', models.DateTimeField(auto_now_add=True)),
],
),
]
| true | true |
f72e3a490c25894eea1fb44d63d056ee2372eb30 | 1,781 | py | Python | models/testing/all_autoencoder_li_2019_ffnn.py | inovex/RCIS2021-degradation-bearing-vessels | 27bd1a2e3f08c5b42011596aa98e5ac627a416d6 | [
"MIT"
] | 2 | 2021-06-21T11:40:38.000Z | 2021-12-29T02:40:30.000Z | models/testing/all_autoencoder_li_2019_ffnn.py | chenzhengkun7/RCIS2021-degradation-estimation-bearing-vessels | 27bd1a2e3f08c5b42011596aa98e5ac627a416d6 | [
"MIT"
] | 2 | 2021-04-08T11:30:28.000Z | 2021-04-12T06:41:31.000Z | models/testing/all_autoencoder_li_2019_ffnn.py | chenzhengkun7/RCIS2021-degradation-estimation-bearing-vessels | 27bd1a2e3f08c5b42011596aa98e5ac627a416d6 | [
"MIT"
] | 2 | 2021-06-21T11:40:43.000Z | 2021-12-29T02:36:51.000Z | import pandas as pd
from pre_processing.features import read_feature_dfs
from util.helper import pop_labels, concat_dfs
from health_stage_classification.health_stage_classifiers import cut_fpts
from rul_prediction.ffnn import fit_ffnn
from util.constants import LEARNING_SET, FEATURES_CSV_NAME, FULL_TEST_SET, BASIC_STATISTICAL_FEATURES
from rul_features.learned_features.unsupervised.principal_component_analysis import pca_embedded_data_frame
from util.visualization import plot_rul_comparisons, plot_trainings_history
def all_features_and_autoencoder_li_2019_classifier_ffnn_rul_prediction():
# Input features: statistical features
learning_feature_df_list = read_feature_dfs(LEARNING_SET, FEATURES_CSV_NAME)
# Two-Stage: lei et al 2019
cut_dfs, first_prediction_times = cut_fpts(learning_feature_df_list)
# Visualize FPTs
# plot_fpts(first_prediction_times, learning_feature_df_list, 'root_mean_square')
# Concatenate trainings data
all_bearings = concat_dfs(cut_dfs)
labels = all_bearings.pop('RUL')
all_bearings, pca = pca_embedded_data_frame(all_bearings)
# RUL prediction: FFNN
trainings_history, ffnn = fit_ffnn(X=all_bearings, y=labels, dropout=True, epochs=150)
# Visualize training history and later validation history
plot_trainings_history(trainings_history)
# Visualize predicted RUL in comparison to real RUL
comparison_set = read_feature_dfs(FULL_TEST_SET, FEATURES_CSV_NAME)
comparison_set, first_prediction_times = cut_fpts(comparison_set)
# Remove label
label_data = pop_labels(comparison_set)
# Apply PCA
comparison_set = [pd.DataFrame(pca.transform(df)) for df in comparison_set]
plot_rul_comparisons(comparison_set, label_data=label_data, prediction_model=ffnn)
| 44.525 | 107 | 0.815834 | import pandas as pd
from pre_processing.features import read_feature_dfs
from util.helper import pop_labels, concat_dfs
from health_stage_classification.health_stage_classifiers import cut_fpts
from rul_prediction.ffnn import fit_ffnn
from util.constants import LEARNING_SET, FEATURES_CSV_NAME, FULL_TEST_SET, BASIC_STATISTICAL_FEATURES
from rul_features.learned_features.unsupervised.principal_component_analysis import pca_embedded_data_frame
from util.visualization import plot_rul_comparisons, plot_trainings_history
def all_features_and_autoencoder_li_2019_classifier_ffnn_rul_prediction():
learning_feature_df_list = read_feature_dfs(LEARNING_SET, FEATURES_CSV_NAME)
cut_dfs, first_prediction_times = cut_fpts(learning_feature_df_list)
all_bearings = concat_dfs(cut_dfs)
labels = all_bearings.pop('RUL')
all_bearings, pca = pca_embedded_data_frame(all_bearings)
trainings_history, ffnn = fit_ffnn(X=all_bearings, y=labels, dropout=True, epochs=150)
plot_trainings_history(trainings_history)
comparison_set = read_feature_dfs(FULL_TEST_SET, FEATURES_CSV_NAME)
comparison_set, first_prediction_times = cut_fpts(comparison_set)
label_data = pop_labels(comparison_set)
comparison_set = [pd.DataFrame(pca.transform(df)) for df in comparison_set]
plot_rul_comparisons(comparison_set, label_data=label_data, prediction_model=ffnn)
| true | true |
f72e3b4756a5c4944372e531349c6f09fe1782e0 | 3,024 | py | Python | 4_simple_models/scripts/random_forest_SMOTE_bordeline_1.py | ReyhaneAskari/SLA_violation_classification | 258a3c415cebcd04601e4d794d42d664471df668 | [
"MIT"
] | 2 | 2019-03-25T18:07:10.000Z | 2022-03-06T08:49:49.000Z | 4_simple_models/scripts/random_forest_SMOTE_bordeline_1.py | ReyhaneAskari/SLA_violation_classification | 258a3c415cebcd04601e4d794d42d664471df668 | [
"MIT"
] | null | null | null | 4_simple_models/scripts/random_forest_SMOTE_bordeline_1.py | ReyhaneAskari/SLA_violation_classification | 258a3c415cebcd04601e4d794d42d664471df668 | [
"MIT"
] | 2 | 2018-10-10T01:18:10.000Z | 2018-10-10T03:05:53.000Z | # -*- coding: utf-8 -*-
# In this script we use a simple classifer called naive bayes and try to predict the violations. But before that we use
# some methods to tackle the problem of our skewed dataset. :)
# 11 May 2016
# @author: reyhane_askari
# Universite de Montreal, DIRO
import csv
import numpy as np
from sklearn.metrics import roc_curve, auc
from sklearn.cross_validation import train_test_split
from sklearn import metrics
import pandas as pd
from os import chdir, listdir
from pandas import read_csv
from os import path
from random import randint, sample, seed
from collections import OrderedDict
from pandas import DataFrame, Series
import numpy as np
import csv
import codecs
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib as mpl
import seaborn as sns
sns.set()
import itertools
from sklearn.decomposition import PCA
from unbalanced_dataset import UnderSampler, NearMiss, CondensedNearestNeighbour, OneSidedSelection,\
NeighbourhoodCleaningRule, TomekLinks, ClusterCentroids, OverSampler, SMOTE,\
SMOTETomek, SMOTEENN, EasyEnsemble, BalanceCascade
almost_black = '#262626'
colnames = ['old_index','job_id', 'task_idx','sched_cls', 'priority', 'cpu_requested',
'mem_requested', 'disk', 'violation']
tain_path = r'/home/askrey/Dropbox/Project_step_by_step/3_create_database/csvs/frull_db_2.csv'
X = pd.read_csv(tain_path, header = None, index_col = False ,names = colnames, skiprows = [0], usecols = [3,4,5,6,7])
y = pd.read_csv(tain_path, header = None, index_col = False ,names = colnames, skiprows = [0], usecols = [8])
y = y['violation'].values
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.333, random_state=0)
main_x = X.values
main_y = y
verbose = False
ratio = float(np.count_nonzero(y==1)) / float(np.count_nonzero(y==0))
# 'SMOTE bordeline 1'
bsmote1 = SMOTE(ratio=ratio, verbose=verbose, kind='borderline1')
x, y = bsmote1.fit_transform(main_x, main_y)
ratio = float(np.count_nonzero(y==1)) / float(np.count_nonzero(y==0))
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=.333, random_state=0)
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import cross_val_score
clf = RandomForestClassifier(n_estimators=10)
scores = cross_val_score(clf, X_test, y_test)
y_pred = clf.fit(X_train, y_train).predict(X_test)
y_score = clf.fit(X_train, y_train).predict_proba(X_test)[:,1]
mean_accuracy = clf.fit(X_train, y_train).score(X_test,y_test,sample_weight=None)
fpr, tpr, thresholds = metrics.roc_curve(y_test, y_score)
roc_auc = auc(fpr, tpr)
plt.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.savefig('/home/askrey/Dropbox/Project_step_by_step/5_simple_models/new_scripts/random_forest_SMOTE_bordeline_1.pdf')
| 35.162791 | 120 | 0.76422 |
import csv
import numpy as np
from sklearn.metrics import roc_curve, auc
from sklearn.cross_validation import train_test_split
from sklearn import metrics
import pandas as pd
from os import chdir, listdir
from pandas import read_csv
from os import path
from random import randint, sample, seed
from collections import OrderedDict
from pandas import DataFrame, Series
import numpy as np
import csv
import codecs
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib as mpl
import seaborn as sns
sns.set()
import itertools
from sklearn.decomposition import PCA
from unbalanced_dataset import UnderSampler, NearMiss, CondensedNearestNeighbour, OneSidedSelection,\
NeighbourhoodCleaningRule, TomekLinks, ClusterCentroids, OverSampler, SMOTE,\
SMOTETomek, SMOTEENN, EasyEnsemble, BalanceCascade
almost_black = '#262626'
colnames = ['old_index','job_id', 'task_idx','sched_cls', 'priority', 'cpu_requested',
'mem_requested', 'disk', 'violation']
tain_path = r'/home/askrey/Dropbox/Project_step_by_step/3_create_database/csvs/frull_db_2.csv'
X = pd.read_csv(tain_path, header = None, index_col = False ,names = colnames, skiprows = [0], usecols = [3,4,5,6,7])
y = pd.read_csv(tain_path, header = None, index_col = False ,names = colnames, skiprows = [0], usecols = [8])
y = y['violation'].values
main_x = X.values
main_y = y
verbose = False
ratio = float(np.count_nonzero(y==1)) / float(np.count_nonzero(y==0))
bsmote1 = SMOTE(ratio=ratio, verbose=verbose, kind='borderline1')
x, y = bsmote1.fit_transform(main_x, main_y)
ratio = float(np.count_nonzero(y==1)) / float(np.count_nonzero(y==0))
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=.333, random_state=0)
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import cross_val_score
clf = RandomForestClassifier(n_estimators=10)
scores = cross_val_score(clf, X_test, y_test)
y_pred = clf.fit(X_train, y_train).predict(X_test)
y_score = clf.fit(X_train, y_train).predict_proba(X_test)[:,1]
mean_accuracy = clf.fit(X_train, y_train).score(X_test,y_test,sample_weight=None)
fpr, tpr, thresholds = metrics.roc_curve(y_test, y_score)
roc_auc = auc(fpr, tpr)
plt.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.savefig('/home/askrey/Dropbox/Project_step_by_step/5_simple_models/new_scripts/random_forest_SMOTE_bordeline_1.pdf')
| true | true |
f72e3b4cd025f89103636fd22e3d20bbea3db413 | 7,354 | py | Python | cca_zoo/data/simulated.py | raamana/cca_zoo | 7137918a6bac098ec20ba998d1774d5335c178dd | [
"MIT"
] | 1 | 2021-06-19T13:57:44.000Z | 2021-06-19T13:57:44.000Z | cca_zoo/data/simulated.py | raamana/cca_zoo | 7137918a6bac098ec20ba998d1774d5335c178dd | [
"MIT"
] | null | null | null | cca_zoo/data/simulated.py | raamana/cca_zoo | 7137918a6bac098ec20ba998d1774d5335c178dd | [
"MIT"
] | null | null | null | import itertools
from typing import List, Union
import numpy as np
from scipy import linalg
from scipy.linalg import block_diag
from ..utils.check_values import _process_parameter
def generate_covariance_data(n: int, view_features: List[int], latent_dims: int = 1,
view_sparsity: List[Union[int, float]] = None,
correlation: Union[List[float], float] = 1,
structure: Union[str, List[str]] = None, sigma: List[float] = None, decay: float = 0.5,
positive=None):
"""
Function to generate CCA dataset with defined population correlation
:param view_sparsity: level of sparsity in features in each view either as number of active variables or percentage active
:param view_features: number of features in each view
:param n: number of samples
:param latent_dims: number of latent dimensions
:param signal: correlation
:param structure: within view covariance structure
:param sigma: gaussian sigma
:param decay: ratio of second signal to first signal
:return: tuple of numpy arrays: view_1, view_2, true weights from view 1, true weights from view 2, overall covariance structure
:Example:
>>> from cca_zoo.data import generate_covariance_data
>>> [train_view_1,train_view_2],[true_weights_1,true_weights_2]=generate_covariance_data(200,[10,10],latent_dims=1,correlation=1)
"""
structure = _process_parameter('structure', structure, 'identity', len(view_features))
view_sparsity = _process_parameter('view_sparsity', view_sparsity, 1, len(view_features))
positive = _process_parameter('positive', positive, False, len(view_features))
sigma = _process_parameter('sigma', sigma, 0.5, len(view_features))
completed = False
while not completed:
try:
mean = np.zeros(sum(view_features))
if not isinstance(correlation, list):
p = np.arange(0, latent_dims)
correlation = correlation * decay ** p
covs = []
true_features = []
for view_p, sparsity, view_structure, view_positive, view_sigma in zip(view_features, view_sparsity,
structure,
positive, sigma):
# Covariance Bit
if view_structure == 'identity':
cov_ = np.eye(view_p)
elif view_structure == 'gaussian':
cov_ = _generate_gaussian_cov(view_p, view_sigma)
elif view_structure == 'toeplitz':
cov_ = _generate_toeplitz_cov(view_p, view_sigma)
elif view_structure == 'random':
cov_ = _generate_random_cov(view_p)
else:
completed = True
print("invalid structure")
break
weights = np.random.normal(size=(view_p, latent_dims))
if sparsity <= 1:
sparsity = np.ceil(sparsity * view_p).astype('int')
if sparsity < view_p:
mask = np.stack(
(np.concatenate(([0] * (view_p - sparsity), [1] * sparsity)).astype(bool),) * latent_dims,
axis=0).T
np.random.shuffle(mask)
while np.sum(np.unique(mask, axis=1, return_counts=True)[1] > 1) > 0 or np.sum(
np.sum(mask, axis=0) == 0) > 0:
np.random.shuffle(mask)
weights = weights * mask
if view_positive:
weights[weights < 0] = 0
weights = _decorrelate_dims(weights, cov_)
weights /= np.sqrt(np.diag((weights.T @ cov_ @ weights)))
true_features.append(weights)
covs.append(cov_)
cov = block_diag(*covs)
splits = np.concatenate(([0], np.cumsum(view_features)))
for i, j in itertools.combinations(range(len(splits) - 1), 2):
cross = np.zeros((view_features[i], view_features[j]))
for _ in range(latent_dims):
A = correlation[_] * np.outer(true_features[i][:, _], true_features[j][:, _])
# Cross Bit
cross += covs[i] @ A @ covs[j]
cov[splits[i]: splits[i] + view_features[i], splits[j]: splits[j] + view_features[j]] = cross
cov[splits[j]: splits[j] + view_features[j], splits[i]: splits[i] + view_features[i]] = cross.T
X = np.zeros((n, sum(view_features)))
chol = np.linalg.cholesky(cov)
for _ in range(n):
X[_, :] = _chol_sample(mean, chol)
views = np.split(X, np.cumsum(view_features)[:-1], axis=1)
completed = True
except:
completed = False
return views, true_features
def generate_simple_data(n: int, view_features: List[int], view_sparsity: List[int] = None,
eps: float = 0):
"""
:param n: number of samples
:param view_features: number of features view 1
:param view_sparsity: number of features view 2
:param eps: gaussian noise std
:return: view1 matrix, view2 matrix, true weights view 1, true weights view 2
:Example:
>>> from cca_zoo.data import generate_simple_data
>>> [train_view_1,train_view_2],[true_weights_1,true_weights_2]=generate_covariance_data(200,[10,10])
"""
z = np.random.normal(0, 1, n)
views = []
true_features = []
for p, sparsity in zip(view_features, view_sparsity):
weights = np.random.normal(size=(p, 1))
if sparsity > 0:
if sparsity < 1:
sparsity = np.ceil(sparsity * p).astype('int')
weights[np.random.choice(np.arange(p), p - sparsity, replace=False)] = 0
gaussian_x = np.random.normal(0, eps, (n, p))
view = np.outer(z, weights)
view += gaussian_x
views.append(view)
true_features.append(weights)
return views, true_features
def _decorrelate_dims(up, cov):
A = up.T @ cov @ up
for k in range(1, A.shape[0]):
up[:, k:] -= np.outer(up[:, k - 1], A[k - 1, k:] / A[k - 1, k - 1])
A = up.T @ cov @ up
return up
def _chol_sample(mean, chol):
return mean + chol @ np.random.standard_normal(mean.size)
def _gaussian(x, mu, sig, dn):
"""
Generate a gaussian covariance matrix
:param x:
:param mu:
:param sig:
:param dn:
"""
return np.exp(-np.power(x - mu, 2.) / (2 * np.power(sig, 2.))) * dn / (np.sqrt(2 * np.pi) * sig)
def _generate_gaussian_cov(p, sigma):
x = np.linspace(-1, 1, p)
x_tile = np.tile(x, (p, 1))
mu_tile = np.transpose(x_tile)
dn = 2 / (p - 1)
cov = _gaussian(x_tile, mu_tile, sigma, dn)
cov /= cov.max()
return cov
def _generate_toeplitz_cov(p, sigma):
c = np.arange(0, p)
c = sigma ** c
cov = linalg.toeplitz(c, c)
return cov
def _generate_random_cov(p):
cov_ = np.random.rand(p, p)
U, S, Vt = np.linalg.svd(cov_.T @ cov_)
cov = U @ (1 + np.diag(np.random.rand(p))) @ Vt
return cov
| 39.751351 | 133 | 0.565271 | import itertools
from typing import List, Union
import numpy as np
from scipy import linalg
from scipy.linalg import block_diag
from ..utils.check_values import _process_parameter
def generate_covariance_data(n: int, view_features: List[int], latent_dims: int = 1,
view_sparsity: List[Union[int, float]] = None,
correlation: Union[List[float], float] = 1,
structure: Union[str, List[str]] = None, sigma: List[float] = None, decay: float = 0.5,
positive=None):
structure = _process_parameter('structure', structure, 'identity', len(view_features))
view_sparsity = _process_parameter('view_sparsity', view_sparsity, 1, len(view_features))
positive = _process_parameter('positive', positive, False, len(view_features))
sigma = _process_parameter('sigma', sigma, 0.5, len(view_features))
completed = False
while not completed:
try:
mean = np.zeros(sum(view_features))
if not isinstance(correlation, list):
p = np.arange(0, latent_dims)
correlation = correlation * decay ** p
covs = []
true_features = []
for view_p, sparsity, view_structure, view_positive, view_sigma in zip(view_features, view_sparsity,
structure,
positive, sigma):
if view_structure == 'identity':
cov_ = np.eye(view_p)
elif view_structure == 'gaussian':
cov_ = _generate_gaussian_cov(view_p, view_sigma)
elif view_structure == 'toeplitz':
cov_ = _generate_toeplitz_cov(view_p, view_sigma)
elif view_structure == 'random':
cov_ = _generate_random_cov(view_p)
else:
completed = True
print("invalid structure")
break
weights = np.random.normal(size=(view_p, latent_dims))
if sparsity <= 1:
sparsity = np.ceil(sparsity * view_p).astype('int')
if sparsity < view_p:
mask = np.stack(
(np.concatenate(([0] * (view_p - sparsity), [1] * sparsity)).astype(bool),) * latent_dims,
axis=0).T
np.random.shuffle(mask)
while np.sum(np.unique(mask, axis=1, return_counts=True)[1] > 1) > 0 or np.sum(
np.sum(mask, axis=0) == 0) > 0:
np.random.shuffle(mask)
weights = weights * mask
if view_positive:
weights[weights < 0] = 0
weights = _decorrelate_dims(weights, cov_)
weights /= np.sqrt(np.diag((weights.T @ cov_ @ weights)))
true_features.append(weights)
covs.append(cov_)
cov = block_diag(*covs)
splits = np.concatenate(([0], np.cumsum(view_features)))
for i, j in itertools.combinations(range(len(splits) - 1), 2):
cross = np.zeros((view_features[i], view_features[j]))
for _ in range(latent_dims):
A = correlation[_] * np.outer(true_features[i][:, _], true_features[j][:, _])
cross += covs[i] @ A @ covs[j]
cov[splits[i]: splits[i] + view_features[i], splits[j]: splits[j] + view_features[j]] = cross
cov[splits[j]: splits[j] + view_features[j], splits[i]: splits[i] + view_features[i]] = cross.T
X = np.zeros((n, sum(view_features)))
chol = np.linalg.cholesky(cov)
for _ in range(n):
X[_, :] = _chol_sample(mean, chol)
views = np.split(X, np.cumsum(view_features)[:-1], axis=1)
completed = True
except:
completed = False
return views, true_features
def generate_simple_data(n: int, view_features: List[int], view_sparsity: List[int] = None,
eps: float = 0):
z = np.random.normal(0, 1, n)
views = []
true_features = []
for p, sparsity in zip(view_features, view_sparsity):
weights = np.random.normal(size=(p, 1))
if sparsity > 0:
if sparsity < 1:
sparsity = np.ceil(sparsity * p).astype('int')
weights[np.random.choice(np.arange(p), p - sparsity, replace=False)] = 0
gaussian_x = np.random.normal(0, eps, (n, p))
view = np.outer(z, weights)
view += gaussian_x
views.append(view)
true_features.append(weights)
return views, true_features
def _decorrelate_dims(up, cov):
A = up.T @ cov @ up
for k in range(1, A.shape[0]):
up[:, k:] -= np.outer(up[:, k - 1], A[k - 1, k:] / A[k - 1, k - 1])
A = up.T @ cov @ up
return up
def _chol_sample(mean, chol):
return mean + chol @ np.random.standard_normal(mean.size)
def _gaussian(x, mu, sig, dn):
return np.exp(-np.power(x - mu, 2.) / (2 * np.power(sig, 2.))) * dn / (np.sqrt(2 * np.pi) * sig)
def _generate_gaussian_cov(p, sigma):
x = np.linspace(-1, 1, p)
x_tile = np.tile(x, (p, 1))
mu_tile = np.transpose(x_tile)
dn = 2 / (p - 1)
cov = _gaussian(x_tile, mu_tile, sigma, dn)
cov /= cov.max()
return cov
def _generate_toeplitz_cov(p, sigma):
c = np.arange(0, p)
c = sigma ** c
cov = linalg.toeplitz(c, c)
return cov
def _generate_random_cov(p):
cov_ = np.random.rand(p, p)
U, S, Vt = np.linalg.svd(cov_.T @ cov_)
cov = U @ (1 + np.diag(np.random.rand(p))) @ Vt
return cov
| true | true |
f72e3b59e81104351bc14bbae3b3432d9707d643 | 1,914 | py | Python | tests/test_action_list_sdk_verb_args.py | jschoewe/stackstorm-orion | a5fdb805ff70c3911cb4c74be3f299f9a1c2625f | [
"Apache-2.0"
] | 164 | 2015-01-17T16:08:33.000Z | 2021-08-03T02:34:07.000Z | tests/test_action_list_sdk_verb_args.py | jschoewe/stackstorm-orion | a5fdb805ff70c3911cb4c74be3f299f9a1c2625f | [
"Apache-2.0"
] | 442 | 2015-01-01T11:19:01.000Z | 2017-09-06T23:26:17.000Z | tests/test_action_list_sdk_verb_args.py | EncoreTechnologies/stackstorm-orion | ed6f54ab7a25885ba1313fe52c9bc0d243164aa2 | [
"Apache-2.0"
] | 202 | 2015-01-13T00:37:40.000Z | 2020-11-07T11:30:10.000Z | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
from mock import MagicMock
from orion_base_action_test_case import OrionBaseActionTestCase
from list_sdk_verb_args import ListSdkVerbArgs
__all__ = [
'ListSdkVerbArgsTestCase'
]
class ListSdkVerbArgsTestCase(OrionBaseActionTestCase):
__test__ = True
action_cls = ListSdkVerbArgs
def test_run_connect_fail(self):
action = self.setup_connect_fail()
self.assertRaises(ValueError,
action.run,
"Cirrus.Nodes",
"AddNode")
def test_run_list_verb_arguments(self):
expected = {'verb_arguments': [
{'position': 0,
'name': "node",
'type': "SolarWinds.NCM.Contracts.InformationService.NCMNode",
'optional': False}]}
query_data = []
query_data.append(self.load_yaml("results_list_sdk_verb_args.yaml"))
action = self.get_action_instance(config=self.full_config)
action.connect = MagicMock(return_value="orion")
action.query = MagicMock(side_effect=query_data)
result = action.run("Cirrus.Nodes", "AddNode")
self.assertEqual(result, expected)
| 36.113208 | 76 | 0.694357 |
from mock import MagicMock
from orion_base_action_test_case import OrionBaseActionTestCase
from list_sdk_verb_args import ListSdkVerbArgs
__all__ = [
'ListSdkVerbArgsTestCase'
]
class ListSdkVerbArgsTestCase(OrionBaseActionTestCase):
__test__ = True
action_cls = ListSdkVerbArgs
def test_run_connect_fail(self):
action = self.setup_connect_fail()
self.assertRaises(ValueError,
action.run,
"Cirrus.Nodes",
"AddNode")
def test_run_list_verb_arguments(self):
expected = {'verb_arguments': [
{'position': 0,
'name': "node",
'type': "SolarWinds.NCM.Contracts.InformationService.NCMNode",
'optional': False}]}
query_data = []
query_data.append(self.load_yaml("results_list_sdk_verb_args.yaml"))
action = self.get_action_instance(config=self.full_config)
action.connect = MagicMock(return_value="orion")
action.query = MagicMock(side_effect=query_data)
result = action.run("Cirrus.Nodes", "AddNode")
self.assertEqual(result, expected)
| true | true |
f72e3b822f5090311cc7b659618c8e63ab60c38a | 56,018 | py | Python | hubblestack/loader.py | vladmonea/hubble | 4db1653d8f65e88e7385651742377db5a7e088ce | [
"Apache-2.0"
] | 363 | 2017-01-10T22:02:47.000Z | 2022-03-21T10:44:40.000Z | hubblestack/loader.py | vladmonea/hubble | 4db1653d8f65e88e7385651742377db5a7e088ce | [
"Apache-2.0"
] | 439 | 2017-01-12T22:39:42.000Z | 2021-10-11T18:43:28.000Z | hubblestack/loader.py | vladmonea/hubble | 4db1653d8f65e88e7385651742377db5a7e088ce | [
"Apache-2.0"
] | 138 | 2017-01-05T22:10:59.000Z | 2021-09-01T14:35:00.000Z | # -*- coding: utf-8 -*-
"""
The Salt loader is the core to Salt's plugin system, the loader scans
directories for python loadable code and organizes the code into the
plugin interfaces used by Salt.
"""
import os
import re
import sys
import time
import yaml
import logging
import inspect
import tempfile
import functools
import threading
import traceback
import types
from zipimport import zipimporter
import hubblestack.config
import hubblestack.syspaths
import hubblestack.utils.args
import hubblestack.utils.context
import hubblestack.utils.data
import hubblestack.utils.dictupdate
import hubblestack.utils.files
import hubblestack.utils.lazy
import hubblestack.utils.odict
import hubblestack.utils.platform
import hubblestack.utils.versions
from hubblestack.exceptions import LoaderError
from hubblestack.template import check_render_pipe_str
from hubblestack.utils.decorators import Depends
import hubblestack.syspaths
import importlib.machinery
import importlib.util
import pkg_resources
try:
from collections.abc import MutableMapping
except ImportError:
from collections import MutableMapping
log = logging.getLogger(__name__)
HUBBLE_BASE_PATH = os.path.abspath(hubblestack.syspaths.INSTALL_DIR)
LOADED_BASE_NAME = "hubble.loaded"
MODULE_KIND_SOURCE = 1
MODULE_KIND_COMPILED = 2
MODULE_KIND_EXTENSION = 3
MODULE_KIND_PKG_DIRECTORY = 5
SUFFIXES = []
for suffix in importlib.machinery.EXTENSION_SUFFIXES:
SUFFIXES.append((suffix, "rb", MODULE_KIND_EXTENSION))
for suffix in importlib.machinery.SOURCE_SUFFIXES:
SUFFIXES.append((suffix, "rb", MODULE_KIND_SOURCE))
for suffix in importlib.machinery.BYTECODE_SUFFIXES:
SUFFIXES.append((suffix, "rb", MODULE_KIND_COMPILED))
MODULE_KIND_MAP = {
MODULE_KIND_SOURCE: importlib.machinery.SourceFileLoader,
MODULE_KIND_COMPILED: importlib.machinery.SourcelessFileLoader,
MODULE_KIND_EXTENSION: importlib.machinery.ExtensionFileLoader,
}
PY3_PRE_EXT = re.compile(r"\.cpython-{0}{1}(\.opt-[1-9])?".format(*sys.version_info[:2]))
# Will be set to pyximport module at runtime if cython is enabled in config.
pyximport = None # pylint: disable=invalid-name
PRESERVABLE_OPTS = dict()
def set_preservable_opts(opts):
    """Register *opts* (keyed by its object id) so later loader rebuilds can
    recover the same dict via get_preserved_opts().

    This is a deliberate scope hack protecting an anti-pattern where modules
    stash reporting data in __opts__; it is invoked from
    hubblestack.daemon.refresh_grains so that data survives recycling of the
    lazy loaders.
    """
    opts_id = id(opts)
    if opts_id in PRESERVABLE_OPTS:
        return
    log.debug("setting %d to be preservable opts", opts_id)
    PRESERVABLE_OPTS[opts_id] = opts.copy()
def get_preserved_opts(opts):
    """Return the preserved copy of *opts* registered by
    set_preservable_opts(), or None when nothing was stored under its id.
    """
    preserved = PRESERVABLE_OPTS.get(id(opts))
    if preserved:
        log.debug("preserved opts found (%d)", id(opts))
    return preserved
def _module_dirs(
    opts,
    ext_type,
    tag=None,
    int_type=None,
    ext_dirs=True,
    ext_type_dirs=None,
    base_path=None,
    explain=False,
):
    """
    Assemble the list of directories to scan for modules of *ext_type*.

    The returned list is ordered: CLI/`module_dirs` entries first, then
    external dirs from config and `hubble.loader` entry points, then the
    built-in locations (files/hubblestack_<type>, extension_modules/<type>,
    and the install tree). When *explain* is True the three groups are
    returned as a tuple instead of a flat list.
    """
    if tag is None:
        tag = ext_type
    # NOTE: this ordering is most authoritative last. if we find a grains
    # module in salt, we want to replace it with the grains module from hubble,
    # so hubble's path should come last.
    ext_types = os.path.join(opts["extension_modules"], ext_type)
    sys_types = os.path.join(base_path or HUBBLE_BASE_PATH, int_type or ext_type)
    hubblestack_type = "hubblestack_" + (int_type or ext_type)
    files_base_types = os.path.join(base_path or HUBBLE_BASE_PATH, "files", hubblestack_type)
    ext_type_types = []
    if ext_dirs:
        # e.g. tag "module" -> config key "module_dirs"
        if tag is not None and ext_type_dirs is None:
            ext_type_dirs = "{0}_dirs".format(tag)
        if ext_type_dirs in opts:
            ext_type_types.extend(opts[ext_type_dirs])
        # third-party packages may contribute dirs via the "hubble.loader"
        # entry-point group
        for entry_point in pkg_resources.iter_entry_points("hubble.loader", ext_type_dirs):
            try:
                loaded_entry_point = entry_point.load()
                for path in loaded_entry_point():
                    ext_type_types.append(path)
            except Exception as exc:
                # NOTE(review): _format_entrypoint_target is not defined in
                # this part of the module -- confirm it exists elsewhere in
                # this file, otherwise this error path raises NameError.
                log.error("Error getting module directories from %s: %s", _format_entrypoint_target(entry_point), exc)
                log.debug("Full backtrace for module directories error", exc_info=True)
    cli_module_dirs = []
    # The dirs can be any module dir, or a in-tree _{ext_type} dir
    for _dir in opts.get("module_dirs", []):
        # Prepend to the list to match cli argument ordering
        maybe_dir = os.path.join(_dir, ext_type)
        if os.path.isdir(maybe_dir):
            cli_module_dirs.insert(0, maybe_dir)
            continue
        maybe_dir = os.path.join(_dir, "_{0}".format(ext_type))
        if os.path.isdir(maybe_dir):
            cli_module_dirs.insert(0, maybe_dir)
    as_tuple = (cli_module_dirs, ext_type_types, [files_base_types, ext_types, sys_types])
    log.debug("_module_dirs() => %s", as_tuple)
    if explain:
        return as_tuple
    return cli_module_dirs + ext_type_types + [files_base_types, ext_types, sys_types]
def modules(
    opts,
    context=None,
    utils=None,
    whitelist=None,
    loaded_base_name=None,
    static_modules=None,
    proxy=None,
):
    """
    Load execution modules.

    Returns a LazyLoader of execution modules appropriate for the current
    system; each module's __virtual__() function is evaluated on load.

    :param dict opts: The options dictionary
    :param dict context: Made available inside generated modules as __context__
    :param dict utils: Utility functions made available as __utils__ (see
                       `utils_dirs` in hubblestack.config)
    :param list whitelist: Modules to whitelist; when falsy, falls back to
                           opts['whitelist_modules']
    :param str loaded_base_name: A string marker for the loaded base name
    :param list static_modules: Extra statically-available modules
    :param proxy: Packed into modules as __proxy__

    .. code-block:: python

        import hubblestack.config
        import hubblestack.loader
        __opts__ = hubblestack.config.get_config('/etc/salt/minion')
        __grains__ = hubblestack.loader.grains(__opts__)
        __opts__['grains'] = __grains__
        __utils__ = hubblestack.loader.utils(__opts__)
        __mods__ = hubblestack.loader.modules(__opts__, utils=__utils__)
        __mods__['test.ping']()
    """
    loader = LazyLoader(
        _module_dirs(opts, "modules", "module"),
        opts,
        tag="module",
        pack={"__context__": context, "__utils__": utils, "__proxy__": proxy},
        whitelist=whitelist or opts.get("whitelist_modules", None),
        loaded_base_name=loaded_base_name,
        static_modules=static_modules,
    )
    # Modules cross-call each other through __mods__. This is deliberately a
    # circular reference; LazyLoader.__del__ clears pack to break the cycle.
    loader.pack["__mods__"] = loader
    return loader
def returners(opts, functions, whitelist=None, context=None, proxy=None):
    """
    Build the LazyLoader over the returner modules, packing the execution
    modules in as __mods__.
    """
    pack = {
        "__mods__": functions,
        "__context__": context,
        "__proxy__": proxy or {},
    }
    return LazyLoader(
        _module_dirs(opts, "returners", "returner"),
        opts,
        tag="returner",
        whitelist=whitelist,
        pack=pack,
    )
def utils(opts, whitelist=None, context=None, proxy=None):
    """
    Build the LazyLoader over the utility modules (extra search paths come
    from the 'utils_dirs' config option).
    """
    pack = {
        "__context__": context,
        "__proxy__": proxy or {},
    }
    return LazyLoader(
        _module_dirs(opts, "utils", ext_type_dirs="utils_dirs"),
        opts,
        tag="utils",
        whitelist=whitelist,
        pack=pack,
    )
def fileserver(opts, backends):
    """
    Build the LazyLoader over the file server modules; *backends* acts as
    the whitelist, and a fresh utils loader is packed in as __utils__.
    """
    return LazyLoader(
        _module_dirs(opts, "fileserver"),
        opts,
        tag="fileserver",
        whitelist=backends,
        pack={"__utils__": utils(opts)},
    )
def grain_funcs(opts):
    """
    Return a LazyLoader over the grain functions.

    .. code-block:: python

        import hubblestack.config
        import hubblestack.loader
        __opts__ = hubblestack.config.get_config('/etc/salt/minion')
        grainfuncs = hubblestack.loader.grain_funcs(__opts__)
    """
    grain_dirs = _module_dirs(opts, "grains", "grain", ext_type_dirs="grains_dirs")
    return LazyLoader(grain_dirs, opts, tag="grains")
def grains(opts, force_refresh=False):
    """
    Return the functions for the dynamic grains and the values for the static
    grains.

    Runs "core.*" grain functions first, then the remaining grain functions
    (passing the accumulated data to any that accept a ``grains`` argument),
    merges in the static grains from config, optionally writes the grains
    cache, and returns the decoded result.

    Since grains are computed early in the startup process, grains functions
    do not have __mods__ available.

    .. code-block:: python
        import hubblestack.config
        import hubblestack.loader
        __opts__ = hubblestack.config.get_config('/etc/hubble/hubble')
        __grains__ = hubblestack.loader.grains(__opts__)
        print __grains__['id']
    """
    # Need to re-import hubblestack.config, somehow it got lost when a minion is starting
    import hubblestack.config
    # if we have no grains, lets try loading from disk (TODO: move to decorator?)
    # NOTE(review): cfn is only ever written below -- nothing in this function
    # reads the cache file back; confirm whether a read path was dropped.
    cfn = os.path.join(opts["cachedir"], "grains.cache.p")
    if opts.get("skip_grains", False):
        return {}
    grains_deep_merge = opts.get("grains_deep_merge", False) is True
    if "conf_file" in opts:
        # Re-read the static 'grains' setting from the config file (plus any
        # default_include / include files) so opts["grains"] is fresh.
        pre_opts = {}
        pre_opts.update(
            hubblestack.config.load_config(
                opts["conf_file"], "HUBBLE_CONFIG", hubblestack.config.DEFAULT_OPTS["conf_file"]
            )
        )
        default_include = pre_opts.get("default_include", opts["default_include"])
        include = pre_opts.get("include", [])
        pre_opts.update(hubblestack.config.include_config(default_include, opts["conf_file"], verbose=False))
        pre_opts.update(hubblestack.config.include_config(include, opts["conf_file"], verbose=True))
        if "grains" in pre_opts:
            opts["grains"] = pre_opts["grains"]
        else:
            opts["grains"] = {}
    else:
        opts["grains"] = {}
    grains_data = {}
    funcs = grain_funcs(opts)
    if force_refresh:  # if we refresh, lets reload grain modules
        funcs.clear()
    # Run core grains
    for key in funcs:
        if not key.startswith("core."):
            continue
        log.trace("Loading %s grain", key)
        ret = funcs[key]()
        if not isinstance(ret, dict):
            # a grain function must return a dict to contribute anything
            continue
        if grains_deep_merge:
            hubblestack.utils.dictupdate.update(grains_data, ret)
        else:
            grains_data.update(ret)
    # Run the rest of the grains
    for key in funcs:
        if key.startswith("core.") or key == "_errors":
            continue
        try:
            log.trace("Loading %s grain", key)
            # Grain functions that declare a 'grains' parameter receive the
            # data accumulated so far.
            parameters = hubblestack.utils.args.get_function_argspec(funcs[key]).args
            kwargs = {}
            if "grains" in parameters:
                kwargs["grains"] = grains_data
            ret = funcs[key](**kwargs)
        except Exception:
            # One broken grain module must not abort grain collection.
            log.critical(
                "Failed to load grains defined in grain file %s in " "function %s, error:\n",
                key,
                funcs[key],
                exc_info=True,
            )
            continue
        if not isinstance(ret, dict):
            continue
        if grains_deep_merge:
            hubblestack.utils.dictupdate.update(grains_data, ret)
        else:
            grains_data.update(ret)
    # Static grains from config override dynamic ones.
    grains_data.update(opts["grains"])
    # Write cache if enabled
    if opts.get("grains_cache", False):
        with hubblestack.utils.files.set_umask(0o077):
            try:
                if hubblestack.utils.platform.is_windows():
                    # Late import
                    import hubblestack.modules.cmdmod
                    # Make sure cache file isn't read-only
                    hubblestack.modules.cmdmod._run_quiet('attrib -R "{0}"'.format(cfn))
                with hubblestack.utils.files.fopen(cfn, "w+b") as fp_:
                    try:
                        # NOTE(review): hubblestack.payload is not imported at
                        # the top of this file; if no other import binds it,
                        # this raises AttributeError and is swallowed by the
                        # handler below (cache silently never written) --
                        # confirm.
                        serial = hubblestack.payload.Serial(opts)
                        serial.dump(grains_data, fp_)
                    except TypeError as e:
                        log.error("Failed to serialize grains cache: %s", e)
                        raise  # re-throw for cleanup
            except Exception as e:
                log.error("Unable to write to grains cache file %s: %s", cfn, e)
                # Based on the original exception, the file may or may not have been
                # created. If it was, we will remove it now, as the exception means
                # the serialized data is not to be trusted, no matter what the
                # exception is.
                if os.path.isfile(cfn):
                    os.unlink(cfn)
    if grains_deep_merge:
        hubblestack.utils.dictupdate.update(grains_data, opts["grains"])
    else:
        grains_data.update(opts["grains"])
    return hubblestack.utils.data.decode(grains_data, preserve_tuples=True)
def render(opts, functions):
    """
    Build the render-module loader and verify the configured renderer
    pipeline is actually available; raises LoaderError otherwise.
    """
    loader = LazyLoader(
        _module_dirs(opts, "renderers", "render", ext_type_dirs="render_dirs"),
        opts,
        tag="render",
        pack={"__mods__": functions, "__grains__": opts.get("grains", {})},
    )
    rend = FilterDictWrapper(loader, ".render")
    if check_render_pipe_str(opts["renderer"], rend, opts["renderer_blacklist"], opts["renderer_whitelist"]):
        return rend
    err = (
        "The renderer {0} is unavailable, this error is often because "
        "the needed software is unavailable".format(opts["renderer"])
    )
    log.critical(err)
    raise LoaderError(err)
def _generate_module(name):
    """
    Create an empty placeholder module registered in sys.modules under
    *name*, unless one already exists, so loaded modules can be namespaced
    beneath it.
    """
    if name in sys.modules:
        return
    docstring = "'''Salt loaded {0} parent module'''".format(name.split(".")[-1])
    module = types.ModuleType(str(name))
    exec(docstring, module.__dict__)
    sys.modules[name] = module
def _mod_type(module_path):
    """Classify a module path as 'int' (shipped under HUBBLE_BASE_PATH) or
    'ext' (anything else)."""
    return "int" if module_path.startswith(HUBBLE_BASE_PATH) else "ext"
class LazyLoader(hubblestack.utils.lazy.LazyDict):
"""
A pseduo-dictionary which has a set of keys which are the
name of the module and function, delimited by a dot. When
the value of the key is accessed, the function is then loaded
from disk and into memory.
.. note::
Iterating over keys will cause all modules to be loaded.
:param list module_dirs: A list of directories on disk to search for modules
:param dict opts: The salt options dictionary.
:param str tag: The tag for the type of module to load
:param func mod_type_check: A function which can be used to verify files
:param dict pack: A dictionary of function to be packed into modules as they are loaded
:param list whitelist: A list of modules to whitelist
:param bool virtual_enable: Whether or not to respect the __virtual__ function when loading modules.
:param str virtual_funcs: The name of additional functions in the module to call to verify its functionality.
If not true, the module will not load.
:returns: A LazyLoader object which functions as a dictionary. Keys are 'module.function' and values
are function references themselves which are loaded on-demand.
# TODO:
- move modules_max_memory into here
- singletons (per tag)
"""
mod_dict_class = hubblestack.utils.odict.OrderedDict
def __del__(self):
# trying to use logging in here works for debugging, but later causes
# problems at runtime during global destruction.
# log.debug("clearing possible memory leaks by emptying pack, missing_modules and loaded_modules dicts")
self.pack.clear()
self.missing_modules.clear()
self.loaded_modules.clear()
    def __init__(
        self,
        module_dirs,
        opts=None,
        tag="module",
        loaded_base_name=None,
        mod_type_check=None,
        pack=None,
        whitelist=None,
        virtual_enable=True,
        static_modules=None,
        funcname_filter=None,
        xlate_modnames=None,
        xlate_funcnames=None,
        proxy=None,  # NOTE(review): accepted but never stored/used here -- confirm intent
        virtual_funcs=None,
    ):  # pylint: disable=W0231
        """
        Set up the loader state, scan module_dirs for candidate files, and
        create the parent import namespaces.

        In pack, if any of the values are None they will be replaced with an
        empty context-specific dict
        """
        # optional hooks to filter function names and translate module /
        # function names during load
        self.funcname_filter = funcname_filter
        self.xlate_modnames = xlate_modnames
        self.xlate_funcnames = xlate_funcnames
        self.pack = {} if pack is None else pack
        if opts is None:
            opts = {}
        # thread-safe context unless multiprocessing is in use
        threadsafety = not opts.get("multiprocessing")
        self.context_dict = hubblestack.utils.context.ContextDict(threadsafe=threadsafety)
        self.opts = self.__prep_mod_opts(opts)
        self.module_dirs = module_dirs
        self.tag = tag
        self.loaded_base_name = loaded_base_name or LOADED_BASE_NAME
        self.mod_type_check = mod_type_check or _mod_type
        if "__context__" not in self.pack:
            self.pack["__context__"] = None
        for k, v in self.pack.items():
            if v is None:  # if the value of a pack is None, lets make an empty dict
                self.context_dict.setdefault(k, {})
                self.pack[k] = hubblestack.utils.context.NamespacedDictWrapper(self.context_dict, k)
        self.whitelist = whitelist
        self.virtual_enable = virtual_enable
        self.initial_load = True
        # names of modules that we don't have (errors, __virtual__, etc.)
        self.missing_modules = {}  # mapping of name -> error
        self.loaded_modules = {}  # mapping of module_name -> dict_of_functions
        self.loaded_files = set()  # TODO: just remove them from file_mapping?
        self.static_modules = static_modules if static_modules else []
        if virtual_funcs is None:
            virtual_funcs = []
        self.virtual_funcs = virtual_funcs
        # config option "disable_<tag>s" lists modules to skip entirely
        self.disabled = set(self.opts.get("disable_{0}{1}".format(self.tag, "" if self.tag[-1] == "s" else "s"), []))
        # A map of suffix to description for imp
        self.suffix_map = {}
        # A list to determine precedence of extensions
        # Prefer packages (directories) over modules (single files)!
        self.suffix_order = [""]
        for (suffix, mode, kind) in SUFFIXES:
            self.suffix_map[suffix] = (suffix, mode, kind)
            self.suffix_order.append(suffix)
        self._lock = threading.RLock()
        self._refresh_file_mapping()
        super(LazyLoader, self).__init__()  # late init the lazy loader
        # create all of the import namespaces
        # (the parent "<base>.<tag>" module is regenerated each pass, which
        # is harmless: _generate_module is a no-op for existing names)
        for subspace in ("int", "ext", "e_int", "salt"):
            _generate_module(".".join([self.loaded_base_name, tag]))
            _generate_module(".".join([self.loaded_base_name, tag, subspace]))
    def __getitem__(self, item):
        """
        Override the __getitem__ in order to decorate the returned function if we need
        to last-minute inject globals

        Currently a plain passthrough to the LazyDict lookup; kept as an
        explicit hook point.
        """
        return super(LazyLoader, self).__getitem__(item)
    def __getattr__(self, mod_name):
        """
        Allow for "direct" attribute access-- this allows jinja templates to
        access things like `hubblestack.test.ping()`

        Falls back to loading `mod_name` as a module and returning its
        function dict; raises AttributeError when no such module loads.
        """
        # avoid recursing into module loading during (de)serialization
        if mod_name in ("__getstate__", "__setstate__"):
            return object.__getattribute__(self, mod_name)
        # if we have an attribute named that, lets return it.
        # NOTE(review): object has no __getattr__ attribute, so this lookup
        # itself raises AttributeError and the branch is effectively dead --
        # kept as-is (same pattern exists upstream in salt).
        try:
            return object.__getattr__(self, mod_name)  # pylint: disable=no-member
        except AttributeError:
            pass
        # otherwise we assume its jinja template access
        if mod_name not in self.loaded_modules and not self.loaded:
            for name in self._iter_files(mod_name):
                if name in self.loaded_files:
                    continue
                # if we got what we wanted, we are done
                if self._load_module(name) and mod_name in self.loaded_modules:
                    break
        if mod_name in self.loaded_modules:
            return self.loaded_modules[mod_name]
        else:
            raise AttributeError(mod_name)
def missing_fun_string(self, function_name):
"""
Return the error string for a missing function.
This can range from "not available' to "__virtual__" returned False
"""
mod_name = function_name.split(".")[0]
if mod_name in self.loaded_modules:
return "'{0}' is not available.".format(function_name)
else:
try:
reason = self.missing_modules[mod_name]
except KeyError:
return "'{0}' is not available.".format(function_name)
else:
if reason is not None:
return "'{0}' __virtual__ returned False: {1}".format(mod_name, reason)
else:
return "'{0}' __virtual__ returned False".format(mod_name)
def _refresh_file_mapping(self):
"""
refresh the mapping of the FS on disk
"""
# map of suffix to description for imp
if self.opts.get("cython_enable", True) is True:
try:
global pyximport # pylint: disable=invalid-name
pyximport = __import__("pyximport") # pylint: disable=import-error
pyximport.install()
# add to suffix_map so file_mapping will pick it up
self.suffix_map[".pyx"] = tuple()
except ImportError:
log.info(
"Cython is enabled in the options but not present " "in the system path. Skipping Cython modules."
)
# Allow for zipimport of modules
if self.opts.get("enable_zip_modules", True) is True:
self.suffix_map[".zip"] = tuple()
# allow for module dirs
self.suffix_map[""] = ("", "", MODULE_KIND_PKG_DIRECTORY)
# create mapping of filename (without suffix) to (path, suffix)
# The files are added in order of priority, so order *must* be retained.
self.file_mapping = hubblestack.utils.odict.OrderedDict()
opt_match = []
def _replace_pre_ext(obj):
"""
Hack so we can get the optimization level that we replaced (if
any) out of the re.sub call below. We use a list here because
it is a persistent data structure that we will be able to
access after re.sub is called.
"""
opt_match.append(obj)
return ""
for mod_dir in self.module_dirs:
try:
# Make sure we have a sorted listdir in order to have
# expectable override results
files = sorted(x for x in os.listdir(mod_dir) if x != "__pycache__")
except OSError:
continue # Next mod_dir
try:
pycache_files = [
os.path.join("__pycache__", x) for x in sorted(os.listdir(os.path.join(mod_dir, "__pycache__")))
]
except OSError:
pass
else:
files.extend(pycache_files)
for filename in files:
try:
dirname, basename = os.path.split(filename)
if basename.startswith("_"):
# skip private modules
# log messages omitted for obviousness
continue # Next filename
f_noext, ext = os.path.splitext(basename)
f_noext = PY3_PRE_EXT.sub(_replace_pre_ext, f_noext)
try:
opt_level = int(opt_match.pop().group(1).rsplit("-", 1)[-1])
except (AttributeError, IndexError, ValueError):
# No regex match or no optimization level matched
opt_level = 0
try:
opt_index = self.opts["optimization_order"].index(opt_level)
except KeyError:
log.trace(
"Disallowed optimization level %d for module "
"name '%s', skipping. Add %d to the "
"'optimization_order' config option if you "
"do not want to ignore this optimization "
"level.",
opt_level,
f_noext,
opt_level,
)
continue
else:
# Optimization level not reflected in filename on PY2
opt_index = 0
# make sure it is a suffix we support
if ext not in self.suffix_map:
continue # Next filename
if f_noext in self.disabled:
log.trace("Skipping %s, it is disabled by configuration", filename)
continue # Next filename
fpath = os.path.join(mod_dir, filename)
# if its a directory, lets allow us to load that
if ext == "":
# is there something __init__?
subfiles = os.listdir(fpath)
for suffix in self.suffix_order:
if "" == suffix:
continue # Next suffix (__init__ must have a suffix)
init_file = "__init__{0}".format(suffix)
if init_file in subfiles:
break
else:
continue # Next filename
try:
curr_ext = self.file_mapping[f_noext][1]
curr_opt_index = self.file_mapping[f_noext][2]
except KeyError:
pass
else:
if "" in (curr_ext, ext) and curr_ext != ext:
log.error("Module/package collision: '%s' and '%s'", fpath, self.file_mapping[f_noext][0])
if ext == ".pyc" and curr_ext == ".pyc":
# Check the optimization level
if opt_index >= curr_opt_index:
# Module name match, but a higher-priority
# optimization level was already matched, skipping.
continue
if not dirname and ext == ".pyc":
# On Python 3, we should only load .pyc files from the
# __pycache__ subdirectory (i.e. when dirname is not an
# empty string).
continue
# Made it this far - add it
self.file_mapping[f_noext] = (fpath, ext, opt_index)
except OSError:
continue
for smod in self.static_modules:
f_noext = smod.split(".")[-1]
self.file_mapping[f_noext] = (smod, ".o", 0)
def clear(self):
"""
Clear the dict
"""
with self._lock:
super(LazyLoader, self).clear() # clear the lazy loader
self.loaded_files = set()
self.missing_modules = {}
self.loaded_modules = {}
# if we have been loaded before, lets clear the file mapping since
# we obviously want a re-do
if hasattr(self, "opts"):
self._refresh_file_mapping()
self.initial_load = False
def __prep_mod_opts(self, opts):
"""
Strip out of the opts any logger instance
"""
if "__grains__" not in self.pack:
self.context_dict["grains"] = opts.get("grains", {})
self.pack["__grains__"] = hubblestack.utils.context.NamespacedDictWrapper(self.context_dict, "grains")
if "__pillar__" not in self.pack:
self.context_dict["pillar"] = opts.get("pillar", {})
self.pack["__pillar__"] = hubblestack.utils.context.NamespacedDictWrapper(self.context_dict, "pillar")
ret = opts.copy()
for item in ("logger",):
if item in ret:
del ret[item]
pres_opt = get_preserved_opts(opts)
if pres_opt is not None:
pres_opt.update(ret)
return pres_opt
return ret
def _iter_files(self, mod_name):
"""
Iterate over all file_mapping files in order of closeness to mod_name
"""
# do we have an exact match?
if mod_name in self.file_mapping:
yield mod_name
# do we have a partial match?
for k in self.file_mapping:
if mod_name in k:
yield k
# anyone else? Bueller?
for k in self.file_mapping:
if mod_name not in k:
yield k
def _reload_submodules(self, mod):
submodules = (getattr(mod, sname) for sname in dir(mod) if isinstance(getattr(mod, sname), mod.__class__))
# reload only custom "sub"modules
for submodule in submodules:
# it is a submodule if the name is in a namespace under mod
if submodule.__name__.startswith(mod.__name__ + "."):
reload_module(submodule)
self._reload_submodules(submodule)
    def _load_module(self, name):
        """
        Import the file registered under *name* in file_mapping, pack the
        loader dunders into it, run its __init__/__virtual__ hooks, and
        register its public functions into this loader.

        Returns True on success; on failure records the reason in
        self.missing_modules and returns False.
        """
        mod = None
        fpath, suffix = self.file_mapping[name][:2]
        self.loaded_files.add(name)
        fpath_dirname = os.path.dirname(fpath)
        try:
            # temporarily extend sys.path so the module's own imports resolve
            sys.path.append(fpath_dirname)
            if fpath_dirname.endswith("__pycache__"):
                sys.path.append(os.path.dirname(fpath_dirname))
            if suffix == ".pyx":
                # Cython module
                mod = pyximport.load_module(name, fpath, tempfile.gettempdir())
            elif suffix == ".o":
                # static module: fpath is a dotted import path, not a file
                top_mod = __import__(fpath, globals(), locals(), [])
                comps = fpath.split(".")
                if len(comps) < 2:
                    mod = top_mod
                else:
                    mod = top_mod
                    for subname in comps[1:]:
                        mod = getattr(mod, subname)
            elif suffix == ".zip":
                mod = zipimporter(fpath).load_module(name)
            else:
                desc = self.suffix_map[suffix]
                # if it is a directory, we don't open a file
                try:
                    mod_namespace = ".".join((self.loaded_base_name, self.mod_type_check(fpath), self.tag, name))
                except TypeError:
                    mod_namespace = "{0}.{1}.{2}.{3}".format(
                        self.loaded_base_name, self.mod_type_check(fpath), self.tag, name
                    )
                if suffix == "":
                    # pylint: disable=no-member
                    # Package directory, look for __init__
                    loader_details = [
                        (importlib.machinery.SourceFileLoader, importlib.machinery.SOURCE_SUFFIXES),
                        (importlib.machinery.SourcelessFileLoader, importlib.machinery.BYTECODE_SUFFIXES),
                        (importlib.machinery.ExtensionFileLoader, importlib.machinery.EXTENSION_SUFFIXES),
                    ]
                    file_finder = importlib.machinery.FileFinder(fpath_dirname, *loader_details)
                    spec = file_finder.find_spec(mod_namespace)
                    if spec is None:
                        raise ImportError()
                    # TODO: Get rid of load_module in favor of
                    # exec_module below. load_module is deprecated, but
                    # loading using exec_module has been causing odd things
                    # with the magic dunders we pack into the loaded
                    # modules, most notably with salt-ssh's __opts__.
                    mod = spec.loader.load_module()
                    # mod = importlib.util.module_from_spec(spec)
                    # spec.loader.exec_module(mod)
                    # pylint: enable=no-member
                    sys.modules[mod_namespace] = mod
                    # reload all submodules if necessary
                    if not self.initial_load:
                        self._reload_submodules(mod)
                else:
                    # single-file module: pick the loader class by file kind
                    # pylint: disable=no-member
                    loader = MODULE_KIND_MAP[desc[2]](mod_namespace, fpath)
                    spec = importlib.util.spec_from_file_location(mod_namespace, fpath, loader=loader)
                    if spec is None:
                        raise ImportError()
                    # TODO: Get rid of load_module in favor of
                    # exec_module below. load_module is deprecated, but
                    # loading using exec_module has been causing odd things
                    # with the magic dunders we pack into the loaded
                    # modules, most notably with salt-ssh's __opts__.
                    mod = spec.loader.load_module()
                    # mod = importlib.util.module_from_spec(spec)
                    # spec.loader.exec_module(mod)
                    # pylint: enable=no-member
                    sys.modules[mod_namespace] = mod
        except IOError:
            # caller (_load) re-scans the filesystem on IOError
            raise
        except ImportError as exc:
            if "magic number" in str(exc):
                error_msg = "Failed to import {0} {1}. Bad magic number. If migrating from Python2 to Python3, remove all .pyc files and try again.".format(
                    self.tag, name
                )
                log.warning(error_msg)
                self.missing_modules[name] = error_msg
            # NOTE(review): when the magic-number branch ran, its message is
            # immediately overwritten by the raw exception below -- confirm
            # whether that is intended.
            log.debug("Failed to import %s %s:\n", self.tag, name, exc_info=True)
            self.missing_modules[name] = exc
            return False
        except Exception as error:
            log.error(
                "Failed to import %s %s, this is due most likely to a " "syntax error:\n",
                self.tag,
                name,
                exc_info=True,
            )
            self.missing_modules[name] = error
            return False
        except SystemExit as error:
            try:
                fn_, _, caller, _ = traceback.extract_tb(sys.exc_info()[2])[-1]
            except Exception:
                pass
            else:
                tgt_fn = os.path.join("salt", "utils", "process.py")
                if fn_.endswith(tgt_fn) and "_handle_signals" in caller:
                    # Race conditon, SIGTERM or SIGINT received while loader
                    # was in process of loading a module. Call sys.exit to
                    # ensure that the process is killed.
                    sys.exit(0)
            log.error("Failed to import %s %s as the module called exit()\n", self.tag, name, exc_info=True)
            self.missing_modules[name] = error
            return False
        finally:
            sys.path.remove(fpath_dirname)
        # pack the loader dunders (__opts__, plus whatever is in self.pack)
        if hasattr(mod, "__opts__"):
            mod.__opts__.update(self.opts)
        else:
            mod.__opts__ = self.opts
        # pack whatever other globals we were asked to
        for p_name, p_value in self.pack.items():
            setattr(mod, p_name, p_value)
        module_name = mod.__name__.rsplit(".", 1)[-1]
        if callable(self.xlate_modnames):
            module_name = self.xlate_modnames([module_name], name, fpath, suffix, mod, mode="module_name")
            name = self.xlate_modnames([name], name, fpath, suffix, mod, mode="name")
        # Call a module's initialization method if it exists
        module_init = getattr(mod, "__init__", None)
        if inspect.isfunction(module_init):
            try:
                module_init(self.opts)
            except TypeError as e:
                log.error(e)
            except Exception:
                err_string = "__init__ failed"
                log.debug("Error loading %s.%s: %s", self.tag, module_name, err_string, exc_info=True)
                self.missing_modules[module_name] = err_string
                self.missing_modules[name] = err_string
                return False
        # if virtual modules are enabled, we need to look for the
        # __virtual__() function inside that module and run it.
        if self.virtual_enable:
            virtual_funcs_to_process = ["__virtual__"] + self.virtual_funcs
            for virtual_func in virtual_funcs_to_process:
                virtual_ret, module_name, virtual_err, virtual_aliases = self._process_virtual(
                    mod, module_name, virtual_func
                )
                if virtual_err is not None:
                    log.trace("Error loading %s.%s: %s", self.tag, module_name, virtual_err)
                # if _process_virtual returned a non-True value then we are
                # supposed to not process this module
                if virtual_ret is not True and module_name not in self.missing_modules:
                    # If a module has information about why it could not be loaded, record it
                    self.missing_modules[module_name] = virtual_err
                    self.missing_modules[name] = virtual_err
                    return False
        else:
            virtual_aliases = ()
        if getattr(mod, "__load__", False) is not False:
            log.info(
                "The functions from module '%s' are being loaded from the " "provided __load__ attribute", module_name
            )
        # If we had another module by the same virtual name, we should put any
        # new functions under the existing dictionary.
        mod_names = [module_name] + list(virtual_aliases)
        if callable(self.xlate_modnames):
            mod_names = self.xlate_modnames(mod_names, name, fpath, suffix, mod, mode="mod_names")
        mod_dict = dict(((x, self.loaded_modules.get(x, self.mod_dict_class())) for x in mod_names))
        for attr in getattr(mod, "__load__", dir(mod)):
            if attr.startswith("_"):
                # private functions are skipped
                continue
            func = getattr(mod, attr)
            if not inspect.isfunction(func) and not isinstance(func, functools.partial):
                # Not a function!? Skip it!!!
                continue
            if callable(self.funcname_filter) and not self.funcname_filter(attr, mod):
                # rejected by filter
                continue
            # Let's get the function name.
            # If the module has the __func_alias__ attribute, it must be a
            # dictionary mapping in the form of(key -> value):
            # <real-func-name> -> <desired-func-name>
            #
            # It default's of course to the found callable attribute name
            # if no alias is defined.
            funcname = getattr(mod, "__func_alias__", {}).get(attr, attr)
            for tgt_mod in mod_names:
                try:
                    full_funcname = ".".join((tgt_mod, funcname))
                except TypeError:
                    full_funcname = "{0}.{1}".format(tgt_mod, funcname)
                if callable(self.xlate_funcnames):
                    funcname, full_funcname = self.xlate_funcnames(
                        name, fpath, suffix, tgt_mod, funcname, full_funcname, mod, func
                    )
                # Save many references for lookups
                # Careful not to overwrite existing (higher priority) functions
                if full_funcname not in self._dict:
                    self._dict[full_funcname] = func
                if funcname not in mod_dict[tgt_mod]:
                    setattr(mod_dict[tgt_mod], funcname, func)
                    mod_dict[tgt_mod][funcname] = func
                self._apply_outputter(func, mod)
        # enforce depends
        try:
            Depends.enforce_dependencies(self._dict, self.tag, name)
        except RuntimeError as exc:
            log.info("Depends.enforce_dependencies() failed for the following " "reason: %s", exc)
        for tgt_mod in mod_names:
            self.loaded_modules[tgt_mod] = mod_dict[tgt_mod]
        return True
def _load(self, key):
"""
Load a single item if you have it
"""
# if the key doesn't have a '.' then it isn't valid for this mod dict
if not isinstance(key, str):
raise KeyError("The key must be a string.")
if "." not in key:
raise KeyError("The key '{0}' should contain a '.'".format(key))
mod_name, _ = key.split(".", 1)
with self._lock:
# It is possible that the key is in the dictionary after
# acquiring the lock due to another thread loading it.
if mod_name in self.missing_modules or key in self._dict:
return True
# if the modulename isn't in the whitelist, don't bother
if self.whitelist and mod_name not in self.whitelist:
log.error(
"Failed to load function %s because its module (%s) is " "not in the whitelist: %s",
key,
mod_name,
self.whitelist,
)
raise KeyError(key)
def _inner_load(mod_name):
for name in self._iter_files(mod_name):
if name in self.loaded_files:
continue
# if we got what we wanted, we are done
if self._load_module(name) and key in self._dict:
return True
return False
# try to load the module
ret = None
reloaded = False
# re-scan up to once, IOErrors or a failed load cause re-scans of the
# filesystem
while True:
try:
ret = _inner_load(mod_name)
if not reloaded and ret is not True:
self._refresh_file_mapping()
reloaded = True
continue
break
except IOError:
if not reloaded:
self._refresh_file_mapping()
reloaded = True
continue
return ret
def _load_all(self):
"""
Load all of them
"""
with self._lock:
for name in self.file_mapping:
if name in self.loaded_files or name in self.missing_modules:
continue
self._load_module(name)
self.loaded = True
    def reload_modules(self):
        """
        Forget which files have already been loaded and eagerly re-load
        every module found in the file mapping.
        """
        with self._lock:
            # Start from an empty set so every file is eligible again.
            self.loaded_files = set()
            self._load_all()
def _apply_outputter(self, func, mod):
"""
Apply the __outputter__ variable to the functions
"""
if hasattr(mod, "__outputter__"):
outp = mod.__outputter__
if func.__name__ in outp:
func.__outputter__ = outp[func.__name__]
    def _process_virtual(self, mod, module_name, virtual_func="__virtual__"):
        """
        Given a loaded module and its default name determine its virtual name

        This function returns a four-value tuple:
        ``(load_ok, module_name, error_reason, virtual_aliases)``.  The first
        value will be either True or False and will indicate if the module
        should be loaded or not (i.e. if it threw an exception while
        processing its __virtual__ function).  The second value is the
        determined virtual name, which may be the same as the value provided.
        The third is an error string (or None) describing why the module was
        refused, and the fourth is the module's ``__virtual_aliases__`` tuple.

        The default name can be calculated as follows::
            module_name = mod.__name__.rsplit('.', 1)[-1]
        """
        # The __virtual__ function will return either a True or False value.
        # If it returns a True value it can also set a module level attribute
        # named __virtualname__ with the name that the module should be
        # referred to as.
        #
        # This allows us to have things like the pkg module working on all
        # platforms under the name 'pkg'. It also allows for modules like
        # augeas_cfg to be referred to as 'augeas', which would otherwise have
        # namespace collisions. And finally it allows modules to return False
        # if they are not intended to run on the given platform or are missing
        # dependencies.
        virtual_aliases = getattr(mod, "__virtual_aliases__", tuple())
        try:
            error_reason = None
            # NOTE(review): this checks specifically for '__virtual__' but then
            # calls `virtual_func`, which may be a different attribute when
            # alternate virtual_funcs are configured -- confirm whether the
            # hasattr/isfunction checks should use `virtual_func` instead.
            if hasattr(mod, "__virtual__") and inspect.isfunction(mod.__virtual__):
                try:
                    start = time.time()
                    virtual = getattr(mod, virtual_func)()
                    # A (False, "reason") tuple carries the refusal reason.
                    if isinstance(virtual, tuple):
                        error_reason = virtual[1]
                        virtual = virtual[0]
                    if self.opts.get("virtual_timer", False):
                        end = time.time() - start
                        msg = "Virtual function took {0} seconds for {1}".format(end, module_name)
                        log.warning(msg)
                except Exception as exc:
                    error_reason = (
                        "Exception raised when processing __virtual__ function"
                        " for {0}. Module will not be loaded: {1}".format(mod.__name__, exc)
                    )
                    log.error(error_reason, exc_info=True)
                    virtual = None
                # Get the module's virtual name
                virtualname = getattr(mod, "__virtualname__", virtual)
                if not virtual:
                    # if __virtual__() evaluates to False then the module
                    # wasn't meant for this platform or it's not supposed to
                    # load for some other reason.
                    # Some modules might accidentally return None and are
                    # improperly loaded
                    if virtual is None:
                        log.warning(
                            "%s.__virtual__() is wrongly returning `None`. "
                            "It should either return `True`, `False` or a new "
                            "name. If you're the developer of the module "
                            "'%s', please fix this.",
                            mod.__name__,
                            module_name,
                        )
                    return (False, module_name, error_reason, virtual_aliases)
                # At this point, __virtual__ did not return a
                # boolean value, let's check for deprecated usage
                # or module renames
                if virtual is not True and module_name != virtual:
                    # The module is renaming itself. Updating the module name
                    # with the new name
                    log.trace("Loaded %s as virtual %s", module_name, virtual)
                    if not hasattr(mod, "__virtualname__"):
                        hubblestack.utils.versions.warn_until(
                            "Hydrogen",
                            "The '{0}' module is renaming itself in its "
                            "__virtual__() function ({1} => {2}). Please "
                            "set it's virtual name as the "
                            "'__virtualname__' module attribute. "
                            "Example: \"__virtualname__ = '{2}'\"".format(mod.__name__, module_name, virtual),
                        )
                    if virtualname != virtual:
                        # The __virtualname__ attribute does not match what's
                        # being returned by the __virtual__() function. This
                        # should be considered an error.
                        log.error(
                            "The module '%s' is showing some bad usage. Its "
                            "__virtualname__ attribute is set to '%s' yet the "
                            "__virtual__() function is returning '%s'. These "
                            "values should match!",
                            mod.__name__,
                            virtualname,
                            virtual,
                        )
                    module_name = virtualname
                # If the __virtual__ function returns True and __virtualname__
                # is set then use it
                elif virtual is True and virtualname != module_name:
                    if virtualname is not True:
                        module_name = virtualname
        except KeyError:
            # Key errors come out of the virtual function when passing
            # in incomplete grains sets, these can be safely ignored;
            # the traceback is logged to help debugging.
            log.error('Failed to LazyLoad "%s"', module_name, exc_info=True)
        except Exception:
            # If the module throws an exception during __virtual__()
            # then log the information and continue to the next.
            log.error("Failed to read the virtual function for %s: %s", self.tag, module_name, exc_info=True)
            return (False, module_name, error_reason, virtual_aliases)
        return (True, module_name, None, virtual_aliases)
class FilterDictWrapper(MutableMapping):
    """
    Create a dict which wraps another dict with a specific key suffix on get

    Reads of ``wrapper[key]`` look up ``key + suffix`` in the wrapped dict,
    writes and deletes pass straight through un-suffixed, and iteration
    yields only the keys ending with the suffix (suffix stripped).
    This is to replace "filter_load"
    """

    def __init__(self, d, suffix):
        self._dict = d
        self.suffix = suffix

    def __setitem__(self, key, val):
        # writes are NOT suffixed; they go straight to the wrapped dict
        self._dict[key] = val

    def __delitem__(self, key):
        del self._dict[key]

    def __getitem__(self, key):
        # reads are namespaced: 'foo' -> 'foo<suffix>'
        return self._dict[key + self.suffix]

    def __len__(self):
        # length of the underlying dict, including non-suffixed keys
        return len(self._dict)

    def __iter__(self):
        # Strip only the *trailing* suffix.  The previous implementation used
        # str.replace(), which removed every occurrence of the suffix, so a
        # key like 'a.render.render' was yielded as 'a' instead of 'a.render'.
        suffix_len = len(self.suffix)
        for key in self._dict:
            if key.endswith(self.suffix):
                yield key[:-suffix_len] if suffix_len else key
def matchers(opts):
    """
    Return the matcher services plugins
    """
    matcher_dirs = _module_dirs(opts, "matchers")
    return LazyLoader(matcher_dirs, opts, tag="matchers")
def _nova_funcname_filter(funcname, mod): # pylint: disable=unused-argument
"""
reject function names that aren't "audit"
args:
mod :- the actual imported module (allowing mod.__file__ examination, etc)
funcname :- the attribute name as given by dir(mod)
return:
True :- sure, we can provide this function
False :- skip this one
"""
if funcname == "audit":
return True
return False
def _nova_xlate_modnames(mod_names, name, fpath, suffix, mod, mode="mod_names"): # pylint: disable=unused-argument
"""
Translate (xlate) "service" into "/service"
args:
name :- the name of the module we're loading (e.g., 'service')
fpath :- the file path of the module we're loading
suffix :- the suffix of the module we're loading (e.g., '.pyc', usually)
mod :- the actual imported module (allowing mod.__file__ examination)
mode :- the name of the load_module variable being translated
return:
either a list of new names (for "mod_names") or a single new name
(for "name" and "module_name")
"""
new_modname = "/" + name
if mode in ("module_name", "name"):
return new_modname
return [new_modname]
def _nova_xlate_funcnames(
name, fpath, suffix, tgt_mod, funcname, full_funcname, mod, func
): # pylint: disable=unused-argument
"""
Translate (xlate) "service.audit" into "/service.py"
args:
name :- the name of the module we're loading (e.g., 'service')
fpath :- the file path of the module we're loading
suffix :- the suffix of the module we're loading (e.g., '.pyc', usually)
tgt_mod :- the current virtual name of the module we're loading (e.g., 'service')
funcname :- the function name we're maping (e.g., 'audit')
full_funcname :- the LazyLoader key format item (e.g., 'service.audit')
mod :- the actual imported module (allowing mod.__file__ examination)
func :- the actual function being mapped (allowing func.__name__)
return:
funcname, full_funcname
The old NovaLazyLoader's behavior can be mimicked without altering the
LazyLoader (very much) by simply pretending tgt_mod='/service',
funcname='py' and full_funcname='/service.py'.
"""
new_funcname = suffix[1:]
if new_funcname == "pyc":
new_funcname = "py"
return new_funcname, ".".join([name, new_funcname])
def nova(hubble_dir, opts, modules, context=None):
    """
    Return a nova (!lazy) loader.

    args:
        hubble_dir :- iterable of directories to walk for .yaml profile data
        opts :- hubble configuration dict
        modules :- the loader packed into nova modules as __mods__
        context :- optional shared __context__ dict

    This does return a LazyLoader, but hubble.audit module always iterates the
    keys forcing a full load, which somewhat defeates the purpose of using the
    LazyLoader object at all.
    nova() also populates loader.__data__ and loader.__missing_data__ for
    backwards compatibility purposes but omits some overlapping functions that
    were essentially unnecessary.
    Originally hubble.audit used a special NovaLazyLoader that was intended to
    make everything more readable but in fact only fragmented the codebase and
    obsfucated the purpose and function of the new data elements it introduced.
    The loader functions and file_mapping functions of the loader were also
    hopelessly mixed up with the yaml data loaders for no apparent reason.
    Presumably the original intent was to be able to use expressions like
    __nova__['/cis/debian-9-whatever.yaml'] to access those data elements;
    but this wasn't actually used, apparently favoring the form:
    __nova__.__data__['/cis/whatever.yaml'] instead.
    The __nova__.__data__['/whatever.yaml'] format is retained, but the
    file_mapping['/whatever.yaml'] and load_module('whatever') functionality is
    not. This means that anywhere refresh_filemapping() is expected to refresh
    yaml on disk will no-longer do so. Interestingly, it didn't seem to work
    before anyway, which seems to be the reason for the special sync() section
    of the hubble.audit.
    """
    loader = LazyLoader(
        _module_dirs(opts, "nova"),
        opts,
        tag="nova",
        funcname_filter=_nova_funcname_filter,
        xlate_modnames=_nova_xlate_modnames,
        xlate_funcnames=_nova_xlate_funcnames,
        pack={"__context__": context, "__mods__": modules},
    )
    # Populate __data__ / __missing_data__ with every yaml profile found
    # under the hubble_dir trees, keyed by path relative to the search dir.
    loader.__data__ = data = dict()
    loader.__missing_data__ = missing_data = dict()
    for mod_dir in hubble_dir:
        for path, _, filenames in os.walk(mod_dir):
            for filename in filenames:
                pathname = os.path.join(path, filename)
                name = pathname[len(mod_dir) :]
                if filename.endswith(".yaml"):
                    try:
                        with open(pathname, "r") as fh:
                            data[name] = yaml.safe_load(fh)
                    except Exception as exc:
                        missing_data[name] = str(exc)
                        # BUGFIX: this previously referenced the misspelled
                        # name 'pathnmame', raising NameError (instead of
                        # logging) whenever a yaml file failed to load.
                        log.exception("Error loading yaml from %s", pathname)
    return loader
| 39.477097 | 156 | 0.57926 |
import os
import re
import sys
import time
import yaml
import logging
import inspect
import tempfile
import functools
import threading
import traceback
import types
from zipimport import zipimporter
import hubblestack.config
import hubblestack.syspaths
import hubblestack.utils.args
import hubblestack.utils.context
import hubblestack.utils.data
import hubblestack.utils.dictupdate
import hubblestack.utils.files
import hubblestack.utils.lazy
import hubblestack.utils.odict
import hubblestack.utils.platform
import hubblestack.utils.versions
from hubblestack.exceptions import LoaderError
from hubblestack.template import check_render_pipe_str
from hubblestack.utils.decorators import Depends
import hubblestack.syspaths
import importlib.machinery
import importlib.util
import pkg_resources
try:
from collections.abc import MutableMapping
except ImportError:
from collections import MutableMapping
log = logging.getLogger(__name__)
HUBBLE_BASE_PATH = os.path.abspath(hubblestack.syspaths.INSTALL_DIR)
LOADED_BASE_NAME = "hubble.loaded"
MODULE_KIND_SOURCE = 1
MODULE_KIND_COMPILED = 2
MODULE_KIND_EXTENSION = 3
MODULE_KIND_PKG_DIRECTORY = 5
SUFFIXES = []
for suffix in importlib.machinery.EXTENSION_SUFFIXES:
SUFFIXES.append((suffix, "rb", MODULE_KIND_EXTENSION))
for suffix in importlib.machinery.SOURCE_SUFFIXES:
SUFFIXES.append((suffix, "rb", MODULE_KIND_SOURCE))
for suffix in importlib.machinery.BYTECODE_SUFFIXES:
SUFFIXES.append((suffix, "rb", MODULE_KIND_COMPILED))
MODULE_KIND_MAP = {
MODULE_KIND_SOURCE: importlib.machinery.SourceFileLoader,
MODULE_KIND_COMPILED: importlib.machinery.SourcelessFileLoader,
MODULE_KIND_EXTENSION: importlib.machinery.ExtensionFileLoader,
}
PY3_PRE_EXT = re.compile(r"\.cpython-{0}{1}(\.opt-[1-9])?".format(*sys.version_info[:2]))
pyximport = None
PRESERVABLE_OPTS = dict()
def set_preservable_opts(opts):
oid = id(opts)
if oid not in PRESERVABLE_OPTS:
log.debug("setting %d to be preservable opts", oid)
PRESERVABLE_OPTS[oid] = opts.copy()
def get_preserved_opts(opts):
oid = id(opts)
ret = PRESERVABLE_OPTS.get(oid)
if ret:
log.debug("preserved opts found (%d)", oid)
return ret
def _module_dirs(
opts,
ext_type,
tag=None,
int_type=None,
ext_dirs=True,
ext_type_dirs=None,
base_path=None,
explain=False,
):
if tag is None:
tag = ext_type
ext_types = os.path.join(opts["extension_modules"], ext_type)
sys_types = os.path.join(base_path or HUBBLE_BASE_PATH, int_type or ext_type)
hubblestack_type = "hubblestack_" + (int_type or ext_type)
files_base_types = os.path.join(base_path or HUBBLE_BASE_PATH, "files", hubblestack_type)
ext_type_types = []
if ext_dirs:
if tag is not None and ext_type_dirs is None:
ext_type_dirs = "{0}_dirs".format(tag)
if ext_type_dirs in opts:
ext_type_types.extend(opts[ext_type_dirs])
for entry_point in pkg_resources.iter_entry_points("hubble.loader", ext_type_dirs):
try:
loaded_entry_point = entry_point.load()
for path in loaded_entry_point():
ext_type_types.append(path)
except Exception as exc:
log.error("Error getting module directories from %s: %s", _format_entrypoint_target(entry_point), exc)
log.debug("Full backtrace for module directories error", exc_info=True)
cli_module_dirs = []
# The dirs can be any module dir, or a in-tree _{ext_type} dir
for _dir in opts.get("module_dirs", []):
# Prepend to the list to match cli argument ordering
maybe_dir = os.path.join(_dir, ext_type)
if os.path.isdir(maybe_dir):
cli_module_dirs.insert(0, maybe_dir)
continue
maybe_dir = os.path.join(_dir, "_{0}".format(ext_type))
if os.path.isdir(maybe_dir):
cli_module_dirs.insert(0, maybe_dir)
as_tuple = (cli_module_dirs, ext_type_types, [files_base_types, ext_types, sys_types])
log.debug("_module_dirs() => %s", as_tuple)
if explain:
return as_tuple
return cli_module_dirs + ext_type_types + [files_base_types, ext_types, sys_types]
def modules(
opts,
context=None,
utils=None,
whitelist=None,
loaded_base_name=None,
static_modules=None,
proxy=None,
):
# TODO Publish documentation for module whitelisting
if not whitelist:
whitelist = opts.get("whitelist_modules", None)
ret = LazyLoader(
_module_dirs(opts, "modules", "module"),
opts,
tag="module",
pack={"__context__": context, "__utils__": utils, "__proxy__": proxy},
whitelist=whitelist,
loaded_base_name=loaded_base_name,
static_modules=static_modules,
)
# this is the very definition of a circular ref... we added a destructor
# to deal with this, although the newest pythons periodically detect
# detached circular ref items during garbage collection.
ret.pack["__mods__"] = ret
return ret
def returners(opts, functions, whitelist=None, context=None, proxy=None):
return LazyLoader(
_module_dirs(opts, "returners", "returner"),
opts,
tag="returner",
whitelist=whitelist,
pack={"__mods__": functions, "__context__": context, "__proxy__": proxy or {}},
)
def utils(opts, whitelist=None, context=None, proxy=None):
return LazyLoader(
_module_dirs(opts, "utils", ext_type_dirs="utils_dirs"),
opts,
tag="utils",
whitelist=whitelist,
pack={"__context__": context, "__proxy__": proxy or {}},
)
def fileserver(opts, backends):
return LazyLoader(
_module_dirs(opts, "fileserver"), opts, tag="fileserver", whitelist=backends, pack={"__utils__": utils(opts)}
)
def grain_funcs(opts):
return LazyLoader(
_module_dirs(
opts,
"grains",
"grain",
ext_type_dirs="grains_dirs",
),
opts,
tag="grains",
)
def grains(opts, force_refresh=False):
# Need to re-import hubblestack.config, somehow it got lost when a minion is starting
import hubblestack.config
# if we have no grains, lets try loading from disk (TODO: move to decorator?)
cfn = os.path.join(opts["cachedir"], "grains.cache.p")
if opts.get("skip_grains", False):
return {}
grains_deep_merge = opts.get("grains_deep_merge", False) is True
if "conf_file" in opts:
pre_opts = {}
pre_opts.update(
hubblestack.config.load_config(
opts["conf_file"], "HUBBLE_CONFIG", hubblestack.config.DEFAULT_OPTS["conf_file"]
)
)
default_include = pre_opts.get("default_include", opts["default_include"])
include = pre_opts.get("include", [])
pre_opts.update(hubblestack.config.include_config(default_include, opts["conf_file"], verbose=False))
pre_opts.update(hubblestack.config.include_config(include, opts["conf_file"], verbose=True))
if "grains" in pre_opts:
opts["grains"] = pre_opts["grains"]
else:
opts["grains"] = {}
else:
opts["grains"] = {}
grains_data = {}
funcs = grain_funcs(opts)
if force_refresh: # if we refresh, lets reload grain modules
funcs.clear()
# Run core grains
for key in funcs:
if not key.startswith("core."):
continue
log.trace("Loading %s grain", key)
ret = funcs[key]()
if not isinstance(ret, dict):
continue
if grains_deep_merge:
hubblestack.utils.dictupdate.update(grains_data, ret)
else:
grains_data.update(ret)
# Run the rest of the grains
for key in funcs:
if key.startswith("core.") or key == "_errors":
continue
try:
log.trace("Loading %s grain", key)
parameters = hubblestack.utils.args.get_function_argspec(funcs[key]).args
kwargs = {}
if "grains" in parameters:
kwargs["grains"] = grains_data
ret = funcs[key](**kwargs)
except Exception:
log.critical(
"Failed to load grains defined in grain file %s in " "function %s, error:\n",
key,
funcs[key],
exc_info=True,
)
continue
if not isinstance(ret, dict):
continue
if grains_deep_merge:
hubblestack.utils.dictupdate.update(grains_data, ret)
else:
grains_data.update(ret)
grains_data.update(opts["grains"])
# Write cache if enabled
if opts.get("grains_cache", False):
with hubblestack.utils.files.set_umask(0o077):
try:
if hubblestack.utils.platform.is_windows():
# Late import
import hubblestack.modules.cmdmod
# Make sure cache file isn't read-only
hubblestack.modules.cmdmod._run_quiet('attrib -R "{0}"'.format(cfn))
with hubblestack.utils.files.fopen(cfn, "w+b") as fp_:
try:
serial = hubblestack.payload.Serial(opts)
serial.dump(grains_data, fp_)
except TypeError as e:
log.error("Failed to serialize grains cache: %s", e)
raise
except Exception as e:
log.error("Unable to write to grains cache file %s: %s", cfn, e)
if os.path.isfile(cfn):
os.unlink(cfn)
if grains_deep_merge:
hubblestack.utils.dictupdate.update(grains_data, opts["grains"])
else:
grains_data.update(opts["grains"])
return hubblestack.utils.data.decode(grains_data, preserve_tuples=True)
def render(opts, functions):
pack = {"__mods__": functions, "__grains__": opts.get("grains", {})}
ret = LazyLoader(
_module_dirs(
opts,
"renderers",
"render",
ext_type_dirs="render_dirs",
),
opts,
tag="render",
pack=pack,
)
rend = FilterDictWrapper(ret, ".render")
if not check_render_pipe_str(opts["renderer"], rend, opts["renderer_blacklist"], opts["renderer_whitelist"]):
err = (
"The renderer {0} is unavailable, this error is often because "
"the needed software is unavailable".format(opts["renderer"])
)
log.critical(err)
raise LoaderError(err)
return rend
def _generate_module(name):
if name in sys.modules:
return
code = "'''Salt loaded {0} parent module'''".format(name.split(".")[-1])
module = types.ModuleType(str(name)) # future lint: disable=blacklisted-function
exec(code, module.__dict__)
sys.modules[name] = module
def _mod_type(module_path):
if module_path.startswith(HUBBLE_BASE_PATH):
return "int"
return "ext"
class LazyLoader(hubblestack.utils.lazy.LazyDict):
mod_dict_class = hubblestack.utils.odict.OrderedDict
def __del__(self):
# trying to use logging in here works for debugging, but later causes
# problems at runtime during global destruction.
# log.debug("clearing possible memory leaks by emptying pack, missing_modules and loaded_modules dicts")
self.pack.clear()
self.missing_modules.clear()
self.loaded_modules.clear()
def __init__(
self,
module_dirs,
opts=None,
tag="module",
loaded_base_name=None,
mod_type_check=None,
pack=None,
whitelist=None,
virtual_enable=True,
static_modules=None,
funcname_filter=None,
xlate_modnames=None,
xlate_funcnames=None,
proxy=None,
virtual_funcs=None,
): # pylint: disable=W0231
self.funcname_filter = funcname_filter
self.xlate_modnames = xlate_modnames
self.xlate_funcnames = xlate_funcnames
self.pack = {} if pack is None else pack
if opts is None:
opts = {}
threadsafety = not opts.get("multiprocessing")
self.context_dict = hubblestack.utils.context.ContextDict(threadsafe=threadsafety)
self.opts = self.__prep_mod_opts(opts)
self.module_dirs = module_dirs
self.tag = tag
self.loaded_base_name = loaded_base_name or LOADED_BASE_NAME
self.mod_type_check = mod_type_check or _mod_type
if "__context__" not in self.pack:
self.pack["__context__"] = None
for k, v in self.pack.items():
if v is None: # if the value of a pack is None, lets make an empty dict
self.context_dict.setdefault(k, {})
self.pack[k] = hubblestack.utils.context.NamespacedDictWrapper(self.context_dict, k)
self.whitelist = whitelist
self.virtual_enable = virtual_enable
self.initial_load = True
# names of modules that we don't have (errors, __virtual__, etc.)
self.missing_modules = {}
self.loaded_modules = {}
self.loaded_files = set()
self.static_modules = static_modules if static_modules else []
if virtual_funcs is None:
virtual_funcs = []
self.virtual_funcs = virtual_funcs
self.disabled = set(self.opts.get("disable_{0}{1}".format(self.tag, "" if self.tag[-1] == "s" else "s"), []))
self.suffix_map = {}
self.suffix_order = [""]
for (suffix, mode, kind) in SUFFIXES:
self.suffix_map[suffix] = (suffix, mode, kind)
self.suffix_order.append(suffix)
self._lock = threading.RLock()
self._refresh_file_mapping()
super(LazyLoader, self).__init__()
for subspace in ("int", "ext", "e_int", "salt"):
_generate_module(".".join([self.loaded_base_name, tag]))
_generate_module(".".join([self.loaded_base_name, tag, subspace]))
def __getitem__(self, item):
return super(LazyLoader, self).__getitem__(item)
def __getattr__(self, mod_name):
if mod_name in ("__getstate__", "__setstate__"):
return object.__getattribute__(self, mod_name)
try:
return object.__getattr__(self, mod_name)
except AttributeError:
pass
if mod_name not in self.loaded_modules and not self.loaded:
for name in self._iter_files(mod_name):
if name in self.loaded_files:
continue
if self._load_module(name) and mod_name in self.loaded_modules:
break
if mod_name in self.loaded_modules:
return self.loaded_modules[mod_name]
else:
raise AttributeError(mod_name)
def missing_fun_string(self, function_name):
mod_name = function_name.split(".")[0]
if mod_name in self.loaded_modules:
return "'{0}' is not available.".format(function_name)
else:
try:
reason = self.missing_modules[mod_name]
except KeyError:
return "'{0}' is not available.".format(function_name)
else:
if reason is not None:
return "'{0}' __virtual__ returned False: {1}".format(mod_name, reason)
else:
return "'{0}' __virtual__ returned False".format(mod_name)
def _refresh_file_mapping(self):
if self.opts.get("cython_enable", True) is True:
try:
global pyximport
pyximport = __import__("pyximport")
pyximport.install()
self.suffix_map[".pyx"] = tuple()
except ImportError:
log.info(
"Cython is enabled in the options but not present " "in the system path. Skipping Cython modules."
)
if self.opts.get("enable_zip_modules", True) is True:
self.suffix_map[".zip"] = tuple()
self.suffix_map[""] = ("", "", MODULE_KIND_PKG_DIRECTORY)
self.file_mapping = hubblestack.utils.odict.OrderedDict()
opt_match = []
def _replace_pre_ext(obj):
opt_match.append(obj)
return ""
for mod_dir in self.module_dirs:
try:
files = sorted(x for x in os.listdir(mod_dir) if x != "__pycache__")
except OSError:
continue
try:
pycache_files = [
os.path.join("__pycache__", x) for x in sorted(os.listdir(os.path.join(mod_dir, "__pycache__")))
]
except OSError:
pass
else:
files.extend(pycache_files)
for filename in files:
try:
dirname, basename = os.path.split(filename)
if basename.startswith("_"):
continue
f_noext, ext = os.path.splitext(basename)
f_noext = PY3_PRE_EXT.sub(_replace_pre_ext, f_noext)
try:
opt_level = int(opt_match.pop().group(1).rsplit("-", 1)[-1])
except (AttributeError, IndexError, ValueError):
opt_level = 0
try:
opt_index = self.opts["optimization_order"].index(opt_level)
except KeyError:
log.trace(
"Disallowed optimization level %d for module "
"name '%s', skipping. Add %d to the "
"'optimization_order' config option if you "
"do not want to ignore this optimization "
"level.",
opt_level,
f_noext,
opt_level,
)
continue
else:
opt_index = 0
if ext not in self.suffix_map:
continue
if f_noext in self.disabled:
log.trace("Skipping %s, it is disabled by configuration", filename)
continue
fpath = os.path.join(mod_dir, filename)
if ext == "":
subfiles = os.listdir(fpath)
for suffix in self.suffix_order:
if "" == suffix:
continue
init_file = "__init__{0}".format(suffix)
if init_file in subfiles:
break
else:
continue
try:
curr_ext = self.file_mapping[f_noext][1]
curr_opt_index = self.file_mapping[f_noext][2]
except KeyError:
pass
else:
if "" in (curr_ext, ext) and curr_ext != ext:
log.error("Module/package collision: '%s' and '%s'", fpath, self.file_mapping[f_noext][0])
if ext == ".pyc" and curr_ext == ".pyc":
if opt_index >= curr_opt_index:
continue
if not dirname and ext == ".pyc":
continue
self.file_mapping[f_noext] = (fpath, ext, opt_index)
except OSError:
continue
for smod in self.static_modules:
f_noext = smod.split(".")[-1]
self.file_mapping[f_noext] = (smod, ".o", 0)
def clear(self):
with self._lock:
super(LazyLoader, self).clear()
self.loaded_files = set()
self.missing_modules = {}
self.loaded_modules = {}
if hasattr(self, "opts"):
self._refresh_file_mapping()
self.initial_load = False
def __prep_mod_opts(self, opts):
if "__grains__" not in self.pack:
self.context_dict["grains"] = opts.get("grains", {})
self.pack["__grains__"] = hubblestack.utils.context.NamespacedDictWrapper(self.context_dict, "grains")
if "__pillar__" not in self.pack:
self.context_dict["pillar"] = opts.get("pillar", {})
self.pack["__pillar__"] = hubblestack.utils.context.NamespacedDictWrapper(self.context_dict, "pillar")
ret = opts.copy()
for item in ("logger",):
if item in ret:
del ret[item]
pres_opt = get_preserved_opts(opts)
if pres_opt is not None:
pres_opt.update(ret)
return pres_opt
return ret
def _iter_files(self, mod_name):
if mod_name in self.file_mapping:
yield mod_name
for k in self.file_mapping:
if mod_name in k:
yield k
for k in self.file_mapping:
if mod_name not in k:
yield k
def _reload_submodules(self, mod):
submodules = (getattr(mod, sname) for sname in dir(mod) if isinstance(getattr(mod, sname), mod.__class__))
for submodule in submodules:
if submodule.__name__.startswith(mod.__name__ + "."):
reload_module(submodule)
self._reload_submodules(submodule)
def _load_module(self, name):
mod = None
fpath, suffix = self.file_mapping[name][:2]
self.loaded_files.add(name)
fpath_dirname = os.path.dirname(fpath)
try:
sys.path.append(fpath_dirname)
if fpath_dirname.endswith("__pycache__"):
sys.path.append(os.path.dirname(fpath_dirname))
if suffix == ".pyx":
mod = pyximport.load_module(name, fpath, tempfile.gettempdir())
elif suffix == ".o":
top_mod = __import__(fpath, globals(), locals(), [])
comps = fpath.split(".")
if len(comps) < 2:
mod = top_mod
else:
mod = top_mod
for subname in comps[1:]:
mod = getattr(mod, subname)
elif suffix == ".zip":
mod = zipimporter(fpath).load_module(name)
else:
desc = self.suffix_map[suffix]
try:
mod_namespace = ".".join((self.loaded_base_name, self.mod_type_check(fpath), self.tag, name))
except TypeError:
mod_namespace = "{0}.{1}.{2}.{3}".format(
self.loaded_base_name, self.mod_type_check(fpath), self.tag, name
)
if suffix == "":
# pylint: disable=no-member
# Package directory, look for __init__
loader_details = [
(importlib.machinery.SourceFileLoader, importlib.machinery.SOURCE_SUFFIXES),
(importlib.machinery.SourcelessFileLoader, importlib.machinery.BYTECODE_SUFFIXES),
(importlib.machinery.ExtensionFileLoader, importlib.machinery.EXTENSION_SUFFIXES),
]
file_finder = importlib.machinery.FileFinder(fpath_dirname, *loader_details)
spec = file_finder.find_spec(mod_namespace)
if spec is None:
raise ImportError()
# TODO: Get rid of load_module in favor of
# exec_module below. load_module is deprecated, but
# loading using exec_module has been causing odd things
# with the magic dunders we pack into the loaded
# modules, most notably with salt-ssh's __opts__.
mod = spec.loader.load_module()
sys.modules[mod_namespace] = mod
if not self.initial_load:
self._reload_submodules(mod)
else:
loader = MODULE_KIND_MAP[desc[2]](mod_namespace, fpath)
spec = importlib.util.spec_from_file_location(mod_namespace, fpath, loader=loader)
if spec is None:
raise ImportError()
mod = spec.loader.load_module()
# mod = importlib.util.module_from_spec(spec)
# spec.loader.exec_module(mod)
# pylint: enable=no-member
sys.modules[mod_namespace] = mod
except IOError:
raise
except ImportError as exc:
if "magic number" in str(exc):
error_msg = "Failed to import {0} {1}. Bad magic number. If migrating from Python2 to Python3, remove all .pyc files and try again.".format(
self.tag, name
)
log.warning(error_msg)
self.missing_modules[name] = error_msg
log.debug("Failed to import %s %s:\n", self.tag, name, exc_info=True)
self.missing_modules[name] = exc
return False
except Exception as error:
log.error(
"Failed to import %s %s, this is due most likely to a " "syntax error:\n",
self.tag,
name,
exc_info=True,
)
self.missing_modules[name] = error
return False
except SystemExit as error:
try:
fn_, _, caller, _ = traceback.extract_tb(sys.exc_info()[2])[-1]
except Exception:
pass
else:
tgt_fn = os.path.join("salt", "utils", "process.py")
if fn_.endswith(tgt_fn) and "_handle_signals" in caller:
# Race conditon, SIGTERM or SIGINT received while loader
# was in process of loading a module. Call sys.exit to
# ensure that the process is killed.
sys.exit(0)
log.error("Failed to import %s %s as the module called exit()\n", self.tag, name, exc_info=True)
self.missing_modules[name] = error
return False
finally:
sys.path.remove(fpath_dirname)
if hasattr(mod, "__opts__"):
mod.__opts__.update(self.opts)
else:
mod.__opts__ = self.opts
# pack whatever other globals we were asked to
for p_name, p_value in self.pack.items():
setattr(mod, p_name, p_value)
module_name = mod.__name__.rsplit(".", 1)[-1]
if callable(self.xlate_modnames):
module_name = self.xlate_modnames([module_name], name, fpath, suffix, mod, mode="module_name")
name = self.xlate_modnames([name], name, fpath, suffix, mod, mode="name")
# Call a module's initialization method if it exists
module_init = getattr(mod, "__init__", None)
if inspect.isfunction(module_init):
try:
module_init(self.opts)
except TypeError as e:
log.error(e)
except Exception:
err_string = "__init__ failed"
log.debug("Error loading %s.%s: %s", self.tag, module_name, err_string, exc_info=True)
self.missing_modules[module_name] = err_string
self.missing_modules[name] = err_string
return False
if self.virtual_enable:
virtual_funcs_to_process = ["__virtual__"] + self.virtual_funcs
for virtual_func in virtual_funcs_to_process:
virtual_ret, module_name, virtual_err, virtual_aliases = self._process_virtual(
mod, module_name, virtual_func
)
if virtual_err is not None:
log.trace("Error loading %s.%s: %s", self.tag, module_name, virtual_err)
if virtual_ret is not True and module_name not in self.missing_modules:
self.missing_modules[module_name] = virtual_err
self.missing_modules[name] = virtual_err
return False
else:
virtual_aliases = ()
if getattr(mod, "__load__", False) is not False:
log.info(
"The functions from module '%s' are being loaded from the " "provided __load__ attribute", module_name
)
mod_names = [module_name] + list(virtual_aliases)
if callable(self.xlate_modnames):
mod_names = self.xlate_modnames(mod_names, name, fpath, suffix, mod, mode="mod_names")
mod_dict = dict(((x, self.loaded_modules.get(x, self.mod_dict_class())) for x in mod_names))
for attr in getattr(mod, "__load__", dir(mod)):
if attr.startswith("_"):
continue
func = getattr(mod, attr)
if not inspect.isfunction(func) and not isinstance(func, functools.partial):
continue
if callable(self.funcname_filter) and not self.funcname_filter(attr, mod):
continue
# If the module has the __func_alias__ attribute, it must be a
# dictionary mapping in the form of(key -> value):
# <real-func-name> -> <desired-func-name>
#
# It default's of course to the found callable attribute name
funcname = getattr(mod, "__func_alias__", {}).get(attr, attr)
for tgt_mod in mod_names:
try:
full_funcname = ".".join((tgt_mod, funcname))
except TypeError:
full_funcname = "{0}.{1}".format(tgt_mod, funcname)
if callable(self.xlate_funcnames):
funcname, full_funcname = self.xlate_funcnames(
name, fpath, suffix, tgt_mod, funcname, full_funcname, mod, func
)
if full_funcname not in self._dict:
self._dict[full_funcname] = func
if funcname not in mod_dict[tgt_mod]:
setattr(mod_dict[tgt_mod], funcname, func)
mod_dict[tgt_mod][funcname] = func
self._apply_outputter(func, mod)
try:
Depends.enforce_dependencies(self._dict, self.tag, name)
except RuntimeError as exc:
log.info("Depends.enforce_dependencies() failed for the following " "reason: %s", exc)
for tgt_mod in mod_names:
self.loaded_modules[tgt_mod] = mod_dict[tgt_mod]
return True
def _load(self, key):
if not isinstance(key, str):
raise KeyError("The key must be a string.")
if "." not in key:
raise KeyError("The key '{0}' should contain a '.'".format(key))
mod_name, _ = key.split(".", 1)
with self._lock:
if mod_name in self.missing_modules or key in self._dict:
return True
if self.whitelist and mod_name not in self.whitelist:
log.error(
"Failed to load function %s because its module (%s) is " "not in the whitelist: %s",
key,
mod_name,
self.whitelist,
)
raise KeyError(key)
def _inner_load(mod_name):
for name in self._iter_files(mod_name):
if name in self.loaded_files:
continue
if self._load_module(name) and key in self._dict:
return True
return False
ret = None
reloaded = False
while True:
try:
ret = _inner_load(mod_name)
if not reloaded and ret is not True:
self._refresh_file_mapping()
reloaded = True
continue
break
except IOError:
if not reloaded:
self._refresh_file_mapping()
reloaded = True
continue
return ret
def _load_all(self):
with self._lock:
for name in self.file_mapping:
if name in self.loaded_files or name in self.missing_modules:
continue
self._load_module(name)
self.loaded = True
    def reload_modules(self):
        """Forget which files were loaded and load everything again.

        Resets the loaded-file bookkeeping under the loader lock so that
        ``_load_all`` re-imports every mapped file from scratch.
        """
        with self._lock:
            self.loaded_files = set()
            # NOTE(review): re-acquires self._lock inside _load_all; this is
            # only safe if self._lock is reentrant (RLock) -- confirm.
            self._load_all()
def _apply_outputter(self, func, mod):
if hasattr(mod, "__outputter__"):
outp = mod.__outputter__
if func.__name__ in outp:
func.__outputter__ = outp[func.__name__]
    def _process_virtual(self, mod, module_name, virtual_func="__virtual__"):
        """Evaluate a module's virtual function to decide if/how to load it.

        :param mod: the imported module object.
        :param module_name: the name the module was discovered under.
        :param virtual_func: name of the virtual function to invoke.
        :return: ``(load_ok, module_name, error_reason, virtual_aliases)``
            where ``module_name`` may have been renamed by the module.
        """
        # Extra names this module asks to be reachable under.
        virtual_aliases = getattr(mod, "__virtual_aliases__", tuple())
        try:
            error_reason = None
            # NOTE(review): the guard checks __virtual__ specifically, yet the
            # call below uses ``virtual_func`` -- confirm that alternate
            # virtual funcs are intentionally skipped when __virtual__ is absent.
            if hasattr(mod, "__virtual__") and inspect.isfunction(mod.__virtual__):
                try:
                    start = time.time()
                    virtual = getattr(mod, virtual_func)()
                    if isinstance(virtual, tuple):
                        # (False, "reason") style return value.
                        error_reason = virtual[1]
                        virtual = virtual[0]
                    if self.opts.get("virtual_timer", False):
                        end = time.time() - start
                        msg = "Virtual function took {0} seconds for {1}".format(end, module_name)
                        log.warning(msg)
                except Exception as exc:
                    error_reason = (
                        "Exception raised when processing __virtual__ function"
                        " for {0}. Module will not be loaded: {1}".format(mod.__name__, exc)
                    )
                    log.error(error_reason, exc_info=True)
                    virtual = None
                # The declared virtual name wins over the returned one below.
                virtualname = getattr(mod, "__virtualname__", virtual)
                if not virtual:
                    # if __virtual__() evaluates to False then the module
                    # wasn't meant for this platform or it's not supposed to
                    # load for some other reason.
                    # Some modules might accidentally return None and are
                    # improperly loaded
                    if virtual is None:
                        log.warning(
                            "%s.__virtual__() is wrongly returning `None`. "
                            "It should either return `True`, `False` or a new "
                            "name. If you're the developer of the module "
                            "'%s', please fix this.",
                            mod.__name__,
                            module_name,
                        )
                    return (False, module_name, error_reason, virtual_aliases)
                # or module renames
                if virtual is not True and module_name != virtual:
                    # The module is renaming itself. Updating the module name
                    # with the new name
                    log.trace("Loaded %s as virtual %s", module_name, virtual)
                    if not hasattr(mod, "__virtualname__"):
                        hubblestack.utils.versions.warn_until(
                            "Hydrogen",
                            "The '{0}' module is renaming itself in its "
                            "__virtual__() function ({1} => {2}). Please "
                            "set it's virtual name as the "
                            "'__virtualname__' module attribute. "
                            "Example: \"__virtualname__ = '{2}'\"".format(mod.__name__, module_name, virtual),
                        )
                    if virtualname != virtual:
                        # __virtualname__ and the __virtual__() return value
                        # disagree; the attribute wins, but flag the mismatch.
                        log.error(
                            "The module '%s' is showing some bad usage. Its "
                            "__virtualname__ attribute is set to '%s' yet the "
                            "__virtual__() function is returning '%s'. These "
                            "values should match!",
                            mod.__name__,
                            virtualname,
                            virtual,
                        )
                    module_name = virtualname
                # If the __virtual__ function returns True and __virtualname__
                # is set then use it
                elif virtual is True and virtualname != module_name:
                    if virtualname is not True:
                        module_name = virtualname
        except KeyError:
            # Key errors come out of the virtual function when passing
            # in incomplete grains sets, these can be safely ignored
            # and logged to debug, still, it includes the traceback to
            # help debugging.
            log.error('Failed to LazyLoad "%s"', module_name, exc_info=True)
        except Exception:
            # If the module throws an exception during __virtual__()
            # then log the information and continue to the next.
            log.error("Failed to read the virtual function for %s: %s", self.tag, module_name, exc_info=True)
            return (False, module_name, error_reason, virtual_aliases)
        return (True, module_name, None, virtual_aliases)
class FilterDictWrapper(MutableMapping):
    """Mapping view over ``d`` exposing only keys that end in ``suffix``.

    Reads (`wrapper[key]`) transparently append the suffix before looking
    up in the underlying dict; iteration yields keys with the suffix
    stripped. Writes and deletes pass through to the underlying dict
    unchanged (no suffix translation).
    """

    def __init__(self, d, suffix):
        self._dict = d
        self.suffix = suffix

    def __setitem__(self, key, val):
        # Writes are passed through verbatim.
        self._dict[key] = val

    def __delitem__(self, key):
        del self._dict[key]

    def __getitem__(self, key):
        return self._dict[key + self.suffix]

    def __len__(self):
        # Length of the underlying dict, not of the filtered view.
        return len(self._dict)

    def __iter__(self):
        # BUG FIX: the old ``key.replace(self.suffix, "")`` also removed
        # occurrences of the suffix *inside* the key (e.g. "x.py.py" -> "x")
        # and misbehaved for an empty suffix; strip only the trailing suffix.
        n = len(self.suffix)
        for key in self._dict:
            if key.endswith(self.suffix):
                yield key[:-n] if n else key
def matchers(opts):
    """Return a LazyLoader over the configured matcher module directories."""
    matcher_dirs = _module_dirs(opts, "matchers")
    return LazyLoader(matcher_dirs, opts, tag="matchers")
def _nova_funcname_filter(funcname, mod):  # pylint: disable=unused-argument
    """Only expose the single nova entry point: the ``audit`` function."""
    return funcname == "audit"
def _nova_xlate_modnames(mod_names, name, fpath, suffix, mod, mode="mod_names"):  # pylint: disable=unused-argument
    """Translate loader module names into nova's "/<name>" convention.

    Returns a bare string for the singular modes ("module_name"/"name")
    and a single-element list otherwise.
    """
    translated = "/" + name
    return translated if mode in ("module_name", "name") else [translated]
def _nova_xlate_funcnames(
    name, fpath, suffix, tgt_mod, funcname, full_funcname, mod, func
):  # pylint: disable=unused-argument
    """Use the file extension (normalizing "pyc" to "py") as the function name.

    Returns ``(new_funcname, "<name>.<new_funcname>")``.
    """
    ext = suffix[1:]
    if ext == "pyc":
        ext = "py"
    return ext, "{0}.{1}".format(name, ext)
def nova(hubble_dir, opts, modules, context=None):
    """Build the nova (audit) LazyLoader and pre-read YAML profile data.

    :param hubble_dir: iterable of directories to scan for ``.yaml`` profiles.
    :param opts: loader/runtime options dict.
    :param modules: execution modules packed into the loader as ``__mods__``.
    :param context: optional shared context packed as ``__context__``.
    :return: the LazyLoader, with ``__data__`` (parsed YAML keyed by the
        path relative to its module dir) and ``__missing_data__`` (parse
        error strings keyed the same way) attached.
    """
    loader = LazyLoader(
        _module_dirs(opts, "nova"),
        opts,
        tag="nova",
        funcname_filter=_nova_funcname_filter,
        xlate_modnames=_nova_xlate_modnames,
        xlate_funcnames=_nova_xlate_funcnames,
        pack={"__context__": context, "__mods__": modules},
    )
    loader.__data__ = data = dict()
    loader.__missing_data__ = missing_data = dict()
    for mod_dir in hubble_dir:
        for path, _, filenames in os.walk(mod_dir):
            for filename in filenames:
                pathname = os.path.join(path, filename)
                # Key by the path relative to the module dir (keeps a
                # leading separator, matching _nova_xlate_modnames).
                name = pathname[len(mod_dir) :]
                if filename.endswith(".yaml"):
                    try:
                        with open(pathname, "r") as fh:
                            data[name] = yaml.safe_load(fh)
                    except Exception as exc:
                        missing_data[name] = str(exc)
                        # BUG FIX: was ``pathnmame`` -- a NameError that would
                        # mask the original YAML error on this path.
                        log.exception("Error loading yaml from %s", pathname)
    return loader
| true | true |
f72e3b933ba4563bbe21eb2dba0d89e1a256b8b7 | 6,057 | py | Python | update-server/tests/buildroot/test_update.py | faliester/opentrons | e945d0f72fed39b0f68c0b30b7afd1981644184f | [
"Apache-2.0"
] | 235 | 2017-10-27T20:37:27.000Z | 2022-03-30T14:09:49.000Z | update-server/tests/buildroot/test_update.py | faliester/opentrons | e945d0f72fed39b0f68c0b30b7afd1981644184f | [
"Apache-2.0"
] | 8,425 | 2017-10-26T15:25:43.000Z | 2022-03-31T23:54:26.000Z | update-server/tests/buildroot/test_update.py | faliester/opentrons | e945d0f72fed39b0f68c0b30b7afd1981644184f | [
"Apache-2.0"
] | 130 | 2017-11-09T21:02:37.000Z | 2022-03-15T18:01:24.000Z | """ Tests for the update server state machine in otupdate.buildroot.update
"""
import asyncio
import binascii
import hashlib
import zipfile
import pytest
from otupdate.buildroot import update, config, file_actions
from otupdate.buildroot.update_session import UpdateSession, Stages
def session_endpoint(token, endpoint):
    """Build the URL for an endpoint scoped to an update-session token."""
    return '/server/update/{0}/{1}'.format(token, endpoint)
@pytest.fixture
async def update_session(test_cli):
    """Yield the token of a fresh update session; always cancel afterwards
    so one test's session cannot leak into the next."""
    resp = await test_cli.post('/server/update/begin')
    body = await resp.json()
    yield body['token']
    await test_cli.post('/server/update/cancel')
async def test_begin(test_cli):
    """POST /begin creates a session (201); a second POST conflicts (409)."""
    # Creating a session should work
    resp = await test_cli.post('/server/update/begin')
    body = await resp.json()
    assert resp.status == 201
    assert 'token' in body
    # The server keeps the live session on the app under SESSION_VARNAME,
    # and its token must match the one returned to the client.
    assert test_cli.server.app.get(update.SESSION_VARNAME)
    assert test_cli.server.app[update.SESSION_VARNAME].token\
        == body['token']
    # Creating a session twice should not work
    resp = await test_cli.post('/server/update/begin')
    body = await resp.json()
    assert resp.status == 409
    assert 'message' in body
async def test_cancel(test_cli):
    """POST /cancel tears down the session and is safe to call repeatedly."""
    # cancelling when there is a session should work great
    resp = await test_cli.post('/server/update/begin')
    assert test_cli.server.app.get(update.SESSION_VARNAME)
    resp = await test_cli.post('/server/update/cancel')
    assert resp.status == 200
    # the session object must be removed from the app
    assert test_cli.server.app.get(update.SESSION_VARNAME) is None
    # and so should cancelling when there is no session (idempotent)
    resp = await test_cli.post('/server/update/cancel')
    assert resp.status == 200
async def test_commit_fails_wrong_state(test_cli, update_session):
    """A commit before a validated upload is a state violation -> 409."""
    resp = await test_cli.post(session_endpoint(update_session, 'commit'))
    assert resp.status == 409
async def test_future_chain(otupdate_config, downloaded_update_file,
                            loop, testing_partition):
    """Drive _begin_validation directly and watch the stage progression.

    The session should move VALIDATING -> WRITING -> DONE, with progress
    monotonically non-decreasing inside each stage.
    """
    conf = config.load_from_path(otupdate_config)
    session = UpdateSession(conf.download_storage_path)
    fut = update._begin_validation(session,
                                   conf,
                                   loop,
                                   downloaded_update_file)
    assert session.stage == Stages.VALIDATING
    last_progress = 0.0
    while session.stage == Stages.VALIDATING:
        # progress never goes backwards within a stage
        assert session.state['progress'] >= last_progress
        assert session.state['stage'] == 'validating'
        assert session.stage == Stages.VALIDATING
        last_progress = session.state['progress']
        await asyncio.sleep(0.01)
    # validation future must have resolved before writing starts
    assert fut.done()
    last_progress = 0.0
    while session.stage == Stages.WRITING:
        assert session.state['progress'] >= last_progress
        last_progress = session.state['progress']
        await asyncio.sleep(0.1)
    assert session.stage == Stages.DONE, session.error
@pytest.mark.exclude_rootfs_ext4
async def test_session_catches_validation_fail(otupdate_config,
                                               downloaded_update_file,
                                               loop):
    """With rootfs.ext4 excluded from the zip, validation must fail.

    The validation future raises FileMissing and the session lands in the
    error state with populated error/message fields.
    """
    conf = config.load_from_path(otupdate_config)
    session = UpdateSession(conf.download_storage_path)
    fut = update._begin_validation(
        session,
        conf,
        loop,
        downloaded_update_file)
    with pytest.raises(file_actions.FileMissing):
        await fut
    assert session.state['stage'] == 'error'
    assert session.stage == Stages.ERROR
    assert 'error' in session.state
    assert 'message' in session.state
async def test_update_happypath(test_cli, update_session,
                                downloaded_update_file, loop,
                                testing_partition):
    """Full update flow: upload -> validate -> write -> done, then verify
    the written partition hash against the hash shipped in the zip."""
    # Upload
    resp = await test_cli.post(
        session_endpoint(update_session, 'file'),
        data={'ot2-system.zip': open(downloaded_update_file, 'rb')})
    assert resp.status == 201
    body = await resp.json()
    assert body['stage'] == 'validating'
    assert 'progress' in body
    # Wait through validation (bounded to 300s of wall clock)
    then = loop.time()
    last_progress = 0.0
    while body['stage'] == 'validating':
        assert body['progress'] >= last_progress
        resp = await test_cli.get(session_endpoint(update_session,
                                                   'status'))
        assert resp.status == 200
        body = await resp.json()
        last_progress = body['progress']
        assert loop.time() - then <= 300
    if body['stage'] == 'writing':
        # Wait through write with the same monotonic-progress invariant
        then = loop.time()
        last_progress = 0.0
        while body['stage'] == 'writing':
            assert body['progress'] >= last_progress
            resp = await test_cli.get(session_endpoint(update_session,
                                                       'status'))
            assert resp.status == 200
            body = await resp.json()
            last_progress = body['progress']
            assert loop.time() - then <= 300
    assert body['stage'] == 'done'
    # The partition contents must hash to the value shipped in the zip
    tp_hasher = hashlib.sha256()
    tp_hasher.update(open(testing_partition, 'rb').read())
    tp_hash = binascii.hexlify(tp_hasher.digest())
    with zipfile.ZipFile(downloaded_update_file, 'r') as zf:
        assert tp_hash == zf.read('rootfs.ext4.hash').strip()
@pytest.mark.exclude_rootfs_ext4
async def test_update_catches_validation_fail(test_cli, update_session,
                                              downloaded_update_file, loop,
                                              testing_partition):
    """Uploading a zip without rootfs.ext4 must end the session in error."""
    # Upload
    resp = await test_cli.post(
        session_endpoint(update_session, 'file'),
        data={'ot2-system.zip': open(downloaded_update_file, 'rb')})
    assert resp.status == 201
    body = await resp.json()
    assert body['stage'] == 'validating'
    assert 'progress' in body
    # NOTE(review): busy-polls with no sleep and no timeout, unlike the
    # happy-path test -- relies on the test server yielding between requests.
    while body['stage'] == 'validating':
        resp = await test_cli.get(
            session_endpoint(update_session, 'status'))
        body = await resp.json()
    assert body['stage'] == 'error'
    assert body['error'] == 'File Missing'
| 35.421053 | 75 | 0.632491 | import asyncio
import binascii
import hashlib
import zipfile
import pytest
from otupdate.buildroot import update, config, file_actions
from otupdate.buildroot.update_session import UpdateSession, Stages
def session_endpoint(token, endpoint):
return f'/server/update/{token}/{endpoint}'
@pytest.fixture
async def update_session(test_cli):
resp = await test_cli.post('/server/update/begin')
body = await resp.json()
yield body['token']
await test_cli.post('/server/update/cancel')
async def test_begin(test_cli):
resp = await test_cli.post('/server/update/begin')
body = await resp.json()
assert resp.status == 201
assert 'token' in body
assert test_cli.server.app.get(update.SESSION_VARNAME)
assert test_cli.server.app[update.SESSION_VARNAME].token\
== body['token']
resp = await test_cli.post('/server/update/begin')
body = await resp.json()
assert resp.status == 409
assert 'message' in body
async def test_cancel(test_cli):
resp = await test_cli.post('/server/update/begin')
assert test_cli.server.app.get(update.SESSION_VARNAME)
resp = await test_cli.post('/server/update/cancel')
assert resp.status == 200
assert test_cli.server.app.get(update.SESSION_VARNAME) is None
resp = await test_cli.post('/server/update/cancel')
assert resp.status == 200
async def test_commit_fails_wrong_state(test_cli, update_session):
resp = await test_cli.post(session_endpoint(update_session, 'commit'))
assert resp.status == 409
async def test_future_chain(otupdate_config, downloaded_update_file,
loop, testing_partition):
conf = config.load_from_path(otupdate_config)
session = UpdateSession(conf.download_storage_path)
fut = update._begin_validation(session,
conf,
loop,
downloaded_update_file)
assert session.stage == Stages.VALIDATING
last_progress = 0.0
while session.stage == Stages.VALIDATING:
assert session.state['progress'] >= last_progress
assert session.state['stage'] == 'validating'
assert session.stage == Stages.VALIDATING
last_progress = session.state['progress']
await asyncio.sleep(0.01)
assert fut.done()
last_progress = 0.0
while session.stage == Stages.WRITING:
assert session.state['progress'] >= last_progress
last_progress = session.state['progress']
await asyncio.sleep(0.1)
assert session.stage == Stages.DONE, session.error
@pytest.mark.exclude_rootfs_ext4
async def test_session_catches_validation_fail(otupdate_config,
downloaded_update_file,
loop):
conf = config.load_from_path(otupdate_config)
session = UpdateSession(conf.download_storage_path)
fut = update._begin_validation(
session,
conf,
loop,
downloaded_update_file)
with pytest.raises(file_actions.FileMissing):
await fut
assert session.state['stage'] == 'error'
assert session.stage == Stages.ERROR
assert 'error' in session.state
assert 'message' in session.state
async def test_update_happypath(test_cli, update_session,
downloaded_update_file, loop,
testing_partition):
resp = await test_cli.post(
session_endpoint(update_session, 'file'),
data={'ot2-system.zip': open(downloaded_update_file, 'rb')})
assert resp.status == 201
body = await resp.json()
assert body['stage'] == 'validating'
assert 'progress' in body
then = loop.time()
last_progress = 0.0
while body['stage'] == 'validating':
assert body['progress'] >= last_progress
resp = await test_cli.get(session_endpoint(update_session,
'status'))
assert resp.status == 200
body = await resp.json()
last_progress = body['progress']
assert loop.time() - then <= 300
if body['stage'] == 'writing':
then = loop.time()
last_progress = 0.0
while body['stage'] == 'writing':
assert body['progress'] >= last_progress
resp = await test_cli.get(session_endpoint(update_session,
'status'))
assert resp.status == 200
body = await resp.json()
last_progress = body['progress']
assert loop.time() - then <= 300
assert body['stage'] == 'done'
tp_hasher = hashlib.sha256()
tp_hasher.update(open(testing_partition, 'rb').read())
tp_hash = binascii.hexlify(tp_hasher.digest())
with zipfile.ZipFile(downloaded_update_file, 'r') as zf:
assert tp_hash == zf.read('rootfs.ext4.hash').strip()
@pytest.mark.exclude_rootfs_ext4
async def test_update_catches_validation_fail(test_cli, update_session,
downloaded_update_file, loop,
testing_partition):
resp = await test_cli.post(
session_endpoint(update_session, 'file'),
data={'ot2-system.zip': open(downloaded_update_file, 'rb')})
assert resp.status == 201
body = await resp.json()
assert body['stage'] == 'validating'
assert 'progress' in body
while body['stage'] == 'validating':
resp = await test_cli.get(
session_endpoint(update_session, 'status'))
body = await resp.json()
assert body['stage'] == 'error'
assert body['error'] == 'File Missing'
| true | true |
f72e3c6beeda2758e387c16130afd284e00fb3b4 | 1,715 | py | Python | py/getting_started.py | qcgm1978/formula | fee12667b585e37b21768f4d165b8bc5f2d4f448 | [
"Apache-2.0"
] | null | null | null | py/getting_started.py | qcgm1978/formula | fee12667b585e37b21768f4d165b8bc5f2d4f448 | [
"Apache-2.0"
] | null | null | null | py/getting_started.py | qcgm1978/formula | fee12667b585e37b21768f4d165b8bc5f2d4f448 | [
"Apache-2.0"
] | null | null | null | import unittest,math
from datatype import DataTypes
import numpy as np
from scipy import stats
class TDD_GETTING_STARTED(unittest.TestCase):
    """Exercises the DataTypes helper: type classification and basic stats."""

    def test_mse(self):
        """getMSE must reject a plain list argument with TypeError."""
        a=[1,2,3]
        b=[4,5,6]
        self.assertRaises(TypeError,DataTypes({'a':a}).getMSE,b)

    def test_datatypes(self):
        """ints are discrete numericals, floats continuous; mean/median/mode."""
        d=DataTypes(5)
        self.assertTrue(d.Numerical())
        self.assertTrue(d.Discrete())
        self.assertFalse(d.Continuous())
        d=DataTypes(5.)
        self.assertTrue(d.Numerical())
        self.assertFalse(d.Discrete())
        self.assertTrue(d.Continuous())
        # d has 13 values (odd count), d1 has 12 (even count)
        d=DataTypes({"speed": [99,86,87,88,111,86,103,87,94,78,77,85,86]})
        d1=DataTypes({"speed": [99,86,87,88,86,103,87,94,78,77,85,86]})
        m=d.getMean()
        self.assertAlmostEqual(m, 89.77, 1)
        median = d.getMedian()
        median1 = d1.getMedian()
        # odd count -> middle value; even count -> mean of the two middles
        self.assertEqual(median,87)
        self.assertEqual(median1, 86.5)
        mode = d.getMode()
        # scipy-style mode result: both indexing and attribute access work
        self.assertEqual(mode[0],86)
        self.assertEqual(mode.mode,86)
        self.assertEqual(mode[1],3)
        self.assertEqual(mode.count, 3)

    def test_standard_deviation(self):
        """Std and variance must agree: std == sqrt(variance)."""
        d = DataTypes({'speed': [86, 87, 88, 86, 87, 85, 86]})
        d1 = DataTypes({'speed': [32,111,138,28,59,77,97]})
        s = d.getStd()
        s1 = d1.getStd()
        self.assertAlmostEqual(s,.9,2)
        self.assertAlmostEqual(s1, 37.85, 2)
        v=d1.getVariance()
        self.assertAlmostEqual(v,1432.2,1)
        # the formula to find the standard deviation is the square root of the variance:
        self.assertEqual(s1,math.sqrt(v))
        self.assertEqual(s1 ** 2, (v))
if __name__ == '__main__':
unittest.main()
| 35.729167 | 88 | 0.602332 | import unittest,math
from datatype import DataTypes
import numpy as np
from scipy import stats
class TDD_GETTING_STARTED(unittest.TestCase):
def test_mse(self):
a=[1,2,3]
b=[4,5,6]
self.assertRaises(TypeError,DataTypes({'a':a}).getMSE,b)
def test_datatypes(self):
d=DataTypes(5)
self.assertTrue(d.Numerical())
self.assertTrue(d.Discrete())
self.assertFalse(d.Continuous())
d=DataTypes(5.)
self.assertTrue(d.Numerical())
self.assertFalse(d.Discrete())
self.assertTrue(d.Continuous())
d=DataTypes({"speed": [99,86,87,88,111,86,103,87,94,78,77,85,86]})
d1=DataTypes({"speed": [99,86,87,88,86,103,87,94,78,77,85,86]})
m=d.getMean()
self.assertAlmostEqual(m, 89.77, 1)
median = d.getMedian()
median1 = d1.getMedian()
self.assertEqual(median,87)
self.assertEqual(median1, 86.5)
mode = d.getMode()
self.assertEqual(mode[0],86)
self.assertEqual(mode.mode,86)
self.assertEqual(mode[1],3)
self.assertEqual(mode.count, 3)
def test_standard_deviation(self):
d = DataTypes({'speed': [86, 87, 88, 86, 87, 85, 86]})
d1 = DataTypes({'speed': [32,111,138,28,59,77,97]})
s = d.getStd()
s1 = d1.getStd()
self.assertAlmostEqual(s,.9,2)
self.assertAlmostEqual(s1, 37.85, 2)
v=d1.getVariance()
self.assertAlmostEqual(v,1432.2,1)
self.assertEqual(s1,math.sqrt(v))
self.assertEqual(s1 ** 2, (v))
if __name__ == '__main__':
unittest.main()
| true | true |
f72e3d7d277d819667954bd3c25a462510891fc1 | 1,367 | py | Python | lab1/queryEndpoint.py | KimMupfumira/python-2022 | 9b9936c5f9516fff8393b8f91a093d6dd19e7cac | [
"Apache-2.0"
] | 1 | 2022-02-08T20:25:23.000Z | 2022-02-08T20:25:23.000Z | lab1/queryEndpoint.py | KimMupfumira/python-2022 | 9b9936c5f9516fff8393b8f91a093d6dd19e7cac | [
"Apache-2.0"
] | null | null | null | lab1/queryEndpoint.py | KimMupfumira/python-2022 | 9b9936c5f9516fff8393b8f91a093d6dd19e7cac | [
"Apache-2.0"
] | 5 | 2022-02-01T14:14:17.000Z | 2022-03-28T21:41:55.000Z | '''
Created on 19 Jan 2021
@author: ejimenez-ruiz
'''
from SPARQLWrapper import SPARQLWrapper, JSON
import time
def queryRemoteGraph(endpoint_url, query, attempts=3):
    """Run a SPARQL SELECT against a remote endpoint, printing ?x bindings.

    On failure, sleeps 60s (to respect endpoint rate limits) and retries
    recursively up to ``attempts`` times; returns None once exhausted.

    :param endpoint_url: SPARQL endpoint URL.
    :param query: SELECT query binding a variable named ``?x``.
    :param attempts: remaining retry budget.
    """
    sparqlw = SPARQLWrapper(endpoint_url)
    sparqlw.setReturnFormat(JSON)
    try:
        sparqlw.setQuery(query)
        results = sparqlw.query().convert()
        for result in results["results"]["bindings"]:
            # Each binding holds the value bound to ?x.
            print(result["x"]["value"])
    except Exception:
        # BUG FIX: was a bare ``except:`` that also swallowed
        # KeyboardInterrupt/SystemExit; narrowed to Exception.
        print("Query '%s' failed. Attempts: %s" % (query, str(attempts)))
        time.sleep(60)  # to avoid limit of calls, sleep 60s
        attempts -= 1
        if attempts > 0:
            return queryRemoteGraph(endpoint_url, query, attempts)
        else:
            return None
# Query a remote RDF graph (e.g., a public SPARQL endpoint).
dbpedia_endpoint = "http://dbpedia.org/sparql"
# All rdf:type values for the Chicago Bulls resource.
dbpedia_query = "SELECT DISTINCT ?x WHERE { <http://dbpedia.org/resource/Chicago_Bulls> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> ?x . }"
print("\nQuerying DBPedia Knowledge Graph (types of Chicago Bulls)")
queryRemoteGraph(dbpedia_endpoint, dbpedia_query)
print("\nTests successful!!")
| 23.568966 | 145 | 0.585223 |
from SPARQLWrapper import SPARQLWrapper, JSON
import time
def queryRemoteGraph(endpoint_url, query, attempts=3):
sparqlw = SPARQLWrapper(endpoint_url)
sparqlw.setReturnFormat(JSON)
try:
sparqlw.setQuery(query)
results = sparqlw.query().convert()
for result in results["results"]["bindings"]:
print(result["x"]["value"])
except:
print("Query '%s' failed. Attempts: %s" % (query, str(attempts)))
time.sleep(60)
attempts-=1
if attempts>0:
return queryRemoteGraph(endpoint_url, query, attempts)
else:
return None
dbpedia_endpoint = "http://dbpedia.org/sparql"
dbpedia_query = "SELECT DISTINCT ?x WHERE { <http://dbpedia.org/resource/Chicago_Bulls> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> ?x . }"
print("\nQuerying DBPedia Knowledge Graph (types of Chicago Bulls)")
queryRemoteGraph(dbpedia_endpoint, dbpedia_query)
print("\nTests successful!!")
| true | true |
f72e3ea1251b6b2b5249aab531946b974437cbe3 | 3,236 | py | Python | population_class.py | adamscj14/Population-Genetics-Simulations | 092648f42c0a1bf3fcdb2b58ac78bb58e1a406c0 | [
"MIT"
] | null | null | null | population_class.py | adamscj14/Population-Genetics-Simulations | 092648f42c0a1bf3fcdb2b58ac78bb58e1a406c0 | [
"MIT"
] | null | null | null | population_class.py | adamscj14/Population-Genetics-Simulations | 092648f42c0a1bf3fcdb2b58ac78bb58e1a406c0 | [
"MIT"
] | null | null | null |
import random
## Class that holds population object
class Population:
    """A Wright-Fisher-style population of diploid individuals.

    Tracks a bi-allelic locus (A encoded as 1, a as 0) through discrete,
    non-overlapping generations.

    NOTE(review): the selection (``a_advantage``/``A_advantage``) and
    mutation (``mut_rate_*``) parameters are stored but never used by
    ``advance_generation`` -- confirm whether that is intentional.
    """

    def __init__(self, size, allele_frequency, a_advantage=0.0, A_advantage=0.0, mut_rate_a_to_A=0.0, mut_rate_A_to_a=0.0):
        self.size = size
        # frequency of the A allele in the current parent generation
        self.parent_allele_freq = float(allele_frequency)
        self.child_allele_freq = 0
        self.parent_individuals = self.init_individuals()
        self.child_individuals = []
        self.a_advantage = float(a_advantage)
        self.A_advantage = float(A_advantage)
        self.mut_a_to_A = float(mut_rate_a_to_A)
        self.mut_A_to_a = float(mut_rate_A_to_a)

    def init_individuals(self):
        """Create the founding generation at Hardy-Weinberg genotype counts."""
        hwe = self.get_hwe()
        individuals = list(range(self.size))
        count = 0
        for index in [0, 1, 2]:
            # index 0 -> AA, 1 -> Aa, 2 -> aa
            allele_one = 1
            allele_two = 1
            num_indivs = hwe[index]
            # BUG FIX: removed leftover debug ``print(num_indivs)``.
            if index == 1:
                allele_one = 1
                allele_two = 0
            elif index == 2:
                allele_one = 0
                allele_two = 0
            for i in range(num_indivs):
                individuals[count] = DiploidIndividual(allele_one, allele_two)
                count += 1
        return individuals

    def advance_generation(self):
        """Produce the next generation by random mating and update A frequency."""
        if len(self.child_individuals) != 0:
            # promote the previous children to parents
            self.parent_individuals = self.child_individuals[:]
            self.parent_allele_freq = self.child_allele_freq
        child_individuals = list(range(self.size))
        A_allele_count = 0
        for i in child_individuals:
            # pick first parent
            parent_one_index = random.randint(0, self.size - 1)
            parent_two_index = parent_one_index
            parent_one = self.parent_individuals[parent_one_index]
            allele_one = parent_one.choose_allele()
            # pick a distinct second parent (no selfing)
            # NOTE(review): this loop cannot terminate when size == 1.
            while parent_one_index == parent_two_index:
                parent_two_index = random.randint(0, self.size - 1)
            parent_two = self.parent_individuals[parent_two_index]
            allele_two = parent_two.choose_allele()
            child_individuals[i] = DiploidIndividual(allele_one, allele_two)
            A_allele_count += allele_one
            A_allele_count += allele_two
        self.child_individuals = child_individuals
        # 2N allele copies in a diploid population of N individuals
        self.child_allele_freq = float(A_allele_count / (self.size * 2))

    def get_hwe(self):
        """Return expected [AA, Aa, aa] counts under Hardy-Weinberg equilibrium."""
        counts = [0, 0, 0]
        a_freq = 1 - self.parent_allele_freq
        AA_freq = self.parent_allele_freq ** 2
        Aa_freq = 2 * self.parent_allele_freq * a_freq
        counts[0] = round(AA_freq * self.size)
        counts[1] = round(Aa_freq * self.size)
        # remainder goes to aa so the counts always sum to the population size
        counts[2] = round(self.size - (counts[0] + counts[1]))
        return counts
## Class that holds diploid individual
class DiploidIndividual:
    """A diploid individual carrying two alleles at one locus (0 = a, 1 = A)."""

    def __init__(self, allele_one, allele_two):
        self.allele_one = allele_one
        self.allele_two = allele_two

    def choose_allele(self):
        """Return one of the two alleles, chosen uniformly at random."""
        if random.randint(0, 1) == 0:
            return self.allele_one
        return self.allele_two
import random
_(self, size, allele_frequency, a_advantage=0.0, A_advantage=0.0, mut_rate_a_to_A=0.0, mut_rate_A_to_a=0.0):
self.size = size
self.parent_allele_freq = float(allele_frequency)
self.child_allele_freq = 0
self.parent_individuals = self.init_individuals()
self.child_individuals = []
self.a_advantage = float(a_advantage)
self.A_advantage = float(A_advantage)
self.mut_a_to_A = float(mut_rate_a_to_A)
self.mut_A_to_a = float(mut_rate_A_to_a)
def init_individuals(self):
hwe = self.get_hwe()
individuals = list(range(self.size))
count = 0
for index in [0, 1, 2]:
allele_one = 1
allele_two = 1
num_indivs = hwe[index]
print(num_indivs)
if index == 1:
allele_one = 1
allele_two = 0
elif index == 2:
allele_one = 0
allele_two = 0
for i in range(num_indivs):
individuals[count] = DiploidIndividual(allele_one, allele_two)
count += 1
return individuals
def advance_generation(self):
if len(self.child_individuals) != 0:
self.parent_individuals = self.child_individuals[:]
self.parent_allele_freq = self.child_allele_freq
child_individuals = list(range(self.size))
A_allele_count = 0
for i in child_individuals:
parent_one_index = random.randint(0, self.size - 1)
parent_two_index = parent_one_index
parent_one = self.parent_individuals[parent_one_index]
allele_one = parent_one.choose_allele()
while parent_one_index == parent_two_index:
parent_two_index = random.randint(0, self.size - 1)
parent_two = self.parent_individuals[parent_two_index]
allele_two = parent_two.choose_allele()
child_individuals[i] = DiploidIndividual(allele_one, allele_two)
A_allele_count += allele_one
A_allele_count += allele_two
self.child_individuals = child_individuals
self.child_allele_freq = float(A_allele_count / (self.size * 2))
def get_hwe(self):
counts = [0, 0, 0]
a_freq = 1 - self.parent_allele_freq
AA_freq = self.parent_allele_freq ** 2
Aa_freq = 2 * self.parent_allele_freq * a_freq
aa_freq = a_freq ** 2
counts[0] = round(AA_freq * self.size)
counts[1] = round(Aa_freq * self.size)
counts[2] = round(self.size - (counts[0] + counts[1]))
return counts
_init__(self, allele_one, allele_two):
self.allele_one = allele_one
self.allele_two = allele_two
def choose_allele(self):
allele = 0
allele_num = random.randint(0,1)
if allele_num == 0:
allele = self.allele_one
else:
allele = self.allele_two
return allele | true | true |
f72e3ebabf440ec21aa584a65bf31aae72df3218 | 1,412 | py | Python | Recommender_System/algorithm/NeuMF/train.py | Holldean/Recommender-System | 5c1508b4fb430dc06979353627c4cb873aad490c | [
"MIT"
] | 348 | 2019-11-12T12:20:08.000Z | 2022-03-31T12:34:45.000Z | Recommender_System/algorithm/NeuMF/train.py | Runjeo/Recommender-System | 6a93e6ee970b32c76e2f71043383bf24a7e865d5 | [
"MIT"
] | 15 | 2019-12-04T15:16:15.000Z | 2021-07-21T06:27:38.000Z | Recommender_System/algorithm/NeuMF/train.py | Runjeo/Recommender-System | 6a93e6ee970b32c76e2f71043383bf24a7e865d5 | [
"MIT"
] | 87 | 2019-11-24T10:26:26.000Z | 2022-03-11T05:35:39.000Z | from Recommender_System.algorithm.NeuMF.model import NeuMF_model
from Recommender_System.algorithm.train import train, test
import tensorflow as tf
def train_with_pretrain(n_user, n_item, train_data, test_data, topk_data, gmf_dim, mlp_dim, layers, l2):
    """Pre-train the GMF and MLP towers, fuse their output layers, then fine-tune NeuMF."""
    neumf_model, gmf_model, mlp_model = NeuMF_model(n_user, n_item, gmf_dim=gmf_dim, mlp_dim=mlp_dim, layers=layers, l2=l2)
    print('预训练GMF部分')
    train(gmf_model, train_data, test_data, topk_data, epochs=10, batch=512)
    print('预训练MLP部分')
    train(mlp_model, train_data, test_data, topk_data, epochs=10, batch=512)
    # Fuse the two pre-trained output layers: concatenate the kernels along
    # the input axis, add the biases, and scale both by 0.5 (equal weighting
    # of the GMF and MLP towers).
    out_kernel = tf.concat((gmf_model.get_layer('gmf_out').get_weights()[0], mlp_model.get_layer('mlp_out').get_weights()[0]), 0)
    out_bias = gmf_model.get_layer('gmf_out').get_weights()[1] + mlp_model.get_layer('mlp_out').get_weights()[1]
    neumf_model.get_layer('out').set_weights([out_kernel * 0.5, out_bias * 0.5])
    # Baseline evaluation of the fused model before fine-tuning.
    test(neumf_model, train_data, test_data, topk_data, batch=512)
    # Fine-tune with plain SGD at a small learning rate.
    train(neumf_model, train_data, test_data, topk_data, optimizer=tf.keras.optimizers.SGD(0.0001), epochs=10, batch=512)
def train_without_pretrain(n_user, n_item, train_data, test_data, topk_data, gmf_dim, mlp_dim, layers, l2):
    """Train the fused NeuMF model end to end, skipping GMF/MLP pre-training."""
    models = NeuMF_model(n_user, n_item, gmf_dim=gmf_dim, mlp_dim=mlp_dim, layers=layers, l2=l2)
    neumf_model = models[0]
    train(neumf_model, train_data, test_data, topk_data, epochs=10, batch=512)
| 58.833333 | 130 | 0.746459 | from Recommender_System.algorithm.NeuMF.model import NeuMF_model
from Recommender_System.algorithm.train import train, test
import tensorflow as tf
def train_with_pretrain(n_user, n_item, train_data, test_data, topk_data, gmf_dim, mlp_dim, layers, l2):
neumf_model, gmf_model, mlp_model = NeuMF_model(n_user, n_item, gmf_dim=gmf_dim, mlp_dim=mlp_dim, layers=layers, l2=l2)
print('预训练GMF部分')
train(gmf_model, train_data, test_data, topk_data, epochs=10, batch=512)
print('预训练MLP部分')
train(mlp_model, train_data, test_data, topk_data, epochs=10, batch=512)
out_kernel = tf.concat((gmf_model.get_layer('gmf_out').get_weights()[0], mlp_model.get_layer('mlp_out').get_weights()[0]), 0)
out_bias = gmf_model.get_layer('gmf_out').get_weights()[1] + mlp_model.get_layer('mlp_out').get_weights()[1]
neumf_model.get_layer('out').set_weights([out_kernel * 0.5, out_bias * 0.5])
test(neumf_model, train_data, test_data, topk_data, batch=512)
train(neumf_model, train_data, test_data, topk_data, optimizer=tf.keras.optimizers.SGD(0.0001), epochs=10, batch=512)
def train_without_pretrain(n_user, n_item, train_data, test_data, topk_data, gmf_dim, mlp_dim, layers, l2):
neumf_model, _, _ = NeuMF_model(n_user, n_item, gmf_dim=gmf_dim, mlp_dim=mlp_dim, layers=layers, l2=l2)
train(neumf_model, train_data, test_data, topk_data, epochs=10, batch=512)
| true | true |
f72e3ed1ce8a318b29f3b1bf906dfcddc22f30e6 | 807 | py | Python | main.py | ceilors/maige | bb53c3b858b57865ad8f4dc179d82fb26c12f5ce | [
"MIT"
] | null | null | null | main.py | ceilors/maige | bb53c3b858b57865ad8f4dc179d82fb26c12f5ce | [
"MIT"
] | null | null | null | main.py | ceilors/maige | bb53c3b858b57865ad8f4dc179d82fb26c12f5ce | [
"MIT"
] | null | null | null | from flask import Flask, render_template, request, jsonify
from colors import get_main_colors
from base64 import decodebytes
from io import BytesIO
app = Flask(__name__, static_url_path='')


@app.route('/', methods=['GET', 'POST'])
def index():
    """Serve the color-extraction page and handle image uploads.

    GET renders the page.  POST expects a base64 data URL under 'file' and a
    'hsv' flag ("true"/"false") and returns the image's dominant colors as
    JSON: {'status': 'ok', 'colors': [...]}.
    """
    if request.method == 'POST':
        if request.form:
            # Strip the "data:image/...;base64," prefix from the data URL.
            _, b64data = request.form['file'].split(',')
            # Direct boolean comparison instead of the redundant
            # `True if ... else False` ternary.
            use_hsv = request.form['hsv'].lower() == 'true'
            image = BytesIO(decodebytes(b64data.encode()))
            colors = get_main_colors(image, use_hsv=use_hsv)
            return jsonify({'status': 'ok', 'colors': colors})
        # POST without form data: report the missing upload.
        return jsonify({'status': 'file not uploaded!'})
    return render_template('index.html')


if __name__ == '__main__':
    app.run(debug=True)
| 31.038462 | 78 | 0.627014 | from flask import Flask, render_template, request, jsonify
from colors import get_main_colors
from base64 import decodebytes
from io import BytesIO
app = Flask(__name__, static_url_path='')
@app.route('/', methods=['GET', 'POST'])
def index():
if request.method == 'POST':
if request.form:
_, b64data = request.form['file'].split(',')
use_hsv = True if request.form['hsv'].lower() == 'true' else False
image = BytesIO(decodebytes(b64data.encode()))
colors = get_main_colors(image, use_hsv=use_hsv)
return jsonify({'status': 'ok', 'colors': colors})
else:
return jsonify({'status': 'file not uploaded!'})
else:
return render_template('index.html')
if __name__ == '__main__':
app.run(debug=True)
| true | true |
f72e3eedc75ee9c53a910ab98319340f3975f69d | 10,813 | py | Python | comparxiv/comparxiv.py | lukasschwab/comparxiv | 1e422cc154c483f0b08c943f66bdd91b0f082f84 | [
"MIT"
] | 321 | 2020-04-24T14:54:08.000Z | 2022-03-07T15:36:20.000Z | comparxiv/comparxiv.py | lukasschwab/comparxiv | 1e422cc154c483f0b08c943f66bdd91b0f082f84 | [
"MIT"
] | 10 | 2020-04-23T17:26:24.000Z | 2022-02-16T07:44:11.000Z | comparxiv/comparxiv.py | lukasschwab/comparxiv | 1e422cc154c483f0b08c943f66bdd91b0f082f84 | [
"MIT"
] | 26 | 2020-04-24T22:26:51.000Z | 2022-02-19T11:18:13.000Z | #!/usr/bin/env python
# Standard library
import os
import shutil
import sys
import tarfile
from os.path import join
from sys import platform

# Third-party
import arxiv
import requests
from tqdm import tqdm
version = '0.1.7'
author = 'Timon Emken'
year = '2020'
temp_folder = ".temp_comparxiv"
def compare_preprints(arxiv_ID, version_a, version_b,keep_temp,show_latex_output,dont_open_pdf,dont_compare_equations):
	"""Download two versions of an arXiv preprint, diff them, and build a PDF.

	Fetches the LaTeX sources of versions *version_a* and *version_b* of
	*arxiv_ID*, runs latexdiff on the master .tex (and, when found, .bbl)
	files, compiles the diff with pdflatex, and moves the resulting PDF into
	the current working directory.  Returns True on success, False otherwise.

	keep_temp: keep the temporary download/build folder afterwards.
	show_latex_output: forward latexdiff/pdflatex terminal output.
	dont_open_pdf: do not open the generated PDF in a viewer.
	dont_compare_equations: disable math markup in latexdiff (works around a
	common latexdiff failure mode with math environments).
	"""
	check_arguments(arxiv_ID, version_a, version_b)
	print_title(arxiv_ID, version_a, version_b)
	#Check if old or new arxiv ID (old-style IDs contain a slash, e.g. hep-th/9901001)
	if "/" in arxiv_ID:
		ID_a = os.path.split(arxiv_ID)[-1]+"v"+str(version_a)
		ID_b = os.path.split(arxiv_ID)[-1]+"v"+str(version_b)
	else:
		ID_a = arxiv_ID+"v"+str(version_a)
		ID_b = arxiv_ID+"v"+str(version_b)
	#Create folder for temporary files
	if os.path.exists(temp_folder) == False:
		os.mkdir(temp_folder)
	temp_folder_a = join(temp_folder, 'temp_' + ID_a)
	temp_folder_b = join(temp_folder, 'temp_' + ID_b)
	# Output names for the diff artifacts, e.g. "1234.5678_v1v2.tex/.bbl/.pdf".
	diff_file = os.path.split(arxiv_ID)[-1]+"_v"+str(version_a)+"v"+str(version_b)
	diff_file_tex = diff_file + ".tex"
	diff_file_bbl = diff_file + ".bbl"
	diff_file_pdf = diff_file + ".pdf"
	print_paper_information(arxiv_ID,version_a,version_b)
	#1. Download and unpack files
	print("1.) Download and unpack source files:")
	download_from_arxiv(arxiv_ID,version_a)
	download_from_arxiv(arxiv_ID,version_b)
	unpack_source_files(arxiv_ID,version_a,temp_folder_a)
	unpack_source_files(arxiv_ID,version_b,temp_folder_b)
	#2. Identify the .tex and .bbl files.
	#2.1 tex files
	print("\n2.1) Identify master tex files:")
	master_file_a = identify_master_tex_file(temp_folder_a,arxiv_ID)
	master_file_b = identify_master_tex_file(temp_folder_b,arxiv_ID)
	#2.2 bbl files
	print("\n2.2) Identify bbl files:")
	bbl_file_a = identify_bbl_file(temp_folder_a,arxiv_ID)
	bbl_file_b = identify_bbl_file(temp_folder_b,arxiv_ID)
	#3. Latexdiff
	#3.1 tex files (output is redirected into the diff .tex in folder b)
	print("\n3.1) Run latexdiff on the tex files.")
	latexdiff_command_tex = "latexdiff "
	if show_latex_output == False:
		latexdiff_command_tex += "--ignore-warnings "
	if dont_compare_equations:
		latexdiff_command_tex += "--math-markup=0 "
	latexdiff_command_tex += join(temp_folder_a, master_file_a) + " " + join(temp_folder_b,master_file_b) + ">" + join(temp_folder_b, diff_file_tex)
	os.system(latexdiff_command_tex)
	#3.2 Try to run latexdiff on bbl (only when both versions have one).
	if bbl_file_a != None and bbl_file_b != None:
		print("\n3.2) Run latexdiff on the bbl files.")
		latexdiff_command_bbl = "latexdiff "
		if show_latex_output == False:
			latexdiff_command_bbl += "--ignore-warnings "
		latexdiff_command_bbl += join(temp_folder_a, bbl_file_a) + " " + join(temp_folder_b, bbl_file_b) + ">" + join(temp_folder_b, diff_file_bbl)
		os.system(latexdiff_command_bbl)
	#4. Run pdflatex
	print("\n4.) Generate a pdf with pdflatex.")
	Generate_PDF(diff_file_tex,temp_folder_b,show_latex_output)
	#5. If unsuccessful, try again with a copy of the version b .bbl file.
	if bbl_file_b != None and os.path.isfile( join(temp_folder_b,diff_file_pdf) ) == False:
		print("\tWarning: No pdf could be generated. Copy the .bbl file of version b and try again.")
		shutil.copyfile( join(temp_folder_b, bbl_file_b), join(temp_folder_b, diff_file_bbl))
		Generate_PDF(diff_file_tex,temp_folder_b,show_latex_output)
	success = False;
	if os.path.isfile( join(temp_folder_b, diff_file_pdf)):
		success = True
	#6. Compare figures
	# todo
	#7. If successful copy the .pdf and open it with the platform's viewer.
	if success:
		os.rename( join(temp_folder_b, diff_file_pdf), join(diff_file_pdf) )
		if dont_open_pdf == False:
			if platform == "linux" or platform == "linux2":
				os.system("xdg-open "+diff_file_pdf)
			elif platform == "darwin":
				os.system("open "+diff_file_pdf)
			elif platform == "win32":
				os.startfile(diff_file_pdf)
		print("\nSuccess!")
	else:
		print("\nFailure! No pdf file could be generated.\nTroubleshooting:")
		print("\t1.) To see more terminal output run:\n\t\t'comparxiv --show_latex_output %s %i %i'" % (arxiv_ID, version_a, version_b))
		print("\t2.) In some cases latex math environments cause problems with latexdiff. Try running:\n\t\t'comparxiv --dont_compare_equations %s %i %i'" % (arxiv_ID, version_a, version_b))
	#8. Delete temporary files
	if keep_temp == False:
		shutil.rmtree(temp_folder)
	return success
def print_paper_information(arxiv_ID,vA,vB):
	"""Print title and author line for the preprint, comparing both versions.

	Queries the arXiv API for versions vA and vB; if the title changed between
	versions, both titles are shown.  Author lists longer than six entries are
	abbreviated to "first author et al.".
	"""
	papers = arxiv.query(query="",
		id_list=[arxiv_ID + "v" + str(vA),arxiv_ID + "v" + str(vB)],
		max_results=2)
	if papers[0].title != papers[1].title:
		print("New title:\t%s" % papers[1].title)
		print("Old title:\t%s" % papers[0].title)
	else:
		print("Title:\t\t%s" % papers[1].title)
	if len(papers[1].authors) == 1:
		print("Author:\t\t%s\n" % papers[1].authors[0])
	elif len(papers[1].authors) > 6:
		print("Authors:\t%s et al.\n" % papers[1].authors[0])
	else:
		# ", " . join(...) is str.join with unusual spacing around the dot.
		print("Authors:\t",", " . join(papers[1].authors),"\n")
def check_arguments(arxiv_ID,vA,vB):
	"""Abort with a helpful message unless vA and vB are distinct, existing versions.

	Validates that the two requested versions differ, that the paper has more
	than one version at all, and that neither requested version exceeds the
	latest available one; in the latter case a corrected command line is
	suggested before aborting.
	"""
	#1. Check for identical versions
	if vA == vB:
		print("Error:\tVersions to compare are identical.")
		os.abort()
	#2. Check if paper exists and has multiple versions.
	latest_version = latest_available_version(arxiv_ID)
	if latest_version == 1:
		print("Error: The paper [%s] has only one version." % (arxiv_ID))
		os.abort()
	#3. Check existence of versions: If none or only one of the versions can be found, generate some meaningful error message.
	elif vA > latest_version or vB > latest_version:
		if vA > latest_version and vB > latest_version:
			# Both versions missing: suggest the two most recent ones.
			missing_version = "v%i or v%i"%(vA,vB)
			suggestion_a = latest_version-1
			suggestion_b = latest_version
		elif vA > latest_version:
			# Only vA missing: clamp it to the latest, nudging vB down if needed.
			missing_version = "v%i"%(vA)
			suggestion_a = latest_version
			if vB == latest_version:
				suggestion_b = vB - 1
			else:
				suggestion_b = vB
		elif vB > latest_version:
			# Only vB missing: clamp it to the latest, nudging vA down if needed.
			missing_version = "v%i"%(vB)
			suggestion_b = latest_version
			if vA == latest_version:
				suggestion_a = vA - 1
			else:
				suggestion_a = vA
		print("Error:\tThe preprint [%s] does not have a version %s. The latest available version is v%i.\n\tTry running 'comparxiv %s %i %i'." % (arxiv_ID,missing_version,latest_version,arxiv_ID,suggestion_a,suggestion_b))
		os.abort()
def latest_available_version(arxiv_ID):
	"""Return the highest version number of *arxiv_ID* on the arXiv server.

	Aborts if the paper cannot be found.  Probes versions linearly (v2, v3,
	...) until a query fails, capped at 100 probes as a safety limit.
	"""
	papers= arxiv.query(query="",
		id_list=[arxiv_ID],
		max_results=1)
	if len(papers) == 0:
		print("Error: The paper [%s] cannot be found on the preprint server." % (arxiv_ID))
		os.abort()
	version_max = 1
	while version_max < 100:
		paper = arxiv.query(query="",
			id_list=[arxiv_ID+"v"+str(version_max + 1)],
			max_results=1)
		# The API may fall back to another version; confirm the returned id
		# really carries the probed version suffix before counting it.
		if len(paper) > 0 and paper[0].id.split("v")[-1] == str(version_max + 1) :
			version_max += 1
		else:
			break
	return version_max
def Generate_PDF(tex_file, folder, show_latex_output):
	"""Compile *tex_file* inside *folder* with pdflatex, then restore the cwd.

	pdflatex is invoked twice so cross-references and the bibliography
	resolve.  When *show_latex_output* is false, compiler output is discarded
	via the platform-appropriate null device.
	"""
	previous_dir = os.getcwd()
	os.chdir(folder)
	command = "pdflatex -interaction=nonstopmode " + tex_file
	if not show_latex_output:
		# The null device and redirection syntax differ on Windows vs POSIX.
		command += " > nul 2>&1" if platform == "win32" else " 2>&1 > /dev/null"
	command_runs = 2
	for _ in range(command_runs):
		os.system(command)
	os.chdir(previous_dir)
#Download the files from the preprint server, if it hasn't been done before.
def download_from_url(url, destination):
	"""Download *url* to *destination* with a progress bar, resuming partial files.

	Uses an HTTP Range header to continue from the bytes already present in
	*destination*.  Returns the total file size in bytes (also when the file
	is already complete and nothing is downloaded).
	"""
	# NOTE(review): assumes the server reports Content-Length on HEAD and
	# honors Range requests -- arxiv.org does; other hosts may not.
	file_size = int(requests.head(url).headers["Content-Length"])
	if os.path.exists(destination):
		first_byte = os.path.getsize(destination)
	else:
		first_byte = 0
	if first_byte >= file_size:
		return file_size
	header = {"Range": "bytes=%s-%s" % (first_byte, file_size)}
	pbar = tqdm(
		total=file_size, initial=first_byte,
		unit='B', unit_scale=True, desc=url.split('/')[-1])
	req = requests.get(url, headers=header, stream=True)
	# Append mode so a resumed download continues the existing partial file.
	with(open(destination, 'ab')) as f:
		for chunk in req.iter_content(chunk_size=1024):
			if chunk:
				f.write(chunk)
				pbar.update(1024)
	pbar.close()
	return file_size
def download_from_arxiv(arxiv_ID, version):
	"""Fetch the source tarball for one preprint version into the temp folder.

	The download is skipped when the file is already cached locally.
	Old-style IDs (containing a slash) keep only their last path component
	for the local file name.
	"""
	version_tag = arxiv_ID + "v" + str(version)
	local_name = os.path.split(version_tag)[-1] if "/" in arxiv_ID else version_tag
	filepath = join(temp_folder, local_name)
	if os.path.isfile(filepath):
		print("\tDownload of source files for [%sv%i] not necessary." % (arxiv_ID, version))
	else:
		download_from_url("https://arxiv.org/src/" + version_tag, filepath)
#Unpack the archived files to a temporary folder
def unpack_source_files(arxiv_ID,version,path_destination):
	"""Extract the cached source archive of one preprint version.

	Locates the gzipped tarball downloaded by download_from_arxiv() inside
	temp_folder and extracts it into *path_destination* (created if missing).
	Does nothing when the archive has not been downloaded.
	"""
	version_ID = arxiv_ID + "v" + str(version)
	#Check if old or new arxiv ID (old-style IDs contain a slash)
	if "/" in arxiv_ID:
		path_source = join(temp_folder, os.path.split(version_ID)[-1])
	else:
		path_source = join(temp_folder, version_ID)
	if os.path.isfile(path_source):
		os.makedirs(path_destination, exist_ok=True)
		# Use the stdlib tarfile module instead of os.system('tar -xzf ...'):
		# portable (works on Windows) and immune to shell metacharacters in
		# the constructed paths.
		with tarfile.open(path_source, "r:gz") as archive:
			archive.extractall(path_destination)
def identify_master_tex_file(path,arxiv_ID):
	"""Return the name of the main LaTeX file in *path*, aborting on failure.

	Candidates are .tex files not named after the arXiv ID (those are diff
	outputs produced by this tool).  With several candidates, the one
	containing \\begin{document} wins; with exactly one, it wins directly;
	with none -- but exactly one file overall -- that file is assumed to be
	the unpacked tex source and is renamed with a .tex suffix.
	"""
	master_file = None
	tex_files = []
	files = os.listdir(path)
	for file in files:
		if file.endswith(".tex") and not (file.startswith(arxiv_ID) or file.startswith(os.path.split(arxiv_ID)[-1])):
			tex_files.append(file)
	if len(tex_files) > 1:
		for file in tex_files:
			with open( join(path,file) ) as f:
				if 'begin{document}' in f.read():
					master_file = file
					break
	elif len(tex_files) == 1:
		master_file = tex_files[0]
	elif len(tex_files) == 0 and len(files)==1:
		# `file` still holds the single entry from the loop above
		# (there is exactly one file, so the loop body ran once).
		master_file = file + ".tex"
		os.rename( join(path, file), join(path, master_file))
	if master_file == None:
		print("Error in identify_master_tex_file(): Among the %i tex files, no master file could be identified." % len(tex_files))
		os.abort()
	else:
		print("\t%sv%s:\t%s" % (arxiv_ID, path.split('v')[-1], master_file))
	return master_file
def identify_bbl_file(path, arxiv_ID):
	"""Return the name of the bibliography (.bbl) file in *path*, or None.

	Files whose name starts with the arXiv ID are diff artifacts produced by
	this tool and are therefore skipped.
	"""
	candidates = (f for f in os.listdir(path)
		if f.endswith('.bbl') and not f.startswith(arxiv_ID))
	bbl_file = next(candidates, None)
	print("\t%sv%s:\t%s" % (arxiv_ID, path.split('v')[-1], bbl_file))
	return bbl_file
def print_title(ID, v1, v2):
	"""Print the ASCII-art banner, tool version info, and the comparison summary."""
	banner = " __ ___ \n ___ ___ _ __ ___ _ __ __ _ _ _\ \/ (_)_ __\n / __/ _ \| '_ ` _ \| '_ \ / _` | '__\ /| \ \ / /\n| (_| (_) | | | | | | |_) | (_| | | / \| |\ V / \n \___\___/|_| |_| |_| .__/ \__,_|_| /_/\_\_| \_/ \n |_| \n"
	print(banner)
	print(f"Version {version}, developed by {author} ({year})")
	print(f"\nCompare [{ID}]: v{v1} vs v{v2}\n")
if __name__ == "__main__":
	# Minimal CLI entry point: comparxiv <arxiv_ID> <version_a> <version_b>.
	arxiv_ID = str(sys.argv[1])
	version_a = int(sys.argv[2])
	version_b = int(sys.argv[3])
	# Optional behaviours are all disabled when the module is run directly;
	# the installed console script exposes them as flags instead.
	keep_temp = False
	show_latex_output = False
	dont_open_pdf = False
	dont_compare_equations = False
	compare_preprints(arxiv_ID,version_a,version_b,keep_temp,show_latex_output,dont_open_pdf,dont_compare_equations)
| 35.107143 | 328 | 0.70258 |
import os
import sys
import shutil
import arxiv
import requests
from sys import platform
from tqdm import tqdm
from os.path import join
version = '0.1.7'
author = 'Timon Emken'
year = '2020'
temp_folder = ".temp_comparxiv"
def compare_preprints(arxiv_ID, version_a, version_b,keep_temp,show_latex_output,dont_open_pdf,dont_compare_equations):
check_arguments(arxiv_ID, version_a, version_b)
print_title(arxiv_ID, version_a, version_b)
if "/" in arxiv_ID:
ID_a = os.path.split(arxiv_ID)[-1]+"v"+str(version_a)
ID_b = os.path.split(arxiv_ID)[-1]+"v"+str(version_b)
else:
ID_a = arxiv_ID+"v"+str(version_a)
ID_b = arxiv_ID+"v"+str(version_b)
if os.path.exists(temp_folder) == False:
os.mkdir(temp_folder)
temp_folder_a = join(temp_folder, 'temp_' + ID_a)
temp_folder_b = join(temp_folder, 'temp_' + ID_b)
diff_file = os.path.split(arxiv_ID)[-1]+"_v"+str(version_a)+"v"+str(version_b)
diff_file_tex = diff_file + ".tex"
diff_file_bbl = diff_file + ".bbl"
diff_file_pdf = diff_file + ".pdf"
print_paper_information(arxiv_ID,version_a,version_b)
print("1.) Download and unpack source files:")
download_from_arxiv(arxiv_ID,version_a)
download_from_arxiv(arxiv_ID,version_b)
unpack_source_files(arxiv_ID,version_a,temp_folder_a)
unpack_source_files(arxiv_ID,version_b,temp_folder_b)
print("\n2.1) Identify master tex files:")
master_file_a = identify_master_tex_file(temp_folder_a,arxiv_ID)
master_file_b = identify_master_tex_file(temp_folder_b,arxiv_ID)
print("\n2.2) Identify bbl files:")
bbl_file_a = identify_bbl_file(temp_folder_a,arxiv_ID)
bbl_file_b = identify_bbl_file(temp_folder_b,arxiv_ID)
print("\n3.1) Run latexdiff on the tex files.")
latexdiff_command_tex = "latexdiff "
if show_latex_output == False:
latexdiff_command_tex += "--ignore-warnings "
if dont_compare_equations:
latexdiff_command_tex += "--math-markup=0 "
latexdiff_command_tex += join(temp_folder_a, master_file_a) + " " + join(temp_folder_b,master_file_b) + ">" + join(temp_folder_b, diff_file_tex)
os.system(latexdiff_command_tex)
if bbl_file_a != None and bbl_file_b != None:
print("\n3.2) Run latexdiff on the bbl files.")
latexdiff_command_bbl = "latexdiff "
if show_latex_output == False:
latexdiff_command_bbl += "--ignore-warnings "
latexdiff_command_bbl += join(temp_folder_a, bbl_file_a) + " " + join(temp_folder_b, bbl_file_b) + ">" + join(temp_folder_b, diff_file_bbl)
os.system(latexdiff_command_bbl)
print("\n4.) Generate a pdf with pdflatex.")
Generate_PDF(diff_file_tex,temp_folder_b,show_latex_output)
if bbl_file_b != None and os.path.isfile( join(temp_folder_b,diff_file_pdf) ) == False:
print("\tWarning: No pdf could be generated. Copy the .bbl file of version b and try again.")
shutil.copyfile( join(temp_folder_b, bbl_file_b), join(temp_folder_b, diff_file_bbl))
Generate_PDF(diff_file_tex,temp_folder_b,show_latex_output)
success = False;
if os.path.isfile( join(temp_folder_b, diff_file_pdf)):
success = True
if success:
os.rename( join(temp_folder_b, diff_file_pdf), join(diff_file_pdf) )
if dont_open_pdf == False:
if platform == "linux" or platform == "linux2":
os.system("xdg-open "+diff_file_pdf)
elif platform == "darwin":
os.system("open "+diff_file_pdf)
elif platform == "win32":
os.startfile(diff_file_pdf)
print("\nSuccess!")
else:
print("\nFailure! No pdf file could be generated.\nTroubleshooting:")
print("\t1.) To see more terminal output run:\n\t\t'comparxiv --show_latex_output %s %i %i'" % (arxiv_ID, version_a, version_b))
print("\t2.) In some cases latex math environments cause problems with latexdiff. Try running:\n\t\t'comparxiv --dont_compare_equations %s %i %i'" % (arxiv_ID, version_a, version_b))
if keep_temp == False:
shutil.rmtree(temp_folder)
return success
def print_paper_information(arxiv_ID,vA,vB):
papers = arxiv.query(query="",
id_list=[arxiv_ID + "v" + str(vA),arxiv_ID + "v" + str(vB)],
max_results=2)
if papers[0].title != papers[1].title:
print("New title:\t%s" % papers[1].title)
print("Old title:\t%s" % papers[0].title)
else:
print("Title:\t\t%s" % papers[1].title)
if len(papers[1].authors) == 1:
print("Author:\t\t%s\n" % papers[1].authors[0])
elif len(papers[1].authors) > 6:
print("Authors:\t%s et al.\n" % papers[1].authors[0])
else:
print("Authors:\t",", " . join(papers[1].authors),"\n")
def check_arguments(arxiv_ID,vA,vB):
if vA == vB:
print("Error:\tVersions to compare are identical.")
os.abort()
latest_version = latest_available_version(arxiv_ID)
if latest_version == 1:
print("Error: The paper [%s] has only one version." % (arxiv_ID))
os.abort()
elif vA > latest_version or vB > latest_version:
if vA > latest_version and vB > latest_version:
missing_version = "v%i or v%i"%(vA,vB)
suggestion_a = latest_version-1
suggestion_b = latest_version
elif vA > latest_version:
missing_version = "v%i"%(vA)
suggestion_a = latest_version
if vB == latest_version:
suggestion_b = vB - 1
else:
suggestion_b = vB
elif vB > latest_version:
missing_version = "v%i"%(vB)
suggestion_b = latest_version
if vA == latest_version:
suggestion_a = vA - 1
else:
suggestion_a = vA
print("Error:\tThe preprint [%s] does not have a version %s. The latest available version is v%i.\n\tTry running 'comparxiv %s %i %i'." % (arxiv_ID,missing_version,latest_version,arxiv_ID,suggestion_a,suggestion_b))
os.abort()
def latest_available_version(arxiv_ID):
papers= arxiv.query(query="",
id_list=[arxiv_ID],
max_results=1)
if len(papers) == 0:
print("Error: The paper [%s] cannot be found on the preprint server." % (arxiv_ID))
os.abort()
version_max = 1
while version_max < 100:
paper = arxiv.query(query="",
id_list=[arxiv_ID+"v"+str(version_max + 1)],
max_results=1)
if len(paper) > 0 and paper[0].id.split("v")[-1] == str(version_max + 1) :
version_max += 1
else:
break
return version_max
def Generate_PDF(tex_file, folder, show_latex_output):
owd = os.getcwd()
os.chdir(folder)
pdflatex_command = "pdflatex -interaction=nonstopmode " + tex_file
if show_latex_output == False:
if platform == "win32":
pdflatex_command += " > nul 2>&1"
else:
pdflatex_command += " 2>&1 > /dev/null"
os.system(pdflatex_command)
os.system(pdflatex_command)
os.chdir(owd)
def download_from_url(url, destination):
file_size = int(requests.head(url).headers["Content-Length"])
if os.path.exists(destination):
first_byte = os.path.getsize(destination)
else:
first_byte = 0
if first_byte >= file_size:
return file_size
header = {"Range": "bytes=%s-%s" % (first_byte, file_size)}
pbar = tqdm(
total=file_size, initial=first_byte,
unit='B', unit_scale=True, desc=url.split('/')[-1])
req = requests.get(url, headers=header, stream=True)
with(open(destination, 'ab')) as f:
for chunk in req.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
pbar.update(1024)
pbar.close()
return file_size
def download_from_arxiv(arxiv_ID,version):
#Check if old or new arxiv ID
if "/" in arxiv_ID:
filepath = join(temp_folder, os.path.split(arxiv_ID)[-1]+"v"+str(version))
else:
filepath = join(temp_folder, arxiv_ID+"v"+str(version))
if os.path.isfile(filepath) == False:
url="https://arxiv.org/src/"+arxiv_ID+"v"+str(version)
download_from_url(url,filepath)
else:
print("\tDownload of source files for [%sv%i] not necessary." % (arxiv_ID, version))
#Unpack the archived files to a temporary folder
def unpack_source_files(arxiv_ID,version,path_destination):
version_ID = arxiv_ID + "v" + str(version)
#Check if old or new arxiv ID
if "/" in arxiv_ID:
path_source = join(temp_folder, os.path.split(version_ID)[-1])
else:
path_source = join(temp_folder, version_ID)
# Create folder for temporary files
if os.path.isfile(path_source) and os.path.exists(path_destination) == False:
os.makedirs(path_destination)
# Unpack files
os.system('tar -xzf '+path_source +' -C '+ path_destination)
def identify_master_tex_file(path,arxiv_ID):
master_file = None
tex_files = []
files = os.listdir(path)
for file in files:
if file.endswith(".tex") and not (file.startswith(arxiv_ID) or file.startswith(os.path.split(arxiv_ID)[-1])):
tex_files.append(file)
if len(tex_files) > 1:
for file in tex_files:
with open( join(path,file) ) as f:
if 'begin{document}' in f.read():
master_file = file
break
elif len(tex_files) == 1:
master_file = tex_files[0]
elif len(tex_files) == 0 and len(files)==1:
master_file = file + ".tex"
os.rename( join(path, file), join(path, master_file))
if master_file == None:
print("Error in identify_master_tex_file(): Among the %i tex files, no master file could be identified." % len(tex_files))
os.abort()
else:
print("\t%sv%s:\t%s" % (arxiv_ID, path.split('v')[-1], master_file))
return master_file
def identify_bbl_file(path, arxiv_ID):
# Possibility a: A .bbl file exists.
for file in os.listdir(path):
if file.endswith('.bbl') and not file.startswith(arxiv_ID):
bbl_file = file
break
# Possibility b: No .bbl file exists.
else:
bbl_file = None
print("\t%sv%s:\t%s" % (arxiv_ID, path.split('v')[-1], bbl_file))
return bbl_file
def print_title(ID,v1,v2):
asci_title = " __ ___ \n ___ ___ _ __ ___ _ __ __ _ _ _\ \/ (_)_ __\n / __/ _ \| '_ ` _ \| '_ \ / _` | '__\ /| \ \ / /\n| (_| (_) | | | | | | |_) | (_| | | / \| |\ V / \n \___\___/|_| |_| |_| .__/ \__,_|_| /_/\_\_| \_/ \n |_| \n"
print(asci_title)
print("Version %s, developed by %s (%s)" % (version, author, year))
print("\nCompare [%s]: v%i vs v%i\n" % (ID,v1,v2))
if __name__ == "__main__":
arxiv_ID = str(sys.argv[1])
version_a = int(sys.argv[2])
version_b = int(sys.argv[3])
keep_temp = False
show_latex_output = False
dont_open_pdf = False
dont_compare_equations = False
compare_preprints(arxiv_ID,version_a,version_b,keep_temp,show_latex_output,dont_open_pdf,dont_compare_equations)
| true | true |
f72e3f4721a941cae58767e3a41507699843bbef | 1,483 | py | Python | Day 05/password_generator.py | Dheer08/100-days-of-code | 05d0e5e6613f924ab083e13f28a7a0446bd34434 | [
"MIT"
] | null | null | null | Day 05/password_generator.py | Dheer08/100-days-of-code | 05d0e5e6613f924ab083e13f28a7a0446bd34434 | [
"MIT"
] | null | null | null | Day 05/password_generator.py | Dheer08/100-days-of-code | 05d0e5e6613f924ab083e13f28a7a0446bd34434 | [
"MIT"
] | null | null | null | import random
letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
numbers = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
symbols = ['!', '#', '$', '%', '&', '(', ')', '*', '+']
print("Welcome to PyPassword Generator")
num_letters = int(input("Enter No.letters would you like in PyPassword : "))
num_symbols = int(input("Enter No.symbols would you like in PyPassword : "))
num_numbers = int(input("Enter No.numbers would you like in PyPassword : "))
# Easy version - fhfh^&23 (order not randomized)
password = ""
for char in range(1,num_letters+1):
password += random.choice(letters)
for char in range(1,num_symbols+1):
password += random.choice(symbols)
for char in range(1,num_numbers+1):
password += random.choice(numbers)
# print("password")
# Advanced version - g^2j8k& (random order)
password_list = []
for char in range(1,num_letters+1):
password_list.append(random.choice(letters))
for char in range(1,num_symbols+1):
password_list.append(random.choice(symbols))
for char in range(1,num_numbers+1):
password_list.append(random.choice(numbers))
# print(password_list)
random.shuffle(password_list)
# print(password_list)
password = ""
for char in password_list :
password += char
print(f"Your Password : {password}") | 38.025641 | 270 | 0.597438 | import random
letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
numbers = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
symbols = ['!', '#', '$', '%', '&', '(', ')', '*', '+']
print("Welcome to PyPassword Generator")
num_letters = int(input("Enter No.letters would you like in PyPassword : "))
num_symbols = int(input("Enter No.symbols would you like in PyPassword : "))
num_numbers = int(input("Enter No.numbers would you like in PyPassword : "))
password = ""
for char in range(1,num_letters+1):
password += random.choice(letters)
for char in range(1,num_symbols+1):
password += random.choice(symbols)
for char in range(1,num_numbers+1):
password += random.choice(numbers)
password_list = []
for char in range(1,num_letters+1):
password_list.append(random.choice(letters))
for char in range(1,num_symbols+1):
password_list.append(random.choice(symbols))
for char in range(1,num_numbers+1):
password_list.append(random.choice(numbers))
random.shuffle(password_list)
password = ""
for char in password_list :
password += char
print(f"Your Password : {password}") | true | true |
f72e3f481b6ee7faa4b350db4971d27d5f501b51 | 2,523 | py | Python | tests/components/dynalite/common.py | domwillcode/home-assistant | f170c80bea70c939c098b5c88320a1c789858958 | [
"Apache-2.0"
] | 6 | 2020-07-18T16:33:25.000Z | 2021-09-26T09:52:04.000Z | tests/components/dynalite/common.py | domwillcode/home-assistant | f170c80bea70c939c098b5c88320a1c789858958 | [
"Apache-2.0"
] | 56 | 2020-08-03T07:30:54.000Z | 2022-03-31T06:02:04.000Z | tests/components/dynalite/common.py | klauern/home-assistant-core | c18ba6aec0627e6afb6442c678edb5ff2bb17db6 | [
"Apache-2.0"
] | 5 | 2020-03-29T00:29:13.000Z | 2021-09-06T20:58:40.000Z | """Common functions for tests."""
from homeassistant.components import dynalite
from homeassistant.helpers import entity_registry
from tests.async_mock import AsyncMock, Mock, call, patch
from tests.common import MockConfigEntry
ATTR_SERVICE = "service"
ATTR_METHOD = "method"
ATTR_ARGS = "args"
def create_mock_device(platform, spec):
    """Create a dynalite mock device for a platform according to a spec."""
    device = Mock(spec=spec)
    # configure_mock is required for the `name` attribute, which the Mock
    # constructor would otherwise treat as the mock's own repr name.
    device.configure_mock(
        category=platform,
        unique_id="UNIQUE",
        name="NAME",
        device_class="Device Class",
    )
    return device
async def get_entry_id_from_hass(hass):
    """Return the entry_id of the single Dynalite config entry in hass.

    Asserts that the entity registry is available and that exactly one
    Dynalite config entry has been set up.
    """
    ent_reg = await entity_registry.async_get_registry(hass)
    assert ent_reg
    conf_entries = hass.config_entries.async_entries(dynalite.DOMAIN)
    assert len(conf_entries) == 1
    return conf_entries[0].entry_id
async def create_entity_from_device(hass, device):
    """Set up the component and platform and create a light based on the device provided.

    Patches the Dynalite bridge's DynaliteDevices, sets up a mock config
    entry, then invokes the bridge's new-device callback with *device* so the
    corresponding entity is created.  Returns the bridge's update-device
    callback so tests can push state changes.
    """
    host = "1.2.3.4"
    entry = MockConfigEntry(domain=dynalite.DOMAIN, data={dynalite.CONF_HOST: host})
    entry.add_to_hass(hass)
    with patch(
        "homeassistant.components.dynalite.bridge.DynaliteDevices"
    ) as mock_dyn_dev:
        mock_dyn_dev().async_setup = AsyncMock(return_value=True)
        assert await hass.config_entries.async_setup(entry.entry_id)
        await hass.async_block_till_done()
        # mock_calls[1] is the second recorded call (the constructor call made
        # during setup); its kwargs carry the bridge callbacks.
        new_device_func = mock_dyn_dev.mock_calls[1][2]["new_device_func"]
        new_device_func([device])
        await hass.async_block_till_done()
        return mock_dyn_dev.mock_calls[1][2]["update_device_func"]
async def run_service_tests(hass, device, platform, services):
    """Run a series of service calls and check that the entity and device behave correctly.

    For each entry in *services* (a dict with ATTR_SERVICE, ATTR_METHOD, and
    optional ATTR_ARGS), calls the service on the platform's entity and then
    verifies that exactly the expected device method was invoked with the
    expected arguments -- and that no other listed method was called.
    """
    for cur_item in services:
        service = cur_item[ATTR_SERVICE]
        args = cur_item.get(ATTR_ARGS, {})
        service_data = {"entity_id": f"{platform}.name", **args}
        await hass.services.async_call(platform, service, service_data, blocking=True)
        await hass.async_block_till_done()
        for check_item in services:
            check_method = getattr(device, check_item[ATTR_METHOD])
            if check_item[ATTR_SERVICE] == service:
                # The triggered service must have called its method exactly
                # once, with exactly the declared keyword arguments.
                check_method.assert_called_once()
                assert check_method.mock_calls == [call(**args)]
                check_method.reset_mock()
            else:
                check_method.assert_not_called()
| 38.815385 | 94 | 0.705906 | from homeassistant.components import dynalite
from homeassistant.helpers import entity_registry
from tests.async_mock import AsyncMock, Mock, call, patch
from tests.common import MockConfigEntry
ATTR_SERVICE = "service"
ATTR_METHOD = "method"
ATTR_ARGS = "args"
def create_mock_device(platform, spec):
device = Mock(spec=spec)
device.category = platform
device.unique_id = "UNIQUE"
device.name = "NAME"
device.device_class = "Device Class"
return device
async def get_entry_id_from_hass(hass):
ent_reg = await entity_registry.async_get_registry(hass)
assert ent_reg
conf_entries = hass.config_entries.async_entries(dynalite.DOMAIN)
assert len(conf_entries) == 1
return conf_entries[0].entry_id
async def create_entity_from_device(hass, device):
host = "1.2.3.4"
entry = MockConfigEntry(domain=dynalite.DOMAIN, data={dynalite.CONF_HOST: host})
entry.add_to_hass(hass)
with patch(
"homeassistant.components.dynalite.bridge.DynaliteDevices"
) as mock_dyn_dev:
mock_dyn_dev().async_setup = AsyncMock(return_value=True)
assert await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
new_device_func = mock_dyn_dev.mock_calls[1][2]["new_device_func"]
new_device_func([device])
await hass.async_block_till_done()
return mock_dyn_dev.mock_calls[1][2]["update_device_func"]
async def run_service_tests(hass, device, platform, services):
for cur_item in services:
service = cur_item[ATTR_SERVICE]
args = cur_item.get(ATTR_ARGS, {})
service_data = {"entity_id": f"{platform}.name", **args}
await hass.services.async_call(platform, service, service_data, blocking=True)
await hass.async_block_till_done()
for check_item in services:
check_method = getattr(device, check_item[ATTR_METHOD])
if check_item[ATTR_SERVICE] == service:
check_method.assert_called_once()
assert check_method.mock_calls == [call(**args)]
check_method.reset_mock()
else:
check_method.assert_not_called()
| true | true |
f72e3fc70899c2619f3a52ac0851c7d574b246cd | 78 | py | Python | app/db.py | AntonOni/my-flasktodo | 8e4ea7aca25e0b0ea11d4d2ea3fb322b55cfcd3a | [
"MIT"
] | null | null | null | app/db.py | AntonOni/my-flasktodo | 8e4ea7aca25e0b0ea11d4d2ea3fb322b55cfcd3a | [
"MIT"
] | 2 | 2021-03-25T23:44:31.000Z | 2022-03-29T22:01:18.000Z | app/db.py | AntonOni/my-flasktodo | 8e4ea7aca25e0b0ea11d4d2ea3fb322b55cfcd3a | [
"MIT"
] | null | null | null | import dataset
db = dataset.connect('sqlite:///tasks.db')
tasks = db['tasks'] | 19.5 | 42 | 0.692308 | import dataset
db = dataset.connect('sqlite:///tasks.db')
tasks = db['tasks'] | true | true |
f72e407aacff036f6a27dabd01cc1a84b4e4a8ab | 2,502 | py | Python | test_nerf.py | dingjr27/nerf | b0e0554022f66d65705d3134c4cfdd71429eb574 | [
"MIT"
] | null | null | null | test_nerf.py | dingjr27/nerf | b0e0554022f66d65705d3134c4cfdd71429eb574 | [
"MIT"
] | null | null | null | test_nerf.py | dingjr27/nerf | b0e0554022f66d65705d3134c4cfdd71429eb574 | [
"MIT"
] | null | null | null | import os, sys
# os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
import tensorflow as tf
tf.compat.v1.enable_eager_execution()
sys.path.append(r'/home/luca/Desktop/NERFPosit/Inference')
import numpy as np
import imageio
import json
import random
import time
import pprint
from tensorflow import keras
from tensorflow.keras import layers
import matplotlib.pyplot as plt
import run_nerf
from load_llff import load_llff_data
from load_deepvoxels import load_dv_data
from load_blender import load_blender_data
basedir = './logs'
expname = 'fern_example'
config = os.path.join(basedir, expname, 'config.txt')
print('Args:')
print(open(config, 'r').read())
parser = run_nerf.config_parser()
args = parser.parse_args('--config {} --ft_path {}'.format(config, os.path.join(basedir, expname, 'model_200000.npy')))
print('loaded args')
images, poses, bds, render_poses, i_test = load_llff_data(args.datadir, args.factor,
recenter=True, bd_factor=.75,
spherify=args.spherify)
H, W, focal = poses[0,:3,-1].astype(np.float32)
H = int(H)
W = int(W)
hwf = [H, W, focal]
images = images.astype(np.float32)
poses = poses.astype(np.float32)
if args.no_ndc:
near = tf.reduce_min(bds) * .9
far = tf.reduce_max(bds) * 1.
else:
near = 0.
far = 1.
# Create nerf model
_, render_kwargs_test, start, grad_vars, models = run_nerf.create_nerf(args)
print(models['model'].input)
model = models['model']
print(model.summary())
#extractor = keras.Model(inputs=model.inputs,
# outputs=model.layers[1].output)
#embed_fn, input_ch = run_nerf.get_embedder(10,1)
#embed_fn1, input_ch = run_nerf.get_embedder(4,1)
#a = embed_fn(tf.constant([[0.5,0.5,0.5]]))
#b = embed_fn1(tf.constant([[0.5,0.5,0.5]]))
#c = tf.concat([a,b],1)
#print(c.shape)
#print(extractor.predict(c))
#exit(0)
#features = extractor()
bds_dict = {
'near' : tf.cast(near, tf.float32),
'far' : tf.cast(far, tf.float32),
}
render_kwargs_test.update(bds_dict)
print('Render kwargs:')
pprint.pprint(render_kwargs_test)
down = 4
render_kwargs_fast = {k : render_kwargs_test[k] for k in render_kwargs_test}
render_kwargs_fast['N_importance'] = 0
c2w = np.eye(4)[:3,:4].astype(np.float32) # identity pose matrix
test = run_nerf.render(H//down, W//down, focal/down, c2w=c2w, **render_kwargs_fast)
img = np.clip(test[0],0,1)
plt.imshow(img)
plt.show()
| 26.336842 | 119 | 0.684652 | import os, sys
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
import tensorflow as tf
tf.compat.v1.enable_eager_execution()
sys.path.append(r'/home/luca/Desktop/NERFPosit/Inference')
import numpy as np
import imageio
import json
import random
import time
import pprint
from tensorflow import keras
from tensorflow.keras import layers
import matplotlib.pyplot as plt
import run_nerf
from load_llff import load_llff_data
from load_deepvoxels import load_dv_data
from load_blender import load_blender_data
basedir = './logs'
expname = 'fern_example'
config = os.path.join(basedir, expname, 'config.txt')
print('Args:')
print(open(config, 'r').read())
parser = run_nerf.config_parser()
args = parser.parse_args('--config {} --ft_path {}'.format(config, os.path.join(basedir, expname, 'model_200000.npy')))
print('loaded args')
images, poses, bds, render_poses, i_test = load_llff_data(args.datadir, args.factor,
recenter=True, bd_factor=.75,
spherify=args.spherify)
H, W, focal = poses[0,:3,-1].astype(np.float32)
H = int(H)
W = int(W)
hwf = [H, W, focal]
images = images.astype(np.float32)
poses = poses.astype(np.float32)
if args.no_ndc:
near = tf.reduce_min(bds) * .9
far = tf.reduce_max(bds) * 1.
else:
near = 0.
far = 1.
_, render_kwargs_test, start, grad_vars, models = run_nerf.create_nerf(args)
print(models['model'].input)
model = models['model']
print(model.summary())
bds_dict = {
'near' : tf.cast(near, tf.float32),
'far' : tf.cast(far, tf.float32),
}
render_kwargs_test.update(bds_dict)
print('Render kwargs:')
pprint.pprint(render_kwargs_test)
down = 4
render_kwargs_fast = {k : render_kwargs_test[k] for k in render_kwargs_test}
render_kwargs_fast['N_importance'] = 0
c2w = np.eye(4)[:3,:4].astype(np.float32)
test = run_nerf.render(H//down, W//down, focal/down, c2w=c2w, **render_kwargs_fast)
img = np.clip(test[0],0,1)
plt.imshow(img)
plt.show()
| true | true |
f72e414bf6bdf8d43884107e4bbb78774081d65f | 262 | py | Python | languages/python3/japronto/main.py | jcnaud/snippet | 10db24e2a648af29c51f6bc3a083ffe86e11ae5c | [
"Apache-2.0"
] | 5 | 2018-01-18T10:08:50.000Z | 2020-05-01T04:18:02.000Z | languages/python3/japronto/main.py | jcnaud/snippet | 10db24e2a648af29c51f6bc3a083ffe86e11ae5c | [
"Apache-2.0"
] | null | null | null | languages/python3/japronto/main.py | jcnaud/snippet | 10db24e2a648af29c51f6bc3a083ffe86e11ae5c | [
"Apache-2.0"
] | null | null | null | # source : https://github.com/squeaky-pl/japronto/blob/master/tutorial/1_hello.md
from japronto import Application
def hello(request):
    """Request handler: reply to any request with a plain-text greeting."""
    return request.Response(text='Hello world!')
app = Application()
app.router.add_route('/', hello)  # serve `hello` at the root path
app.run(debug=True)  # blocking: starts the HTTP server
| 20.153846 | 81 | 0.744275 |
from japronto import Application
def hello(request):
return request.Response(text='Hello world!')
app = Application()
app.router.add_route('/', hello)
app.run(debug=True)
| true | true |
f72e41538ac04b1bddab04878d05b47c34f51b7d | 958 | py | Python | code_week1_426_430/search_in_rotated_sorted_array.py | dylanlee101/leetcode | b059afdadb83d504e62afd1227107de0b59557af | [
"Apache-2.0"
] | null | null | null | code_week1_426_430/search_in_rotated_sorted_array.py | dylanlee101/leetcode | b059afdadb83d504e62afd1227107de0b59557af | [
"Apache-2.0"
] | null | null | null | code_week1_426_430/search_in_rotated_sorted_array.py | dylanlee101/leetcode | b059afdadb83d504e62afd1227107de0b59557af | [
"Apache-2.0"
] | null | null | null | '''
假设按照升序排序的数组在预先未知的某个点上进行了旋转。
( 例如,数组 [0,1,2,4,5,6,7] 可能变为 [4,5,6,7,0,1,2] )。
搜索一个给定的目标值,如果数组中存在这个目标值,则返回它的索引,否则返回 -1 。
你可以假设数组中不存在重复的元素。
你的算法时间复杂度必须是 O(log n) 级别。
示例 1:
输入: nums = [4,5,6,7,0,1,2], target = 0
输出: 4
示例 2:
输入: nums = [4,5,6,7,0,1,2], target = 3
输出: -1
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/search-in-rotated-sorted-array
'''
class Solution:
    def search(self, nums: list[int], target: int) -> int:
        """Binary-search `target` in an ascending array rotated at an unknown pivot.

        `nums` contains distinct values, e.g. [4,5,6,7,0,1,2].  Returns the
        index of `target`, or -1 when absent.  O(log n) time, O(1) space.
        (Fix: the original annotated with `typing.List` without importing it,
        which raises NameError when the class body executes.)
        """
        lo, hi = 0, len(nums) - 1
        while lo <= hi:
            mid = (lo + hi) // 2
            if nums[mid] == target:
                return mid
            # Exactly one half of [lo, hi] is sorted; decide which, then
            # test whether target lies inside that sorted half.
            if nums[lo] <= nums[mid]:  # left half [lo, mid] is sorted
                if nums[lo] <= target < nums[mid]:
                    hi = mid - 1
                else:
                    lo = mid + 1
            else:  # right half [mid, hi] is sorted
                if nums[mid] < target <= nums[hi]:
                    lo = mid + 1
                else:
                    hi = mid - 1
        return -1
f72e41a742163eb3e707b174caea0f06b81bd15e | 1,881 | py | Python | third_party/blink/renderer/bindings/scripts/bind_gen/codegen_accumulator.py | sarang-apps/darshan_browser | 173649bb8a7c656dc60784d19e7bb73e07c20daa | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | third_party/blink/renderer/bindings/scripts/bind_gen/codegen_accumulator.py | sarang-apps/darshan_browser | 173649bb8a7c656dc60784d19e7bb73e07c20daa | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | third_party/blink/renderer/bindings/scripts/bind_gen/codegen_accumulator.py | sarang-apps/darshan_browser | 173649bb8a7c656dc60784d19e7bb73e07c20daa | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class CodeGenAccumulator:
    """Collects #include headers and C++ forward declarations (classes and
    structs) during code generation so they can be emitted together later.
    """

    def __init__(self):
        self._include_headers = set()  # header paths to be #included
        self._class_decls = set()      # names needing a `class X;` decl
        self._struct_decls = set()     # names needing a `struct X;` decl

    def total_size(self):
        """Return the total number of accumulated entries of all kinds."""
        return sum((
            len(self._include_headers),
            len(self._class_decls),
            len(self._struct_decls),
        ))

    @property
    def include_headers(self):
        return self._include_headers

    def add_include_header(self, header):
        self._include_headers.add(header)

    def add_include_headers(self, headers):
        self._include_headers.update(headers)

    @staticmethod
    def require_include_headers(headers):
        # Deferred requirement: applied to an accumulator later.
        def requirement(accumulator):
            accumulator.add_include_headers(headers)
        return requirement

    @property
    def class_decls(self):
        return self._class_decls

    def add_class_decl(self, class_name):
        self._class_decls.add(class_name)

    def add_class_decls(self, class_names):
        self._class_decls.update(class_names)

    @staticmethod
    def require_class_decls(class_names):
        def requirement(accumulator):
            accumulator.add_class_decls(class_names)
        return requirement

    @property
    def struct_decls(self):
        return self._struct_decls

    def add_struct_decl(self, struct_name):
        self._struct_decls.add(struct_name)

    def add_struct_decls(self, struct_names):
        self._struct_decls.update(struct_names)

    @staticmethod
    def require_struct_decls(struct_names):
        def requirement(accumulator):
            accumulator.add_struct_decls(struct_names)
        return requirement
f72e42b4141b3b01016c081715c4244055b83088 | 3,517 | py | Python | assets/tools/asset_audit.py | yiya-core/yiya-core | 54bdc5c72f6d760cb3ec840f202c289bccd03ccd | [
"MIT"
] | null | null | null | assets/tools/asset_audit.py | yiya-core/yiya-core | 54bdc5c72f6d760cb3ec840f202c289bccd03ccd | [
"MIT"
] | null | null | null | assets/tools/asset_audit.py | yiya-core/yiya-core | 54bdc5c72f6d760cb3ec840f202c289bccd03ccd | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Script to audit the assets
# Reads the asset (amount has all issuances)
# Reads the balances in every address for the asset.
# Compares the two numbers to checks that qty of all assets are accounted for
import subprocess
import json

# Set this to your yiya-cli program
cli = "yiya-cli"
mode = "-testnet"
rpc_port = 15591
# mode = "-regtest"
# rpc_port = 15491

# Set this information in your yiya.conf file (in datadir, not testnet3)
rpc_user = 'rpcuser'
rpc_pass = 'rpcpass555'

# listaddressesbyasset returns at most this many rows per call.
PAGE_SIZE = 50000


def listassets(asset_filter):
    """Return the verbose listing of every asset matching `asset_filter`."""
    rpc_connection = get_rpc_connection()
    return rpc_connection.listassets(asset_filter, True)


def listaddressesbyasset(asset, onlytotal, count, start):
    """Return one page of {address: qty} holders for `asset`.

    Params were renamed from builtin-shadowing names (`bool`, `number`);
    all in-file callers pass positionally, so behavior is unchanged.
    """
    rpc_connection = get_rpc_connection()
    return rpc_connection.listaddressesbyasset(asset, onlytotal, count, start)


def rpc_call(params):
    """Invoke the CLI binary directly and return its raw stdout bytes."""
    process = subprocess.Popen([cli, mode, params], stdout=subprocess.PIPE)
    out, err = process.communicate()
    return out


def generate_blocks(n):
    """Mine `n` blocks (regtest only) and return their hashes."""
    rpc_connection = get_rpc_connection()
    return rpc_connection.generate(n)


def get_rpc_connection():
    """Build an authenticated JSON-RPC proxy to the local node."""
    from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
    connection = "http://%s:%s@127.0.0.1:%s" % (rpc_user, rpc_pass, rpc_port)
    return AuthServiceProxy(connection)


def audit(asset_filter):
    """Verify, for each asset matching `asset_filter`, that the issued
    amount equals the sum of balances over all holding addresses.

    Exits the process on the first failing asset; prints summary stats when
    every asset passes.
    """
    assets = listassets(asset_filter)
    print("Auditing: " + asset_filter)
    print("Asset count: " + str(len(assets)))
    count = 0
    max_dist_asset_name = ""
    max_dist_address_count = 0
    for asset, properties in assets.items():
        count += 1
        total_issued = 0
        total_for_asset = 0
        print("Auditing asset (" + str(count) + "): " + asset)
        for key, value in properties.items():
            if key == 'amount':
                total_issued += value
                print("Total issued for " + asset + " is: " + str(value))
        # Page through all holder addresses, summing their balances.
        number_of_addresses = 0
        page = 0
        while True:
            address_qtys = listaddressesbyasset(asset, False, PAGE_SIZE,
                                                page * PAGE_SIZE)
            number_of_addresses += len(address_qtys)
            for address, qty in address_qtys.items():
                total_for_asset += qty
            # A short page means we have seen the last holders.
            if len(address_qtys) < PAGE_SIZE:
                break
            page += 1
        print("Total in addresses for asset " + asset + " is " + str(total_for_asset))
        # Track the most widely distributed asset for the summary stats.
        if number_of_addresses > max_dist_address_count:
            max_dist_asset_name = asset
            max_dist_address_count = number_of_addresses
        if total_issued == total_for_asset:
            print("Audit PASSED for " + asset)
            print("")
        else:
            print("Audit FAILED for " + asset)
            exit()
    if len(assets) == count:
        print("All " + str(len(assets)) + " assets audited.")
        print("Stats:")
        print("  Max Distribed Asset: " + max_dist_asset_name + " with " + str(max_dist_address_count) + " addresses.")


if mode == "-regtest":  # If regtest then mine our own blocks
    import os
    os.system(cli + " " + mode + " generate 400")

audit("*")  # Set to "*" for all.
| true | true |
f72e42e386695a450d722a3a775545996c37216f | 3,796 | py | Python | vio/vio/swagger/utils.py | onap/multicloud-openstack-vmware | 53fd67e55f54c66b29e0eb5ab792e80d16ffff20 | [
"Apache-2.0",
"CC-BY-4.0"
] | 1 | 2021-10-15T16:47:11.000Z | 2021-10-15T16:47:11.000Z | vio/vio/swagger/utils.py | onap/multicloud-openstack-vmware | 53fd67e55f54c66b29e0eb5ab792e80d16ffff20 | [
"Apache-2.0",
"CC-BY-4.0"
] | 1 | 2020-02-11T22:14:45.000Z | 2020-02-11T22:14:45.000Z | vio/vio/swagger/utils.py | onap/multicloud-openstack-vmware | 53fd67e55f54c66b29e0eb5ab792e80d16ffff20 | [
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
def get_swagger_json_data():
    """Assemble the combined MultiVIM VIO swagger document.

    Loads the flavor swagger file as the base document, merges the ``paths``
    and ``definitions`` sections of every other resource's swagger file into
    it, then stamps the service base path and title.

    Returns:
        dict: the merged swagger specification.

    (Fix: the original repeated an open/JSONDecoder/close stanza nine times
    and leaked the file handle if decoding raised; this uses ``json.load``
    with a context manager in a single loop.)
    """
    here = os.path.dirname(__file__)

    def _load(name):
        # Context manager guarantees the file is closed even on a parse error.
        with open(os.path.join(here, name)) as f:
            return json.load(f)

    # The flavor document is the base; the rest contribute paths/definitions.
    json_data = _load('multivim.flavor.swagger.json')
    for resource in ('image', 'network', 'subnet', 'server', 'volume',
                     'vport', 'tenant', 'host', 'limit'):
        part = _load('multivim.%s.swagger.json' % resource)
        json_data["paths"].update(part["paths"])
        json_data["definitions"].update(part["definitions"])

    json_data["basePath"] = "/api/multicloud-vio/v0/"
    json_data["info"]["title"] = "MultiVIM driver of OpenStack VIO Service NBI"
    return json_data
| true | true |
f72e432ddad2c586e45b0ee110568ffaa11c4951 | 1,825 | py | Python | test/test_tenants.py | rubelw/auth0_client | 51e68239babcf7c40e40491d1aaa3f8547a67f63 | [
"MIT"
] | 2 | 2020-10-08T21:42:56.000Z | 2021-03-21T08:17:52.000Z | test/test_tenants.py | rubelw/auth0_client | 51e68239babcf7c40e40491d1aaa3f8547a67f63 | [
"MIT"
] | null | null | null | test/test_tenants.py | rubelw/auth0_client | 51e68239babcf7c40e40491d1aaa3f8547a67f63 | [
"MIT"
] | null | null | null | import json
import sys
import unittest
from contextlib import contextmanager
if sys.version_info[0] < 3:
from StringIO import StringIO
else:
from io import StringIO
from mock import patch
from auth0_client.Auth0Client import Auth0Client as class_to_test
@contextmanager
def captured_output():
    """Temporarily replace sys.stdout/sys.stderr with StringIO buffers.

    Yields the (stdout_buffer, stderr_buffer) pair; the original streams are
    restored on exit even if the body raises.
    """
    captured_stdout = StringIO()
    captured_stderr = StringIO()
    saved_streams = (sys.stdout, sys.stderr)
    sys.stdout, sys.stderr = captured_stdout, captured_stderr
    try:
        yield captured_stdout, captured_stderr
    finally:
        sys.stdout, sys.stderr = saved_streams
class TestTenants(unittest.TestCase):
    """
    Unit tests for the Auth0Client tenant endpoints.

    The underlying Tenants API calls are patched out, so the tests only
    verify that the client wrapper forwards the call and JSON-encodes the
    mocked return value ('123' -> '"123"').
    """
    # NOTE(review): the `exit` parameter name shadows the builtin; it is the
    # mock injected by @patch('sys.exit') and cannot be renamed without
    # changing the test signature convention.
    @patch('sys.exit')
    @patch('auth0_client.v3.management.tenants.Tenants.get')
    def test_get_tenant_settings(self, stats, exit):
        # Mocks are injected bottom-up: `stats` is Tenants.get, `exit` is sys.exit.
        stats.return_value='123'
        debug = False
        exit.return_value=None
        # Minimal client configuration; credentials are never used (API mocked).
        config_dict = {}
        config_dict['debug'] = debug
        config_dict['domain'] = 'test'
        config_dict['client_id'] = 'id'
        config_dict['client_secret'] = 'secret'
        client= class_to_test(config_dict)
        real_results = client.get_tenants(
        )
        # The client JSON-serializes the mocked value, hence the quoted string.
        self.assertEqual('"123"', real_results)
    @patch('sys.exit')
    @patch('auth0_client.v3.management.tenants.Tenants.update')
    def test_update_tenant_settings(self, stats, exit):
        # `stats` is the mocked Tenants.update; its return value is echoed back.
        stats.return_value='123'
        debug = False
        exit.return_value=None
        config_dict = {}
        config_dict['debug'] = debug
        config_dict['domain'] = 'test'
        config_dict['client_id'] = 'id'
        config_dict['client_secret'] = 'secret'
        client= class_to_test(config_dict)
        # The body payload is passed through; only the mocked return matters here.
        body='{"123":"xxx"}'
        real_results = client.update_tenant_settings(
            body=body
        )
        self.assertEqual('"123"', real_results)
| 25.347222 | 65 | 0.642192 | import json
import sys
import unittest
from contextlib import contextmanager
if sys.version_info[0] < 3:
from StringIO import StringIO
else:
from io import StringIO
from mock import patch
from auth0_client.Auth0Client import Auth0Client as class_to_test
@contextmanager
def captured_output():
new_out, new_err = StringIO(), StringIO()
old_out, old_err = sys.stdout, sys.stderr
try:
sys.stdout, sys.stderr = new_out, new_err
yield sys.stdout, sys.stderr
finally:
sys.stdout, sys.stderr = old_out, old_err
class TestTenants(unittest.TestCase):
@patch('sys.exit')
@patch('auth0_client.v3.management.tenants.Tenants.get')
def test_get_tenant_settings(self, stats, exit):
stats.return_value='123'
debug = False
exit.return_value=None
config_dict = {}
config_dict['debug'] = debug
config_dict['domain'] = 'test'
config_dict['client_id'] = 'id'
config_dict['client_secret'] = 'secret'
client= class_to_test(config_dict)
real_results = client.get_tenants(
)
self.assertEqual('"123"', real_results)
@patch('sys.exit')
@patch('auth0_client.v3.management.tenants.Tenants.update')
def test_update_tenant_settings(self, stats, exit):
stats.return_value='123'
debug = False
exit.return_value=None
config_dict = {}
config_dict['debug'] = debug
config_dict['domain'] = 'test'
config_dict['client_id'] = 'id'
config_dict['client_secret'] = 'secret'
client= class_to_test(config_dict)
body='{"123":"xxx"}'
real_results = client.update_tenant_settings(
body=body
)
self.assertEqual('"123"', real_results)
| true | true |
f72e4627ab0f3f4cac8d2b3cf9858e52763b8499 | 1,163 | py | Python | __tests__/integration/mocks/main.py | tetrascience/ts-sdk-python | 05a5034f99bc73ea456a16332ecd26ce7c403dad | [
"Apache-2.0"
] | 1 | 2022-01-19T19:38:49.000Z | 2022-01-19T19:38:49.000Z | __tests__/integration/mocks/main.py | tetrascience/ts-sdk-python | 05a5034f99bc73ea456a16332ecd26ce7c403dad | [
"Apache-2.0"
] | 17 | 2021-03-17T07:47:07.000Z | 2022-03-28T18:49:06.000Z | __tests__/integration/mocks/main.py | tetrascience/ts-sdk-python | 05a5034f99bc73ea456a16332ecd26ce7c403dad | [
"Apache-2.0"
] | 1 | 2021-03-17T08:06:25.000Z | 2021-03-17T08:06:25.000Z | from ts_sdk import task
def test_log(input, context: task.Context):
    """Emit a single info-level entry through the task logger."""
    context.get_logger().log({
        "message": "Hello from test function!",
        "level": "info",
    })
def test_file_operations(input, context: task.Context):
    """Write a file, attach attributes/tags/labels, and verify round-trip."""
    written = context.write_file(b'test-content', input.get('newFileName'), 'PROCESSED')
    tagged = context.add_attributes(
        written,
        {'k1': 'v1'},
        ['t1'],
        [{'name': 'label_name', 'value': 'label_value'}],
    )
    result = context.read_file(tagged)
    assert result['body'] == b'test-content', 'read_file content differs from provided in write_file'
def test_secrets(input, context: task.Context):
    """Known secret names resolve to their value; unknown names pass through."""
    secret_value = context.resolve_secret(input.get('pass'))
    assert secret_value == 'secret-password-value', f'test_secrets failed. Got {secret_value}'
    assert context.resolve_secret('anything') == 'anything'
def test_search_eql(input, context: task.Context):
    """An empty EQL query yields an empty result."""
    assert context.search_eql({'query': {}}) == {}
def test_all(input, context: task.Context):
    """Run every check in sequence; returns True when all pass."""
    for check in (test_log, test_file_operations, test_secrets, test_search_eql):
        check(input, context)
    return True
| true | true |
f72e4686ac76fb9adca1ea4b64215bfc51049ddf | 2,208 | py | Python | zerver/webhooks/wordpress/view.py | Supermanu/zulip | 26f6d708c2e30cfe50d9d61031edb759e8117596 | [
"Apache-2.0"
] | null | null | null | zerver/webhooks/wordpress/view.py | Supermanu/zulip | 26f6d708c2e30cfe50d9d61031edb759e8117596 | [
"Apache-2.0"
] | 15 | 2020-06-05T18:44:15.000Z | 2022-03-11T23:26:03.000Z | zerver/webhooks/wordpress/view.py | Supermanu/zulip | 26f6d708c2e30cfe50d9d61031edb759e8117596 | [
"Apache-2.0"
] | null | null | null | # Webhooks for external integrations.
from __future__ import absolute_import
from django.utils.translation import ugettext as _
from django.http import HttpRequest, HttpResponse
from zerver.models import get_client, UserProfile
from zerver.lib.actions import check_send_message
from zerver.lib.response import json_success, json_error
from zerver.decorator import REQ, has_request_variables, api_key_only_webhook_view
from six import text_type
PUBLISH_POST_OR_PAGE_TEMPLATE = 'New {type} published.\n[{title}]({url})'
USER_REGISTER_TEMPLATE = 'New blog user registered.\nName: {name}\nemail: {email}'
WP_LOGIN_TEMPLATE = 'User {name} logged in.'
@api_key_only_webhook_view("Wordpress")
@has_request_variables
def api_wordpress_webhook(request, user_profile,
                          stream=REQ(default="wordpress"),
                          topic=REQ(default="WordPress Notification"),
                          hook=REQ(default="WordPress Action"),
                          post_title=REQ(default="New WordPress Post"),
                          post_type=REQ(default="post"),
                          post_url=REQ(default="WordPress Post URL"),
                          display_name=REQ(default="New User Name"),
                          user_email=REQ(default="New User Email"),
                          user_login=REQ(default="Logged in User")):
    # type: (HttpRequest, UserProfile, text_type, text_type, text_type, text_type, text_type, text_type, text_type, text_type, text_type) -> HttpResponse
    """Translate an incoming WordPress webhook into a Zulip stream message."""
    # Some test fixtures carry trailing whitespace on the hook name; normalize.
    hook = hook.rstrip()
    if hook in ('publish_post', 'publish_page'):
        body = PUBLISH_POST_OR_PAGE_TEMPLATE.format(type=post_type, title=post_title, url=post_url)
    elif hook == 'user_register':
        body = USER_REGISTER_TEMPLATE.format(name=display_name, email=user_email)
    elif hook == 'wp_login':
        body = WP_LOGIN_TEMPLATE.format(name=user_login)
    else:
        return json_error(_("Unknown WordPress webhook action: " + hook))
    webhook_client = get_client("ZulipWordPressWebhook")
    check_send_message(user_profile, webhook_client, "stream",
                       [stream], topic, body)
    return json_success()
| true | true |
f72e46d6af97d1838fd40d7621427e1174fa05e9 | 771 | py | Python | pytz_timezone_field/models/fields.py | mkoistinen/django-pytz-timezone-field | 4f558923f7d2a884eeed2def29a5690336598668 | [
"MIT"
] | null | null | null | pytz_timezone_field/models/fields.py | mkoistinen/django-pytz-timezone-field | 4f558923f7d2a884eeed2def29a5690336598668 | [
"MIT"
] | null | null | null | pytz_timezone_field/models/fields.py | mkoistinen/django-pytz-timezone-field | 4f558923f7d2a884eeed2def29a5690336598668 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from django.db.models.fields import CharField
from ..forms.fields import TimeZoneInputField
class TimeZoneField(CharField):
    """Model field that stores a pytz timezone name as plain text.

    Persistence is ordinary ``CharField`` behavior; the companion
    ``TimeZoneInputField`` handles rendering/validation on forms.
    """

    def __init__(self, *args, **kwargs):
        # pytz zone names are currently at most 30 characters; default to
        # 63 to leave plenty of headroom, but let callers override it.
        if 'max_length' not in kwargs:
            kwargs['max_length'] = 63
        super().__init__(*args, **kwargs)

    def formfield(self, **kwargs):
        # Intentionally bypass CharField.formfield and call Field.formfield
        # directly, so CharField does not impose its own form machinery;
        # default the form class to TimeZoneInputField (overridable).
        defaults = {'form_class': TimeZoneInputField}
        defaults.update(kwargs)
        return super(CharField, self).formfield(**defaults)
| 32.125 | 79 | 0.64332 |
from django.db.models.fields import CharField
from ..forms.fields import TimeZoneInputField
class TimeZoneField(CharField):
def __init__(self, *args, **kwargs):
kwargs.setdefault('max_length', 63)
super().__init__(*args, **kwargs)
def formfield(self, **kwargs):
return super(CharField, self).formfield(**{
'form_class': TimeZoneInputField,
**kwargs,
})
| true | true |
f72e48d376dbe7b2b813c7e29cfe0902f94f05f9 | 2,192 | py | Python | fire/cli/niv/_luk_sag.py | larsnaesbye/FIRE | 1d2c65bf890391ebd0bff4bf41689e88f4d89704 | [
"MIT"
] | 4 | 2021-02-01T11:04:48.000Z | 2022-03-11T19:14:59.000Z | fire/cli/niv/_luk_sag.py | larsnaesbye/FIRE | 1d2c65bf890391ebd0bff4bf41689e88f4d89704 | [
"MIT"
] | 256 | 2020-05-06T21:30:10.000Z | 2022-03-24T10:23:56.000Z | fire/cli/niv/_luk_sag.py | larsnaesbye/FIRE | 1d2c65bf890391ebd0bff4bf41689e88f4d89704 | [
"MIT"
] | 11 | 2020-05-04T07:08:27.000Z | 2022-01-05T11:34:31.000Z | from io import BytesIO
from zipfile import ZipFile
import click
import fire.cli
from fire.api.model import (
Sagsevent,
SagseventInfo,
SagseventInfoMateriale,
EventType,
)
from fire.io.regneark import arkdef
from . import (
find_faneblad,
find_sag,
niv,
bekræft,
er_projekt_okay,
)
@niv.command()
@fire.cli.default_options()
@click.argument(
    "projektnavn",
    nargs=1,
    type=str,
)
def luk_sag(projektnavn: str, **kwargs) -> None:
    """Luk sag i databasen"""
    er_projekt_okay(projektnavn)
    sag = find_sag(projektnavn)

    # Collect the case material (the workbook itself plus every file listed
    # on the "Filoversigt" sheet) and zip it for compact storage in the DB.
    sagsmaterialer = [f"{projektnavn}.xlsx"]
    filoversigt = find_faneblad(projektnavn, "Filoversigt", arkdef.FILOVERSIGT)
    sagsmaterialer.extend(list(filoversigt["Filnavn"]))

    zipped = BytesIO()
    with ZipFile(zipped, "w") as zipobj:
        for fil in sagsmaterialer:
            zipobj.write(fil)

    # Attach the zipped material to a closing remark (KOMMENTAR) on the case.
    sagsevent = Sagsevent(
        sag=sag,
        eventtype=EventType.KOMMENTAR,
        sagseventinfos=[
            SagseventInfo(
                beskrivelse=f"Sagsmateriale for {projektnavn}",
                materialer=[SagseventInfoMateriale(materiale=zipped.getvalue())],
            ),
        ],
    )
    fire.cli.firedb.indset_sagsevent(sagsevent, commit=False)
    fire.cli.firedb.luk_sag(sag, commit=False)

    try:
        # Flush (without committing) so database errors surface before the
        # user is asked to confirm the close.
        fire.cli.firedb.session.flush()
    except Exception:
        # Was a bare "except:", which would also swallow KeyboardInterrupt
        # and SystemExit; catch Exception and roll back on database errors.
        fire.cli.firedb.session.rollback()
        fire.cli.print(
            f"Der opstod en fejl - sag {sag.id} for '{projektnavn}' IKKE lukket!"
        )
    else:
        spørgsmål = click.style(
            f"Er du sikker på at du vil lukke sagen {projektnavn}?",
            bg="red",
            fg="white",
        )
        if bekræft(spørgsmål):
            fire.cli.firedb.session.commit()
            fire.cli.print(f"Sag {sag.id} for '{projektnavn}' lukket!")
        else:
            fire.cli.firedb.session.rollback()
            fire.cli.print(f"Sag {sag.id} for '{projektnavn}' IKKE lukket!")
| 27.4 | 81 | 0.624544 | from io import BytesIO
from zipfile import ZipFile
import click
import fire.cli
from fire.api.model import (
Sagsevent,
SagseventInfo,
SagseventInfoMateriale,
EventType,
)
from fire.io.regneark import arkdef
from . import (
find_faneblad,
find_sag,
niv,
bekræft,
er_projekt_okay,
)
@niv.command()
@fire.cli.default_options()
@click.argument(
"projektnavn",
nargs=1,
type=str,
)
def luk_sag(projektnavn: str, **kwargs) -> None:
er_projekt_okay(projektnavn)
sag = find_sag(projektnavn)
sagsmaterialer = [f"{projektnavn}.xlsx"]
filoversigt = find_faneblad(projektnavn, "Filoversigt", arkdef.FILOVERSIGT)
sagsmaterialer.extend(list(filoversigt["Filnavn"]))
zipped = BytesIO()
with ZipFile(zipped, "w") as zipobj:
for fil in sagsmaterialer:
zipobj.write(fil)
sagsevent = Sagsevent(
sag=sag,
eventtype=EventType.KOMMENTAR,
sagseventinfos=[
SagseventInfo(
beskrivelse=f"Sagsmateriale for {projektnavn}",
materialer=[SagseventInfoMateriale(materiale=zipped.getvalue())],
),
],
)
fire.cli.firedb.indset_sagsevent(sagsevent, commit=False)
fire.cli.firedb.luk_sag(sag, commit=False)
try:
fire.cli.firedb.session.flush()
except:
fire.cli.firedb.session.rollback()
fire.cli.print(
f"Der opstod en fejl - sag {sag.id} for '{projektnavn}' IKKE lukket!"
)
else:
spørgsmål = click.style(
f"Er du sikker på at du vil lukke sagen {projektnavn}?",
bg="red",
fg="white",
)
if bekræft(spørgsmål):
fire.cli.firedb.session.commit()
fire.cli.print(f"Sag {sag.id} for '{projektnavn}' lukket!")
else:
fire.cli.firedb.session.rollback()
fire.cli.print(f"Sag {sag.id} for '{projektnavn}' IKKE lukket!")
| true | true |
f72e4918b12a62bd6be858f4ee40afb0815cf68f | 1,851 | py | Python | device/kckr.py | smstuebe/kckr | 1806baf34d329a033008acd6a8b79ffafd151bb1 | [
"MIT"
] | null | null | null | device/kckr.py | smstuebe/kckr | 1806baf34d329a033008acd6a8b79ffafd151bb1 | [
"MIT"
] | 1 | 2019-05-22T07:22:33.000Z | 2019-05-22T07:22:33.000Z | device/kckr.py | smstuebe/kckr | 1806baf34d329a033008acd6a8b79ffafd151bb1 | [
"MIT"
] | null | null | null | import threading
from time import sleep
import collections
import requests
import json
import argparse
import configparser
from sensors.sensors import Sensors
from backend.backend import Backend
# --- Command line -----------------------------------------------------------
parser = argparse.ArgumentParser(description="Kicker activity indicator.")
parser.add_argument('--debug', action='store_const',
                    const=True, default=False,
                    help='Listen to the debugger.')
parser.add_argument('--verbose', action='store_const',
                    const=True, default=False,
                    help='Verbose output mode.')
args = parser.parse_args()

if args.debug:
    # Attach the remote debugger before doing anything else so startup
    # code can be stepped through.
    import ptvsd
    ptvsd.enable_attach(address=('192.168.178.27', 1337), redirect_output=True)
    ptvsd.wait_for_attach()

config = configparser.ConfigParser()
config.read("config.ini")
# TODO: make dynamic
# TODO: validate
# TODO: make sensor ports configurable
print("Started kckr for location: %s" % (config["device"]["location"]))

sensors = Sensors()
backend = Backend(config)

# --- Main polling loop (1 Hz); Ctrl-C stops the sensors cleanly -------------
try:
    num = 0
    occupied = None
    sensors.start()
    while True:
        print("Occupied %s" % (sensors.occupation.isOccupied))
        if sensors.air.hasValues():
            print("Temperature %.02f°C" % (sensors.air.temperature))
            print("Humidity %.02f%%" % (sensors.air.humidity))
        num += 1
        if occupied != sensors.occupation.isOccupied:
            # Push occupancy changes to the backend immediately.
            occupied = sensors.occupation.isOccupied
            backend.updateOccupation(occupied)
        # Push environment data roughly every 15 seconds.  Use >= rather
        # than ==: with the original exact comparison, if the air sensor
        # had no values at the moment num hit 15, the counter ran past 15
        # and environment updates were silenced forever.
        if num >= 15 and sensors.air.hasValues():
            backend.updateEnvironmentData(
                occupied, sensors.air.temperature, sensors.air.humidity)
            num = 0
        sleep(1)
except KeyboardInterrupt:
    sensors.stop()
| 28.476923 | 79 | 0.653701 | import threading
from time import sleep
import collections
import requests
import json
import argparse
import configparser
from sensors.sensors import Sensors
from backend.backend import Backend
parser = argparse.ArgumentParser(description="Kicker activity indicator.")
parser.add_argument('--debug', action='store_const',
const=True, default=False,
help='Listen to the debugger.')
parser.add_argument('--verbose', action='store_const',
const=True, default=False,
help='Verbose output mode.')
args = parser.parse_args()
if args.debug:
import ptvsd
ptvsd.enable_attach(address=('192.168.178.27', 1337), redirect_output=True)
ptvsd.wait_for_attach()
config = configparser.ConfigParser()
config.read("config.ini")
print("Started kckr for location: %s" % (config["device"]["location"]))
sensors = Sensors()
backend = Backend(config)
try:
num = 0
occupied = None
sensors.start()
while True:
print("Occupied %s" % (sensors.occupation.isOccupied))
if sensors.air.hasValues():
print("Temperature %.02f°C" % (sensors.air.temperature))
print("Humidity %.02f%%" % (sensors.air.humidity))
num += 1
if occupied != sensors.occupation.isOccupied:
occupied = sensors.occupation.isOccupied
backend.updateOccupation(occupied)
if num == 15 and sensors.air.hasValues():
backend.updateEnvironmentData(
occupied, sensors.air.temperature, sensors.air.humidity)
num = 0
sleep(1)
except KeyboardInterrupt:
sensors.stop()
| true | true |
f72e498c73fddceb72cebc0a84291e6fbbe0cdb0 | 417 | py | Python | math/0x06-multivariate_prob/3-main.py | kyeeh/holbertonschool-machine_learning | 8e4894c2b036ec7f4750de5bf99b95aee5b94449 | [
"MIT"
] | null | null | null | math/0x06-multivariate_prob/3-main.py | kyeeh/holbertonschool-machine_learning | 8e4894c2b036ec7f4750de5bf99b95aee5b94449 | [
"MIT"
] | null | null | null | math/0x06-multivariate_prob/3-main.py | kyeeh/holbertonschool-machine_learning | 8e4894c2b036ec7f4750de5bf99b95aee5b94449 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
if __name__ == '__main__':
    import numpy as np
    from multinormal import MultiNormal

    # Fixed seed so both draws below are reproducible.  The two
    # multivariate_normal calls consume the same seeded RNG stream, so
    # their relative order must not change.
    np.random.seed(0)
    # Dataset of 3 variables with one observation per column (hence .T):
    # shape (3, 10000).
    data = np.random.multivariate_normal([12, 30, 10], [[36, -30, 15], [-30, 100, -20], [15, -20, 25]], 10000).T
    mn = MultiNormal(data)
    # Single sample point as a (3, 1) column vector at which to evaluate
    # the fitted multivariate-normal PDF.
    x = np.random.multivariate_normal([12, 30, 10], [[36, -30, 15], [-30, 100, -20], [15, -20, 25]], 1).T
    print(x)
    print(mn.pdf(x))
if __name__ == '__main__':
import numpy as np
from multinormal import MultiNormal
np.random.seed(0)
data = np.random.multivariate_normal([12, 30, 10], [[36, -30, 15], [-30, 100, -20], [15, -20, 25]], 10000).T
mn = MultiNormal(data)
x = np.random.multivariate_normal([12, 30, 10], [[36, -30, 15], [-30, 100, -20], [15, -20, 25]], 1).T
print(x)
print(mn.pdf(x))
| true | true |
f72e4a86777f74860825078d88f210956daaf989 | 2,722 | py | Python | azure/mgmt/network/v2017_08_01/models/route_filter.py | EnjoyLifeFund/py36pkgs | 0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2 | [
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | azure/mgmt/network/v2017_08_01/models/route_filter.py | EnjoyLifeFund/py36pkgs | 0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2 | [
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | azure/mgmt/network/v2017_08_01/models/route_filter.py | EnjoyLifeFund/py36pkgs | 0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2 | [
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class RouteFilter(Resource):
    """Route Filter Resource.
    Variables are only populated by the server, and will be ignored when
    sending a request.
    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: Resource tags.
    :type tags: dict
    :param rules: Collection of RouteFilterRules contained within a route
    filter.
    :type rules: list of :class:`RouteFilterRule
    <azure.mgmt.network.v2017_08_01.models.RouteFilterRule>`
    :param peerings: A collection of references to express route circuit
    peerings.
    :type peerings: list of :class:`ExpressRouteCircuitPeering
    <azure.mgmt.network.v2017_08_01.models.ExpressRouteCircuitPeering>`
    :ivar provisioning_state: The provisioning state of the resource. Possible
    values are: 'Updating', 'Deleting', 'Succeeded' and 'Failed'.
    :vartype provisioning_state: str
    :ivar etag: Gets a unique read-only string that changes whenever the
    resource is updated.
    :vartype etag: str
    """

    # Server-populated fields are read-only: the serializer will not send
    # them in outgoing requests.
    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'etag': {'readonly': True},
    }

    # Maps Python attribute names to wire-format keys and msrest type
    # strings; dotted keys live under the resource's "properties" envelope.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'rules': {'key': 'properties.rules', 'type': '[RouteFilterRule]'},
        'peerings': {'key': 'properties.peerings', 'type': '[ExpressRouteCircuitPeering]'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
    }

    def __init__(self, id=None, location=None, tags=None, rules=None, peerings=None):
        super(RouteFilter, self).__init__(id=id, location=location, tags=tags)
        self.rules = rules
        self.peerings = peerings
        # Read-only, server-populated: always None until a response is
        # deserialized into this model.
        self.provisioning_state = None
        self.etag = None
| 37.805556 | 91 | 0.609478 |
from .resource import Resource
class RouteFilter(Resource):
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
'provisioning_state': {'readonly': True},
'etag': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'rules': {'key': 'properties.rules', 'type': '[RouteFilterRule]'},
'peerings': {'key': 'properties.peerings', 'type': '[ExpressRouteCircuitPeering]'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, id=None, location=None, tags=None, rules=None, peerings=None):
super(RouteFilter, self).__init__(id=id, location=location, tags=tags)
self.rules = rules
self.peerings = peerings
self.provisioning_state = None
self.etag = None
| true | true |
f72e4a8b3e4d7ff67bbca7884b9b872ee6346fc9 | 4,369 | py | Python | google/ads/googleads/v4/resources/types/custom_interest.py | batardo/google-ads-python | a39748521847e85138fca593f3be2681352ad024 | [
"Apache-2.0"
] | null | null | null | google/ads/googleads/v4/resources/types/custom_interest.py | batardo/google-ads-python | a39748521847e85138fca593f3be2681352ad024 | [
"Apache-2.0"
] | null | null | null | google/ads/googleads/v4/resources/types/custom_interest.py | batardo/google-ads-python | a39748521847e85138fca593f3be2681352ad024 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v4.enums.types import custom_interest_member_type
from google.ads.googleads.v4.enums.types import custom_interest_status
from google.ads.googleads.v4.enums.types import custom_interest_type
from google.protobuf import wrappers_pb2 as wrappers # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v4.resources",
marshal="google.ads.googleads.v4",
manifest={"CustomInterest", "CustomInterestMember",},
)
class CustomInterest(proto.Message):
    r"""A custom interest. This is a list of users by interest.
    Attributes:
        resource_name (str):
            Immutable. The resource name of the custom interest. Custom
            interest resource names have the form:
            ``customers/{customer_id}/customInterests/{custom_interest_id}``
        id (google.protobuf.wrappers_pb2.Int64Value):
            Output only. Id of the custom interest.
        status (google.ads.googleads.v4.enums.types.CustomInterestStatusEnum.CustomInterestStatus):
            Status of this custom interest. Indicates
            whether the custom interest is enabled or
            removed.
        name (google.protobuf.wrappers_pb2.StringValue):
            Name of the custom interest. It should be
            unique across the same custom affinity audience.
            This field is required for create operations.
        type_ (google.ads.googleads.v4.enums.types.CustomInterestTypeEnum.CustomInterestType):
            Type of the custom interest, CUSTOM_AFFINITY or
            CUSTOM_INTENT. By default the type is set to
            CUSTOM_AFFINITY.
        description (google.protobuf.wrappers_pb2.StringValue):
            Description of this custom interest audience.
        members (Sequence[google.ads.googleads.v4.resources.types.CustomInterestMember]):
            List of custom interest members that this
            custom interest is composed of. Members can be
            added during CustomInterest creation. If members
            are presented in UPDATE operation, existing
            members will be overridden.
    """

    # Field numbers mirror the CustomInterest message in the Google Ads
    # API proto definition; scalar fields use protobuf wrapper messages
    # so "unset" can be distinguished from a default value.
    resource_name = proto.Field(proto.STRING, number=1)
    id = proto.Field(proto.MESSAGE, number=2, message=wrappers.Int64Value,)
    status = proto.Field(
        proto.ENUM,
        number=3,
        enum=custom_interest_status.CustomInterestStatusEnum.CustomInterestStatus,
    )
    name = proto.Field(proto.MESSAGE, number=4, message=wrappers.StringValue,)
    # Trailing underscore avoids shadowing the builtin ``type``.
    type_ = proto.Field(
        proto.ENUM,
        number=5,
        enum=custom_interest_type.CustomInterestTypeEnum.CustomInterestType,
    )
    description = proto.Field(
        proto.MESSAGE, number=6, message=wrappers.StringValue,
    )
    members = proto.RepeatedField(
        proto.MESSAGE, number=7, message="CustomInterestMember",
    )
class CustomInterestMember(proto.Message):
    r"""A member of custom interest audience. A member can be a
    keyword or url. It is immutable, that is, it can only be created
    or removed but not changed.
    Attributes:
        member_type (google.ads.googleads.v4.enums.types.CustomInterestMemberTypeEnum.CustomInterestMemberType):
            The type of custom interest member, KEYWORD
            or URL.
        parameter (google.protobuf.wrappers_pb2.StringValue):
            Keyword text when member_type is KEYWORD or URL string when
            member_type is URL.
    """

    # member_type discriminates how ``parameter`` is interpreted
    # (keyword text vs. URL string).
    member_type = proto.Field(
        proto.ENUM,
        number=1,
        enum=custom_interest_member_type.CustomInterestMemberTypeEnum.CustomInterestMemberType,
    )
    parameter = proto.Field(
        proto.MESSAGE, number=2, message=wrappers.StringValue,
    )
__all__ = tuple(sorted(__protobuf__.manifest))
| 38.663717 | 112 | 0.703593 |
import proto
from google.ads.googleads.v4.enums.types import custom_interest_member_type
from google.ads.googleads.v4.enums.types import custom_interest_status
from google.ads.googleads.v4.enums.types import custom_interest_type
from google.protobuf import wrappers_pb2 as wrappers
__protobuf__ = proto.module(
package="google.ads.googleads.v4.resources",
marshal="google.ads.googleads.v4",
manifest={"CustomInterest", "CustomInterestMember",},
)
class CustomInterest(proto.Message):
resource_name = proto.Field(proto.STRING, number=1)
id = proto.Field(proto.MESSAGE, number=2, message=wrappers.Int64Value,)
status = proto.Field(
proto.ENUM,
number=3,
enum=custom_interest_status.CustomInterestStatusEnum.CustomInterestStatus,
)
name = proto.Field(proto.MESSAGE, number=4, message=wrappers.StringValue,)
type_ = proto.Field(
proto.ENUM,
number=5,
enum=custom_interest_type.CustomInterestTypeEnum.CustomInterestType,
)
description = proto.Field(
proto.MESSAGE, number=6, message=wrappers.StringValue,
)
members = proto.RepeatedField(
proto.MESSAGE, number=7, message="CustomInterestMember",
)
class CustomInterestMember(proto.Message):
member_type = proto.Field(
proto.ENUM,
number=1,
enum=custom_interest_member_type.CustomInterestMemberTypeEnum.CustomInterestMemberType,
)
parameter = proto.Field(
proto.MESSAGE, number=2, message=wrappers.StringValue,
)
__all__ = tuple(sorted(__protobuf__.manifest))
| true | true |
f72e4aa9ccd18adba05969c7078086aac4d097e9 | 1,184 | py | Python | jdcloud_sdk/services/jdccs/apis/DescribeIdcOverviewRequest.py | Tanc009/jdcloud-sdk-python | 8b045c99bc5b73ca7348e950b6f01e03a27982f5 | [
"Apache-2.0"
] | 14 | 2018-04-19T09:53:56.000Z | 2022-01-27T06:05:48.000Z | jdcloud_sdk/services/jdccs/apis/DescribeIdcOverviewRequest.py | Tanc009/jdcloud-sdk-python | 8b045c99bc5b73ca7348e950b6f01e03a27982f5 | [
"Apache-2.0"
] | 15 | 2018-09-11T05:39:54.000Z | 2021-07-02T12:38:02.000Z | jdcloud_sdk/services/jdccs/apis/DescribeIdcOverviewRequest.py | Tanc009/jdcloud-sdk-python | 8b045c99bc5b73ca7348e950b6f01e03a27982f5 | [
"Apache-2.0"
] | 33 | 2018-04-20T05:29:16.000Z | 2022-02-17T09:10:05.000Z | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
from jdcloud_sdk.core.jdcloudrequest import JDCloudRequest
class DescribeIdcOverviewRequest(JDCloudRequest):
    """
    Query the resource overview of an IDC (data center) machine room.
    """

    def __init__(self, parameters, header=None, version="v1"):
        # GET /idcs/{idc}/overview; the {idc} placeholder is filled from
        # the accompanying DescribeIdcOverviewParameters instance.
        super(DescribeIdcOverviewRequest, self).__init__(
            '/idcs/{idc}/overview', 'GET', header, version)
        self.parameters = parameters
class DescribeIdcOverviewParameters(object):
    """Path parameters for :class:`DescribeIdcOverviewRequest`."""

    def __init__(self, idc, ):
        """
        :param idc: IDC machine-room ID (fills the ``{idc}`` path segment)
        """
        self.idc = idc
| 28.190476 | 75 | 0.709459 |
from jdcloud_sdk.core.jdcloudrequest import JDCloudRequest
class DescribeIdcOverviewRequest(JDCloudRequest):
def __init__(self, parameters, header=None, version="v1"):
super(DescribeIdcOverviewRequest, self).__init__(
'/idcs/{idc}/overview', 'GET', header, version)
self.parameters = parameters
class DescribeIdcOverviewParameters(object):
def __init__(self, idc, ):
self.idc = idc
| true | true |
f72e4b6ab3bb941e242246f3ae5d7b65f579b2bb | 166 | py | Python | examples/permissioned-voting/assets/permissioned-voting-clear.py | Lumene98/algo-builder | b718661d064862fcf72c67589f0b5a6e48a1e7cd | [
"Apache-2.0"
] | 16 | 2021-05-15T00:23:47.000Z | 2022-03-07T18:59:54.000Z | examples/permissioned-voting/assets/permissioned-voting-clear.py | Lumene98/algo-builder | b718661d064862fcf72c67589f0b5a6e48a1e7cd | [
"Apache-2.0"
] | 8 | 2021-03-30T18:23:53.000Z | 2022-01-08T23:48:19.000Z | examples/permissioned-voting/assets/permissioned-voting-clear.py | Lumene98/algo-builder | b718661d064862fcf72c67589f0b5a6e48a1e7cd | [
"Apache-2.0"
] | 3 | 2021-09-03T20:42:02.000Z | 2022-03-03T17:21:15.000Z | from pyteal import *
def clear_state_program():
    """Clear-state program that unconditionally approves (returns 1)."""
    always_approve = Return(Int(1))
    return always_approve


if __name__ == "__main__":
    teal = compileTeal(clear_state_program(), Mode.Application)
    print(teal)
def clear_state_program():
return Return(Int(1))
if __name__ == "__main__":
print(compileTeal(clear_state_program(), Mode.Application)) | true | true |
f72e4c1fc8c43dd026395cd356909eccee491b99 | 272 | py | Python | Gathered CTF writeups/ptr-yudai-writeups/2019/picoCTF_2019/for/like1000/solve.py | mihaid-b/CyberSakura | f60e6b6bfd6898c69b84424b080090ae98f8076c | [
"MIT"
] | 1 | 2022-03-27T06:00:41.000Z | 2022-03-27T06:00:41.000Z | Gathered CTF writeups/ptr-yudai-writeups/2019/picoCTF_2019/for/like1000/solve.py | mihaid-b/CyberSakura | f60e6b6bfd6898c69b84424b080090ae98f8076c | [
"MIT"
] | null | null | null | Gathered CTF writeups/ptr-yudai-writeups/2019/picoCTF_2019/for/like1000/solve.py | mihaid-b/CyberSakura | f60e6b6bfd6898c69b84424b080090ae98f8076c | [
"MIT"
] | 1 | 2022-03-27T06:01:42.000Z | 2022-03-27T06:01:42.000Z | import tarfile
import os
x = 1000
while x > 0:
print(x)
if not tarfile.is_tarfile("{}.tar".format(x)):
break
with tarfile.open("{}.tar".format(x)) as tar:
tar.extractall('./')
if x < 1000:
os.unlink("{}.tar".format(x))
x -= 1
| 18.133333 | 50 | 0.536765 | import tarfile
import os
x = 1000
while x > 0:
print(x)
if not tarfile.is_tarfile("{}.tar".format(x)):
break
with tarfile.open("{}.tar".format(x)) as tar:
tar.extractall('./')
if x < 1000:
os.unlink("{}.tar".format(x))
x -= 1
| true | true |
f72e4f1d7832b90b3a9381e99472a22906e78302 | 1,273 | py | Python | setup.py | wingechr/pystache-cli | 8a44ab393c7120f1acd09ff03bb69c24a6705581 | [
"CC0-1.0"
] | null | null | null | setup.py | wingechr/pystache-cli | 8a44ab393c7120f1acd09ff03bb69c24a6705581 | [
"CC0-1.0"
] | null | null | null | setup.py | wingechr/pystache-cli | 8a44ab393c7120f1acd09ff03bb69c24a6705581 | [
"CC0-1.0"
] | null | null | null | from setuptools import setup
if __name__ == "__main__":
with open("README.md", encoding="utf-8") as file:
long_description = file.read()
setup(
packages=['pystache_cli'],
keywords=["cli", "pystache", "template"],
install_requires=["pystache"],
name='pystache-cli',
description="Extended command line client for pystache",
long_description=long_description,
long_description_content_type="text/markdown", # text/markdown or text/x-rst or text/plain
version="0.3.4",
author="Christian Winger",
author_email="c@wingechr.de",
url="https://github.com/wingechr/pystache-cli",
download_url="https://github.com/wingechr/pystache-cli",
platforms=["any"],
license="Public Domain",
project_urls={"Bug Tracker": "https://github.com/wingechr/pystache-cli",},
classifiers=[
"Programming Language :: Python :: 3",
"License :: CC0 1.0 Universal (CC0 1.0) Public Domain Dedication",
"Operating System :: OS Independent",
],
entry_points={
"console_scripts": ["pystache-cli = pystache_cli.pystache_cli:main"]
},
package_data={"tests": ["data/**"]},
)
| 37.441176 | 99 | 0.601728 | from setuptools import setup
if __name__ == "__main__":
with open("README.md", encoding="utf-8") as file:
long_description = file.read()
setup(
packages=['pystache_cli'],
keywords=["cli", "pystache", "template"],
install_requires=["pystache"],
name='pystache-cli',
description="Extended command line client for pystache",
long_description=long_description,
long_description_content_type="text/markdown",
version="0.3.4",
author="Christian Winger",
author_email="c@wingechr.de",
url="https://github.com/wingechr/pystache-cli",
download_url="https://github.com/wingechr/pystache-cli",
platforms=["any"],
license="Public Domain",
project_urls={"Bug Tracker": "https://github.com/wingechr/pystache-cli",},
classifiers=[
"Programming Language :: Python :: 3",
"License :: CC0 1.0 Universal (CC0 1.0) Public Domain Dedication",
"Operating System :: OS Independent",
],
entry_points={
"console_scripts": ["pystache-cli = pystache_cli.pystache_cli:main"]
},
package_data={"tests": ["data/**"]},
)
| true | true |
f72e4f337e5d46230215fadd2cf839bd8d810a33 | 1,904 | py | Python | client/pipe_mic.py | HubertReX/jasper-client | a161d5ad593f9f5b87535ed84643629fc5cb1138 | [
"JasPer-2.0",
"Unlicense",
"MIT"
] | null | null | null | client/pipe_mic.py | HubertReX/jasper-client | a161d5ad593f9f5b87535ed84643629fc5cb1138 | [
"JasPer-2.0",
"Unlicense",
"MIT"
] | null | null | null | client/pipe_mic.py | HubertReX/jasper-client | a161d5ad593f9f5b87535ed84643629fc5cb1138 | [
"JasPer-2.0",
"Unlicense",
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# a może teraz?
"""
A drop-in replacement for the Mic class that gets input from
named pipe. It can be anny source, by here the goal is to
communicate with www flask server, where voice recognition
is done through chrome browser. We get plain text with
no need for stt engine (chrome browser uses google engine).
"""
import re
import alteration
import os
import io
import str_formater
PIPE_NAME = '/home/osmc/flask/jasper_pipe_mic'
class Mic:
    """Pipe-backed Mic drop-in: reads already-recognized text lines from a
    named FIFO (fed by a Flask/Chrome speech front end) instead of running
    a local STT engine.
    """

    # Last utterance returned by activeListen(); replayed when LISTEN=False.
    # Class-level default so it is defined before the first capture.
    prev = None

    def __init__(self, speaker, passive_stt_engine, active_stt_engine, logger):
        """Open (creating if necessary) the named pipe.

        The two STT engine arguments are accepted only for interface
        compatibility with the real Mic class and are ignored here.
        """
        self.speaker = speaker
        self.first_run = True
        self.logger = logger
        try:
            if not os.path.exists(PIPE_NAME):
                os.mkfifo(PIPE_NAME)
            self.pipein = io.open(PIPE_NAME, 'r')
        except OSError:
            # Was a bare "except:", which also swallowed KeyboardInterrupt
            # and SystemExit; only filesystem/pipe errors are expected here.
            self.logger.error("error preparing named pipe", exc_info=True)
            exit(1)

    def passiveListen(self, PERSONA):
        # The web front end only forwards addressed utterances, so the
        # passive phase always reports the hotword as heard.
        return True, "JASPER"

    def activeListen(self, THRESHOLD=None, LISTEN=True, MUSIC=False):
        """Block until a non-empty line arrives on the pipe and return it.

        The very first call returns "" (startup handshake); when LISTEN is
        false the previous utterance is replayed instead of reading.
        """
        if self.first_run:
            self.first_run = False
            return ""
        if not LISTEN:
            return self.prev

        # readline() keeps the trailing newline, so strip the last char;
        # empty results (bare newlines) are skipped.  Renamed from the
        # original local "input", which shadowed the builtin.
        line = ""
        while not line:
            line = self.pipein.readline()[:-1]

        line = str_formater.unicodeToUTF8(line, self.logger)
        self.prev = line
        return line

    def say(self, phrase, OPTIONS=None):
        """Log and speak *phrase* through the configured speaker."""
        self.logger.info(">>>>>>>>>>>>>>>>>>>")
        self.logger.info("JAN: " + phrase )
        self.logger.info(">>>>>>>>>>>>>>>>>>>")
        phrase = alteration.clean(phrase)
        self.speaker.say(phrase)
| 30.709677 | 79 | 0.602941 |
import re
import alteration
import os
import io
import str_formater
PIPE_NAME = '/home/osmc/flask/jasper_pipe_mic'
class Mic:
prev = None
def __init__(self, speaker, passive_stt_engine, active_stt_engine, logger):
self.speaker = speaker
self.first_run = True
self.logger = logger
try:
if not os.path.exists(PIPE_NAME):
os.mkfifo(PIPE_NAME)
self.pipein = io.open(PIPE_NAME, 'r')
except:
self.logger.error("error preparing named pipe", exc_info=True)
exit(1)
return
def passiveListen(self, PERSONA):
return True, "JASPER"
def activeListen(self, THRESHOLD=None, LISTEN=True, MUSIC=False):
if self.first_run:
self.first_run = False
return ""
if not LISTEN:
return self.prev
stop = False
while not stop:
input = self.pipein.readline()[:-1]
if input:
stop = True
input = str_formater.unicodeToUTF8(input, self.logger)
self.prev = input
return input
def say(self, phrase, OPTIONS=None):
self.logger.info(">>>>>>>>>>>>>>>>>>>")
self.logger.info("JAN: " + phrase )
self.logger.info(">>>>>>>>>>>>>>>>>>>")
phrase = alteration.clean(phrase)
self.speaker.say(phrase)
| true | true |
f72e503eef72185c5b47c7c34969961e1c0adab9 | 674 | py | Python | mars/deploy/local/__init__.py | lmatz/mars | 45f9166b54eb91b21e66cef8b590a41aa8ac9569 | [
"Apache-2.0"
] | 1 | 2018-12-26T08:37:04.000Z | 2018-12-26T08:37:04.000Z | mars/deploy/local/__init__.py | lmatz/mars | 45f9166b54eb91b21e66cef8b590a41aa8ac9569 | [
"Apache-2.0"
] | null | null | null | mars/deploy/local/__init__.py | lmatz/mars | 45f9166b54eb91b21e66cef8b590a41aa8ac9569 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .core import new_cluster
| 35.473684 | 74 | 0.749258 |
from .core import new_cluster
| true | true |
f72e50bca50dee1f81e09e845987a0210c3b21a2 | 1,948 | py | Python | great_expectations/render/page_renderer_util.py | isichei/great_expectations | b408f2e61f95a2d0f233902cedfb4b2807b814ad | [
"Apache-2.0"
] | 1 | 2020-10-22T19:54:10.000Z | 2020-10-22T19:54:10.000Z | great_expectations/render/page_renderer_util.py | isichei/great_expectations | b408f2e61f95a2d0f233902cedfb4b2807b814ad | [
"Apache-2.0"
] | 21 | 2020-08-05T07:15:47.000Z | 2021-04-29T05:35:52.000Z | great_expectations/render/page_renderer_util.py | isichei/great_expectations | b408f2e61f95a2d0f233902cedfb4b2807b814ad | [
"Apache-2.0"
] | null | null | null | import warnings
from great_expectations.render.renderer import ValidationResultsPageRenderer
from great_expectations.render.view import DefaultMarkdownPageView
from great_expectations.validation_operators.types.validation_operator_result import (
ValidationOperatorResult,
)
def render_multiple_validation_result_pages_markdown(
validation_operator_result: ValidationOperatorResult, run_info_at_end: bool = True,
) -> str:
"""
Loop through and render multiple validation results to markdown.
Args:
validation_operator_result: (ValidationOperatorResult) Result of validation operator run
run_info_at_end: move run info below expectation results
Returns:
string containing formatted markdown validation results
"""
warnings.warn(
"This 'render_multiple_validation_result_pages_markdown' function will be deprecated "
"Please use ValidationResultsPageRenderer.render_validation_operator_result() instead."
"E.g. to replicate the functionality of rendering a ValidationOperatorResult to markdown:"
"validation_results_page_renderer = ValidationResultsPageRenderer("
" run_info_at_end=run_info_at_end"
")"
"rendered_document_content_list = validation_results_page_renderer.render_validation_operator_result("
" validation_operator_result=validation_operator_result"
")"
'return " ".join(DefaultMarkdownPageView().render(rendered_document_content_list))'
"Please update code accordingly.",
DeprecationWarning,
)
validation_results_page_renderer = ValidationResultsPageRenderer(
run_info_at_end=run_info_at_end
)
rendered_document_content_list = validation_results_page_renderer.render_validation_operator_result(
validation_operator_result=validation_operator_result
)
return " ".join(DefaultMarkdownPageView().render(rendered_document_content_list))
| 43.288889 | 110 | 0.780287 | import warnings
from great_expectations.render.renderer import ValidationResultsPageRenderer
from great_expectations.render.view import DefaultMarkdownPageView
from great_expectations.validation_operators.types.validation_operator_result import (
ValidationOperatorResult,
)
def render_multiple_validation_result_pages_markdown(
validation_operator_result: ValidationOperatorResult, run_info_at_end: bool = True,
) -> str:
warnings.warn(
"This 'render_multiple_validation_result_pages_markdown' function will be deprecated "
"Please use ValidationResultsPageRenderer.render_validation_operator_result() instead."
"E.g. to replicate the functionality of rendering a ValidationOperatorResult to markdown:"
"validation_results_page_renderer = ValidationResultsPageRenderer("
" run_info_at_end=run_info_at_end"
")"
"rendered_document_content_list = validation_results_page_renderer.render_validation_operator_result("
" validation_operator_result=validation_operator_result"
")"
'return " ".join(DefaultMarkdownPageView().render(rendered_document_content_list))'
"Please update code accordingly.",
DeprecationWarning,
)
validation_results_page_renderer = ValidationResultsPageRenderer(
run_info_at_end=run_info_at_end
)
rendered_document_content_list = validation_results_page_renderer.render_validation_operator_result(
validation_operator_result=validation_operator_result
)
return " ".join(DefaultMarkdownPageView().render(rendered_document_content_list))
| true | true |
f72e511c4590e0ef1a936bbff413f8650790b642 | 302 | py | Python | Automation/Build_Mods_Rebuild.py | NeonOcean/Environment | ca658cf66e8fd6866c22a4a0136d415705b36d26 | [
"CC-BY-4.0"
] | 1 | 2021-05-20T19:33:37.000Z | 2021-05-20T19:33:37.000Z | Automation/Build_Mods_Rebuild.py | NeonOcean/Environment | ca658cf66e8fd6866c22a4a0136d415705b36d26 | [
"CC-BY-4.0"
] | null | null | null | Automation/Build_Mods_Rebuild.py | NeonOcean/Environment | ca658cf66e8fd6866c22a4a0136d415705b36d26 | [
"CC-BY-4.0"
] | null | null | null | from Automation import Mods
import datetime
def Run () -> bool:
for modNamespace in Mods.GetAllModNames(): # type: str
Mods.BuildModRebuild(modNamespace)
print("All mods built. " + datetime.datetime.now().strftime("%I:%M %p"))
return True
if __name__ == "__main__":
if not Run():
exit(1)
| 18.875 | 73 | 0.692053 | from Automation import Mods
import datetime
def Run () -> bool:
for modNamespace in Mods.GetAllModNames():
Mods.BuildModRebuild(modNamespace)
print("All mods built. " + datetime.datetime.now().strftime("%I:%M %p"))
return True
if __name__ == "__main__":
if not Run():
exit(1)
| true | true |
f72e51b8b2347bbee5b69c71f604e4985594661d | 541 | py | Python | zmodulo/plot/text/text.py | aaruff/Z-Modulo | 53ae0b1e07c2b13cb08f7d803623010f508ba1b7 | [
"AFL-3.0"
] | 1 | 2019-03-24T03:12:28.000Z | 2019-03-24T03:12:28.000Z | zmodulo/plot/text/text.py | aaruff/Z-Modulo | 53ae0b1e07c2b13cb08f7d803623010f508ba1b7 | [
"AFL-3.0"
] | null | null | null | zmodulo/plot/text/text.py | aaruff/Z-Modulo | 53ae0b1e07c2b13cb08f7d803623010f508ba1b7 | [
"AFL-3.0"
] | null | null | null | class Text:
""" The Plot Text Text Template
"""
def __init__(self, text=""):
"""
Initializes the plot text Text
:param text: plot text text
:type text: str
"""
self.text = text
self.template = '\ttext = "{text}";\n'
def to_str(self):
"""
Converts the plot text text instance to a z-tree text property declaration.
:return: plot text text property declaration
:rtype: str
"""
return self.template.format(text=self.text)
| 25.761905 | 83 | 0.55268 | class Text:
def __init__(self, text=""):
self.text = text
self.template = '\ttext = "{text}";\n'
def to_str(self):
return self.template.format(text=self.text)
| true | true |
f72e5432db3d79bb9910e5b840d130990fb67b94 | 17,453 | py | Python | MoLtimestepping/C_Code_Generation.py | Steve-Hawk/nrpytutorial | 42d7450dba8bf43aa9c2d8f38f85f18803de69b7 | [
"BSD-2-Clause"
] | null | null | null | MoLtimestepping/C_Code_Generation.py | Steve-Hawk/nrpytutorial | 42d7450dba8bf43aa9c2d8f38f85f18803de69b7 | [
"BSD-2-Clause"
] | null | null | null | MoLtimestepping/C_Code_Generation.py | Steve-Hawk/nrpytutorial | 42d7450dba8bf43aa9c2d8f38f85f18803de69b7 | [
"BSD-2-Clause"
] | 1 | 2021-03-02T12:51:56.000Z | 2021-03-02T12:51:56.000Z | # As documented in the NRPy+ tutorial module
# Tutorial-RK_Butcher_Table_Generating_C_Code.ipynb,
# this module will produce the required C codes for
# allocating required memory Method of Lines (MoL) timestepping,
# implementing MoL timestepping, and deallocating memory
# Authors: Brandon Clark
# Zachariah B. Etienne
# zachetie **at** gmail **dot* com
# Step 1: Initialize needed Python/NRPy+ modules
import sympy as sp # Import SymPy, a computer algebra system written entirely in Python
import os # Standard Python module for multiplatform OS-level functions
from MoLtimestepping.RK_Butcher_Table_Dictionary import Butcher_dict
# Step 2: Checking if Butcher Table is Diagonal
def diagonal(key):
diagonal = True # Start with the Butcher table is diagonal
Butcher = Butcher_dict[key][0]
L = len(Butcher)-1 # Establish the number of rows to check for diagonal trait, all bust last row
row_idx = 0 # Initialize the Butcher table row index
for i in range(L): # Check all the desired rows
for j in range(1,row_idx): # Check each element before the diagonal element in a row
if Butcher[i][j] != sp.sympify(0): # If any non-diagonal coeffcient is non-zero,
# then the table is not diagonal
diagonal = False
return diagonal
row_idx += 1 # Update to check the next row
return diagonal
# Step 3.a: When allocating memory, we populate a list malloced_gridfunctions,
# which is used here to determine which gridfunctions need memory freed,
# via the free() command. Free the mallocs!
def free_allocated_memory(outdir,RK_method,malloced_gridfunctions):
# This step is made extremely easy, as we had to
with open(os.path.join(outdir, "RK_Free_Memory.h"), "w") as file:
file.write("// Code snippet freeing gridfunction memory for \"" + RK_method + "\" method:\n")
for gridfunction in malloced_gridfunctions:
file.write("free(" + gridfunction + ");\n")
# # State whether each Butcher table is diagonal or not
# for key, value in Butcher_dict.items():
# if diagonal(key) == True:
# print("The RK method "+str(key)+" is diagonal! \n")
# else:
# print("The RK method "+str(key)+" is NOT diagonal! \n")
# #################################################################
# Step 3.b: Main driver function for outputting all the MoL C Code
def MoL_C_Code_Generation(RK_method = "RK4", RHS_string = "", post_RHS_string = "",outdir="MoLtimestepping/",
MemAllocOnly=False):
####### Step 3.b.i: Allocating Memory
malloc_str = "// Code snippet allocating gridfunction memory for \"" + RK_method + "\" method:\n"
# Loop over grids
malloced_gridfunctions = []
# Set gridfunction type
type_str = "REAL *restrict "
# Define a couple useful functions for outputting the needed C code for allocating memory
def malloc_gfs_str(varname):
malloced_gridfunctions.append(varname)
memory_alloc_str = " = (REAL *)malloc(sizeof(REAL) * NUM_EVOL_GFS * Nxx_plus_2NGHOSTS_tot"+")"
return type_str + varname + memory_alloc_str + ";\n"
def diagnostic_output_gfs_equal_to(gfs):
return type_str + "diagnostic_output_gfs"+" = "+gfs + ";\n"
# No matter the method we define gridfunctions "y_n_gfs" to store the initial data
malloc_str += malloc_gfs_str("y_n_gfs")
if diagonal(RK_method) == True and "RK3" in RK_method:
malloc_str += malloc_gfs_str("k1_or_y_nplus_a21_k1_or_y_nplus1_running_total_gfs")
malloc_str += malloc_gfs_str("k2_or_y_nplus_a32_k2_gfs")
malloc_str += diagnostic_output_gfs_equal_to("k1_or_y_nplus_a21_k1_or_y_nplus1_running_total_gfs")
else:
if diagonal(RK_method) == False: # Allocate memory for non-diagonal Butcher tables
# Determine the number of k_i steps based on length of Butcher Table
num_k = len(Butcher_dict[RK_method][0])-1
# For non-diagonal tables an intermediate gridfunction "next_y_input" is used for rhs evaluations
malloc_str += malloc_gfs_str("next_y_input_gfs")
for i in range(num_k): # Need to allocate all k_i steps for a given method
malloc_str += malloc_gfs_str("k"+str(i+1)+"_gfs")
malloc_str += diagnostic_output_gfs_equal_to("k1_gfs")
else: # Allocate memory for diagonal Butcher tables, which use a "y_nplus1_running_total gridfunction"
malloc_str += malloc_gfs_str("y_nplus1_running_total_gfs")
if RK_method != 'Euler': # Allocate memory for diagonal Butcher tables that aren't Euler
# Need k_odd for k_1,3,5... and k_even for k_2,4,6...
malloc_str += malloc_gfs_str("k_odd_gfs")
malloc_str += malloc_gfs_str("k_even_gfs")
malloc_str += diagnostic_output_gfs_equal_to("y_nplus1_running_total_gfs")
with open(os.path.join(outdir,"RK_Allocate_Memory.h"), "w") as file:
file.write(malloc_str)
if MemAllocOnly:
free_allocated_memory(outdir,RK_method,malloced_gridfunctions)
return
########################################################################################################################
# EXAMPLE
# ODE: y' = f(t,y), y(t_0) = y_0
# Starting at time t_n with solution having value y_n and trying to update to y_nplus1 with timestep dt
# Example of scheme for RK4 with k_1, k_2, k_3, k_4 (Using non-diagonal algortihm) Notice this requires storage of
# y_n, y_nplus1, k_1 through k_4
# k_1 = dt*f(t_n, y_n)
# k_2 = dt*f(t_n + 1/2*dt, y_n + 1/2*k_1)
# k_3 = dt*f(t_n + 1/2*dt, y_n + 1/2*k_2)
# k_4 = dt*f(t_n + dt, y_n + k_3)
# y_nplus1 = y_n + 1/3k_1 + 1/6k_2 + 1/6k_3 + 1/3k_4
# Example of scheme RK4 using only k_odd and k_even (Diagonal algroithm) Notice that this only requires storage
# k_odd = dt*f(t_n, y_n)
# y_nplus1 = 1/3*k_odd
# k_even = dt*f(t_n + 1/2*dt, y_n + 1/2*k_odd)
# y_nplus1 += 1/6*k_even
# k_odd = dt*f(t_n + 1/2*dt, y_n + 1/2*k_even)
# y_nplus1 += 1/6*k_odd
# k_even = dt*f(t_n + dt, y_n + k_odd)
# y_nplus1 += 1/3*k_even
########################################################################################################################
####### Step 3.b.ii: Implementing the Runge Kutta Scheme for Method of Lines Timestepping
Butcher = Butcher_dict[RK_method][0] # Get the desired Butcher table from the dictionary
num_steps = len(Butcher)-1 # Specify the number of required steps to update solution
indent = " "
RK_str = "// Code snippet implementing "+RK_method+" algorithm for Method of Lines timestepping\n"
# Diagonal RK3 only!!!
def single_RK_substep(commentblock, RHS_str, RHS_input_str, RHS_output_str, RK_lhss_list, RK_rhss_list,
post_RHS_list, post_RHS_output_list, indent = " "):
return_str = commentblock + "\n"
if not isinstance(RK_lhss_list,list):
RK_lhss_list = [RK_lhss_list]
if not isinstance(RK_rhss_list,list):
RK_rhss_list = [RK_rhss_list]
if not isinstance(post_RHS_list,list):
post_RHS_list = [post_RHS_list]
if not isinstance(post_RHS_output_list,list):
post_RHS_output_list = [post_RHS_output_list]
# Part 1: RHS evaluation:
return_str += RHS_str.replace("RK_INPUT_GFS", RHS_input_str).\
replace("RK_OUTPUT_GFS",RHS_output_str)+"\n"
# Part 2: RK update
return_str += "LOOP_ALL_GFS_GPS"+"(i) {\n"
for lhs,rhs in zip(RK_lhss_list,RK_rhss_list):
return_str += indent + lhs + "[i] = " + rhs.replace("_gfs","_gfs") + ";\n"
return_str += "}\n"
# Part 3: Call post-RHS functions
for post_RHS,post_RHS_output in zip(post_RHS_list,post_RHS_output_list):
return_str += post_RHS.replace("RK_OUTPUT_GFS",post_RHS_output)+"\n"
return return_str+"\n"
RK_str = "// C code implementation of " + RK_method + " Method of Lines timestepping.\n"
if diagonal(RK_method) == True and "RK3" in RK_method:
# In a diagonal RK3 method, only 3 gridfunctions need be defined. Below implements this approach.
# k_1
RK_str += """
// In a diagonal RK3 method like this one, only 3 gridfunctions need be defined. Below implements this approach.
// Using y_n_gfs as input, k1 and apply boundary conditions\n"""
RK_str += single_RK_substep(
commentblock = """
// ***k1 substep:***
// 1. We will store k1_or_y_nplus_a21_k1_or_y_nplus1_running_total_gfs now as
// ... the update for the next rhs evaluation y_n + a21*k1*dt
// Post-RHS evaluation:
// 1. Apply post-RHS to y_n + a21*k1*dt""",
RHS_str = RHS_string,
RHS_input_str = "y_n_gfs", RHS_output_str = "k1_or_y_nplus_a21_k1_or_y_nplus1_running_total_gfs",
RK_lhss_list = ["k1_or_y_nplus_a21_k1_or_y_nplus1_running_total_gfs"],
RK_rhss_list = ["("+sp.ccode(Butcher[1][1]).replace("L","")+")*k1_or_y_nplus_a21_k1_or_y_nplus1_running_total_gfs[i]*dt + y_n_gfs[i]"],
post_RHS_list = [post_RHS_string], post_RHS_output_list = ["k1_or_y_nplus_a21_k1_or_y_nplus1_running_total_gfs"])
# k_2
RK_str += single_RK_substep(
commentblock="""
// ***k2 substep:***
// 1. Reassign k1_or_y_nplus_a21_k1_or_y_nplus1_running_total_gfs to be the running total y_{n+1}; a32*k2*dt to the running total
// 2. Store k2_or_y_nplus_a32_k2_gfs now as y_n + a32*k2*dt
// Post-RHS evaluation:
// 1. Apply post-RHS to both y_n + a32*k2 (stored in k2_or_y_nplus_a32_k2_gfs)
// ... and the y_{n+1} running total, as they have not been applied yet to k2-related gridfunctions""",
RHS_str=RHS_string,
RHS_input_str="k1_or_y_nplus_a21_k1_or_y_nplus1_running_total_gfs", RHS_output_str="k2_or_y_nplus_a32_k2_gfs",
RK_lhss_list=["k1_or_y_nplus_a21_k1_or_y_nplus1_running_total_gfs","k2_or_y_nplus_a32_k2_gfs"],
RK_rhss_list=["("+sp.ccode(Butcher[3][1]).replace("L","")+")*(k1_or_y_nplus_a21_k1_or_y_nplus1_running_total_gfs[i] - y_n_gfs[i])/("+sp.ccode(Butcher[1][1]).replace("L","")+") + y_n_gfs[i] + ("+sp.ccode(Butcher[3][2]).replace("L","")+")*k2_or_y_nplus_a32_k2_gfs[i]*dt",
"("+sp.ccode(Butcher[2][2]).replace("L","")+")*k2_or_y_nplus_a32_k2_gfs[i]*dt + y_n_gfs[i]"],
post_RHS_list=[post_RHS_string,post_RHS_string],
post_RHS_output_list=["k2_or_y_nplus_a32_k2_gfs","k1_or_y_nplus_a21_k1_or_y_nplus1_running_total_gfs"])
# k_3
RK_str += single_RK_substep(
commentblock="""
// ***k3 substep:***
// 1. Add k3 to the running total and save to y_n
// Post-RHS evaluation:
// 1. Apply post-RHS to y_n""",
RHS_str=RHS_string,
RHS_input_str="k2_or_y_nplus_a32_k2_gfs", RHS_output_str="y_n_gfs",
RK_lhss_list=["y_n_gfs","k2_or_y_nplus_a32_k2_gfs"],
RK_rhss_list=["k1_or_y_nplus_a21_k1_or_y_nplus1_running_total_gfs[i] + ("+sp.ccode(Butcher[3][3]).replace("L","")+")*y_n_gfs[i]*dt"],
post_RHS_list=[post_RHS_string],
post_RHS_output_list=["y_n_gfs"])
else:
y_n = "y_n_gfs"
if diagonal(RK_method) == False:
for s in range(num_steps):
next_y_input = "next_y_input_gfs"
# If we're on the first step (s=0), we use y_n gridfunction as input.
# Otherwise next_y_input is input. Output is just the reverse.
if s==0: # If on first step:
RHS_input = y_n
else: # If on second step or later:
RHS_input = next_y_input
RHS_output = "k" + str(s + 1) + "_gfs"
if s == num_steps-1: # If on final step:
RK_lhs = y_n
RK_rhs = y_n + "[i] + dt*("
else: # If on anything but the final step:
RK_lhs = next_y_input
RK_rhs = y_n + "[i] + dt*("
for m in range(s+1):
if Butcher[s+1][m+1] != 0:
if Butcher[s+1][m+1] != 1:
RK_rhs += " + k"+str(m+1)+"_gfs[i]*("+sp.ccode(Butcher[s+1][m+1]).replace("L","")+")"
else:
RK_rhs += " + k"+str(m+1)+"_gfs[i]"
RK_rhs += " )"
post_RHS = post_RHS_string
if s == num_steps-1: # If on final step:
post_RHS_output = y_n
else: # If on anything but the final step:
post_RHS_output = next_y_input
RK_str += single_RK_substep(
commentblock="// ***k" + str(s + 1) + " substep:***",
RHS_str=RHS_string,
RHS_input_str=RHS_input, RHS_output_str=RHS_output,
RK_lhss_list=[RK_lhs], RK_rhss_list=[RK_rhs],
post_RHS_list=[post_RHS],
post_RHS_output_list=[post_RHS_output])
else:
y_nplus1_running_total = "y_nplus1_running_total_gfs"
if RK_method == 'Euler': # Euler's method doesn't require any k_i, and gets its own unique algorithm
RK_str += single_RK_substep(
commentblock="// ***Euler timestepping only requires one RHS evaluation***",
RHS_str=RHS_string,
RHS_input_str=y_n, RHS_output_str=y_nplus1_running_total,
RK_lhss_list=[y_n], RK_rhss_list=[y_n+"[i] + "+y_nplus1_running_total+"[i]*dt"],
post_RHS_list=[post_RHS_string],
post_RHS_output_list=[y_n])
else:
for s in range(num_steps):
# If we're on the first step (s=0), we use y_n gridfunction as input.
# and k_odd as output.
if s == 0:
RHS_input = "y_n_gfs"
RHS_output = "k_odd_gfs"
# For the remaining steps the inputs and ouputs alternate between k_odd and k_even
elif s%2 == 0:
RHS_input = "k_even_gfs"
RHS_output = "k_odd_gfs"
else:
RHS_input = "k_odd_gfs"
RHS_output = "k_even_gfs"
RK_lhs_list = []
RK_rhs_list = []
if s != num_steps-1: # For anything besides the final step
if s == 0: # The first RK step
RK_lhs_list.append(y_nplus1_running_total)
RK_rhs_list.append(RHS_output+"[i]*dt*("+sp.ccode(Butcher[num_steps][s+1]).replace("L","")+")")
RK_lhs_list.append(RHS_output)
RK_rhs_list.append(y_n+"[i] + "+RHS_output+"[i]*dt*("+sp.ccode(Butcher[s+1][s+1]).replace("L","")+")")
else:
if Butcher[num_steps][s+1] !=0:
RK_lhs_list.append(y_nplus1_running_total)
if Butcher[num_steps][s+1] !=1:
RK_rhs_list.append(y_nplus1_running_total+"[i] + "+RHS_output+"[i]*dt*("+sp.ccode(Butcher[num_steps][s+1]).replace("L","")+")")
else:
RK_rhs_list.append(y_nplus1_running_total+"[i] + "+RHS_output+"[i]*dt")
if Butcher[s+1][s+1] !=0:
RK_lhs_list.append(RHS_output)
if Butcher[s+1][s+1] !=1:
RK_rhs_list.append(y_n+"[i] + "+RHS_output+"[i]*dt*("+sp.ccode(Butcher[s+1][s+1]).replace("L","")+")")
else:
RK_rhs_list.append(y_n+"[i] + "+RHS_output+"[i]*dt")
post_RHS_output = RHS_output
if s == num_steps-1: # If on the final step
if Butcher[num_steps][s+1] != 0:
RK_lhs_list.append(y_n)
if Butcher[num_steps][s+1] != 1:
RK_rhs_list.append(y_n+"[i] + "+y_nplus1_running_total+"[i] + "+RHS_output+"[i]*dt*("+sp.ccode(Butcher[num_steps][s+1]).replace("L","")+")")
else:
RK_rhs_list.append(y_n+"[i] + "+y_nplus1_running_total+"[i] + "+RHS_output+"[i]*dt)")
post_RHS_output = y_n
RK_str += single_RK_substep(
commentblock="// ***k" + str(s + 1) + " substep:***",
RHS_str=RHS_string,
RHS_input_str=RHS_input, RHS_output_str=RHS_output,
RK_lhss_list=RK_lhs_list, RK_rhss_list=RK_rhs_list,
post_RHS_list=[post_RHS_string],
post_RHS_output_list=[post_RHS_output])
with open(os.path.join(outdir,"RK_MoL.h"), "w") as file:
file.write(RK_str)
####### Step 3.b.iii: Freeing Allocated Memory
free_allocated_memory(outdir,RK_method,malloced_gridfunctions) | 53.701538 | 281 | 0.580531 |
import sympy as sp
import os
from MoLtimestepping.RK_Butcher_Table_Dictionary import Butcher_dict
def diagonal(key):
diagonal = True
Butcher = Butcher_dict[key][0]
L = len(Butcher)-1
row_idx = 0
for i in range(L):
for j in range(1,row_idx):
if Butcher[i][j] != sp.sympify(0):
diagonal = False
return diagonal
row_idx += 1
return diagonal
def free_allocated_memory(outdir,RK_method,malloced_gridfunctions):
with open(os.path.join(outdir, "RK_Free_Memory.h"), "w") as file:
file.write("// Code snippet freeing gridfunction memory for \"" + RK_method + "\" method:\n")
for gridfunction in malloced_gridfunctions:
file.write("free(" + gridfunction + ");\n")
RK_lhs_list.append(y_nplus1_running_total)
RK_rhs_list.append(RHS_output+"[i]*dt*("+sp.ccode(Butcher[num_steps][s+1]).replace("L","")+")")
RK_lhs_list.append(RHS_output)
RK_rhs_list.append(y_n+"[i] + "+RHS_output+"[i]*dt*("+sp.ccode(Butcher[s+1][s+1]).replace("L","")+")")
else:
if Butcher[num_steps][s+1] !=0:
RK_lhs_list.append(y_nplus1_running_total)
if Butcher[num_steps][s+1] !=1:
RK_rhs_list.append(y_nplus1_running_total+"[i] + "+RHS_output+"[i]*dt*("+sp.ccode(Butcher[num_steps][s+1]).replace("L","")+")")
else:
RK_rhs_list.append(y_nplus1_running_total+"[i] + "+RHS_output+"[i]*dt")
if Butcher[s+1][s+1] !=0:
RK_lhs_list.append(RHS_output)
if Butcher[s+1][s+1] !=1:
RK_rhs_list.append(y_n+"[i] + "+RHS_output+"[i]*dt*("+sp.ccode(Butcher[s+1][s+1]).replace("L","")+")")
else:
RK_rhs_list.append(y_n+"[i] + "+RHS_output+"[i]*dt")
post_RHS_output = RHS_output
if s == num_steps-1:
if Butcher[num_steps][s+1] != 0:
RK_lhs_list.append(y_n)
if Butcher[num_steps][s+1] != 1:
RK_rhs_list.append(y_n+"[i] + "+y_nplus1_running_total+"[i] + "+RHS_output+"[i]*dt*("+sp.ccode(Butcher[num_steps][s+1]).replace("L","")+")")
else:
RK_rhs_list.append(y_n+"[i] + "+y_nplus1_running_total+"[i] + "+RHS_output+"[i]*dt)")
post_RHS_output = y_n
RK_str += single_RK_substep(
commentblock="// ***k" + str(s + 1) + " substep:***",
RHS_str=RHS_string,
RHS_input_str=RHS_input, RHS_output_str=RHS_output,
RK_lhss_list=RK_lhs_list, RK_rhss_list=RK_rhs_list,
post_RHS_list=[post_RHS_string],
post_RHS_output_list=[post_RHS_output])
with open(os.path.join(outdir,"RK_MoL.h"), "w") as file:
file.write(RK_str)
| true | true |
f72e55e4350c1244e5fa9db22aa56f8fc1fa74e5 | 19,603 | py | Python | benchmarks/launch_benchmark.py | wesleyhuang2014/intelai-models | f64dd11e6542a14bbc6048b6167201d3499f4bf1 | [
"Apache-2.0"
] | null | null | null | benchmarks/launch_benchmark.py | wesleyhuang2014/intelai-models | f64dd11e6542a14bbc6048b6167201d3499f4bf1 | [
"Apache-2.0"
] | null | null | null | benchmarks/launch_benchmark.py | wesleyhuang2014/intelai-models | f64dd11e6542a14bbc6048b6167201d3499f4bf1 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import signal
import subprocess
import sys
from argparse import ArgumentParser
from common import base_benchmark_util
from common import platform_util
from common.utils.validators import check_no_spaces, check_volume_mount, check_shm_size
from common.base_model_init import BaseModelInitializer
class LaunchBenchmark(base_benchmark_util.BaseBenchmarkUtil):
"""Launches benchmarking job based on the specified args """
def __init__(self, *args, **kwargs):
super(LaunchBenchmark, self).__init__(*args, **kwargs)
self.args, self.unknown_args = self.parse_args()
try:
self.validate_args()
except (IOError, ValueError) as e:
sys.exit("\nError: {}".format(e))
def main(self):
benchmark_scripts = os.path.dirname(os.path.realpath(__file__))
use_case = self.get_model_use_case(benchmark_scripts)
intelai_models = self.get_model_dir(benchmark_scripts, use_case)
intelai_models_common = self.get_model_dir(benchmark_scripts, "common")
env_var_dict = self.get_env_vars(benchmark_scripts, use_case, intelai_models,
intelai_models_common)
if self.args.docker_image:
if self.args.framework == 'tensorflow_serving':
self.run_bare_metal(benchmark_scripts, intelai_models,
intelai_models_common, env_var_dict)
elif self.args.framework == 'tensorflow':
self.run_docker_container(benchmark_scripts, intelai_models,
intelai_models_common, env_var_dict)
else:
self.run_bare_metal(benchmark_scripts, intelai_models,
intelai_models_common, env_var_dict)
def parse_args(self):
# Additional args that are only used with the launch script
arg_parser = ArgumentParser(
parents=[self._common_arg_parser],
description="Parse args for benchmark interface")
arg_parser.add_argument(
"--docker-image",
help="Specify the docker image/tag to use when running benchmarking within a container."
"If no docker image is specified, then no docker container will be used.",
dest="docker_image", default=None, type=check_no_spaces)
arg_parser.add_argument(
"--volume",
help="Specify a custom volume to mount in the container, which follows the same format as the "
"docker --volume flag (https://docs.docker.com/storage/volumes/). "
"This argument can only be used in conjunction with a --docker-image.",
action="append", dest="custom_volumes", type=check_volume_mount)
arg_parser.add_argument(
"--shm-size",
help="Specify the size of docker /dev/shm. The format is <number><unit>. "
"number must be greater than 0. Unit is optional and can be b (bytes), k (kilobytes), "
"m (megabytes), or g (gigabytes).",
dest="shm_size", default="64m", type=check_shm_size)
arg_parser.add_argument(
"--debug", help="Launches debug mode which doesn't execute "
"start.sh when running in a docker container.", action="store_true")
arg_parser.add_argument(
"--noinstall",
help="whether to install packages for a given model when running in docker "
"(default --noinstall='False') or on bare metal (default --noinstall='True')",
dest="noinstall", action="store_true", default=None)
return arg_parser.parse_known_args()
def validate_args(self):
"""validate the args"""
# validate that we support this framework by checking folder names
benchmark_dir = os.path.dirname(os.path.realpath(__file__))
if glob.glob("{}/*/{}".format(benchmark_dir, self.args.framework)) == []:
raise ValueError("The specified framework is not supported: {}".
format(self.args.framework))
# if neither benchmark_only or accuracy_only are specified, then enable
# benchmark_only as the default
if not self.args.benchmark_only and not self.args.accuracy_only:
self.args.benchmark_only = True
# default disable_tcmalloc=False for int8 and disable_tcmalloc=True for other precisions
if not self.args.disable_tcmalloc:
self.args.disable_tcmalloc = str(self.args.precision != "int8")
if self.args.custom_volumes and not self.args.docker_image:
raise ValueError("Volume mounts can only be used when running in a docker container "
"(a --docker-image must be specified when using --volume).")
if self.args.mode == "inference" and self.args.checkpoint:
print("Warning: The --checkpoint argument is being deprecated in favor of using frozen graphs.")
def get_model_use_case(self, benchmark_scripts):
"""
Infers the use case based on the directory structure for the specified model.
"""
args = self.args
# find the path to the model's benchmarks folder
search_path = os.path.join(
benchmark_scripts, "*", args.framework, args.model_name,
args.mode, args.precision)
matches = glob.glob(search_path)
error_str = ""
if len(matches) > 1:
error_str = "Found multiple model locations for {} {} {}"
elif len(matches) == 0:
error_str = "No model was found for {} {} {}"
if error_str:
raise ValueError(error_str.format(args.framework, args.model_name, args.precision))
# use the benchmarks directory path to find the use case
dir_list = matches[0].split("/")
# find the last occurrence of framework in the list, then return
# the element before it in the path, which is the use case
return next(dir_list[elem - 1] for elem in range(len(dir_list) - 1, -1, -1)
if dir_list[elem] == args.framework)
def get_model_dir(self, benchmark_scripts, use_case):
"""
Finds the path to the optimized model directory in this repo, if it exists.
"""
# use the models directory as a default
intelai_models = os.path.join(benchmark_scripts, os.pardir, "models")
if use_case == "common":
return os.path.join(intelai_models, "common", self.args.framework)
# find the intelai_optimized model directory
args = self.args
optimized_model_dir = os.path.join(
benchmark_scripts, os.pardir, "models", use_case,
args.framework, args.model_name)
# if we find an optimized model, then we will use that path
if os.path.isdir(optimized_model_dir):
intelai_models = optimized_model_dir
return intelai_models
def get_env_vars(self, benchmark_scripts, use_case, intelai_models,
intelai_models_common):
"""
Sets up dictionary of standard env vars that are used by start.sh
"""
# Standard env vars
args = self.args
env_var_dict = {
"ACCURACY_ONLY": args.accuracy_only,
"BACKBONE_MODEL_DIRECTORY_VOL": args.backbone_model,
"BATCH_SIZE": args.batch_size,
"BENCHMARK_ONLY": args.benchmark_only,
"BENCHMARK_SCRIPTS": benchmark_scripts,
"CHECKPOINT_DIRECTORY_VOL": args.checkpoint,
"DATASET_LOCATION_VOL": args.data_location,
"DATA_NUM_INTER_THREADS": args.data_num_inter_threads,
"DATA_NUM_INTRA_THREADS": args.data_num_intra_threads,
"DISABLE_TCMALLOC": args.disable_tcmalloc,
"DOCKER": args.docker_image or str(args.docker_image is not None),
"EXTERNAL_MODELS_SOURCE_DIRECTORY": args.model_source_dir,
"FRAMEWORK": args.framework,
"INTELAI_MODELS": intelai_models,
"INTELAI_MODELS_COMMON": intelai_models_common,
"MODE": args.mode,
"MODEL_NAME": args.model_name,
"MPI_HOSTNAMES": args.mpi_hostnames,
"MPI_NUM_PROCESSES": args.mpi,
"MPI_NUM_PROCESSES_PER_SOCKET": args.num_mpi,
"NOINSTALL": str(args.noinstall) if args.noinstall is not None else "True" if not args.docker_image else "False", # noqa: E501
"NUM_CORES": args.num_cores,
"NUM_INTER_THREADS": args.num_inter_threads,
"NUM_INTRA_THREADS": args.num_intra_threads,
"NUM_TRAIN_STEPS": args.num_train_steps,
"OUTPUT_RESULTS": args.output_results,
"PRECISION": args.precision,
"PYTHON_EXE": sys.executable if not args.docker_image else "python",
"SOCKET_ID": args.socket_id,
"TCMALLOC_LARGE_ALLOC_REPORT_THRESHOLD": args.tcmalloc_large_alloc_report_threshold,
"TF_SERVING_VERSION": args.tf_serving_version,
"USE_CASE": use_case,
"VERBOSE": args.verbose
}
# Add custom model args as env vars)
for custom_arg in args.model_args + self.unknown_args:
if "=" not in custom_arg:
raise ValueError("Expected model args in the format "
"`name=value` but received: {}".
format(custom_arg))
split_arg = custom_arg.split("=")
split_arg[0] = split_arg[0].replace("-", "_").lstrip('_')
env_var_dict[split_arg[0]] = split_arg[1]
return env_var_dict
    def run_bare_metal(self, benchmark_scripts, intelai_models,
                       intelai_models_common, env_var_dict):
        """
        Runs the model directly on the host (no docker container).

        Fills in the env vars that start.sh expects (substituting local
        system paths for the container mount points), exports them into
        os.environ, and then executes start.sh via bash.
        """
        # setup volume directories to be the local system directories, since we aren't
        # mounting volumes when running bare metal, but start.sh expects these args
        args = self.args
        workspace = os.path.join(benchmark_scripts, "common", args.framework)
        mount_benchmark = benchmark_scripts
        in_graph_path = args.input_graph
        checkpoint_path = args.checkpoint
        backbone_model_path = args.backbone_model
        dataset_path = args.data_location
        mount_external_models_source = args.model_source_dir
        mount_intelai_models = intelai_models
        # To Launch Tensorflow Serving benchmark we need only --in-graph arg.
        # It does not support checkpoint files.
        if args.framework == "tensorflow_serving":
            if checkpoint_path:
                raise ValueError("--checkpoint-path arg is not supported with tensorflow serving benchmarking")

            if args.mode != "inference":
                raise ValueError("--mode arg should be set to inference")

            if in_graph_path:
                env_var_dict["IN_GRAPH"] = in_graph_path
            else:
                raise ValueError("--in-graph arg is required to run tensorflow serving benchmarking")

            # Export now so PlatformUtil/BaseModelInitializer below can read them.
            for env_var_name in env_var_dict:
                os.environ[env_var_name] = str(env_var_dict[env_var_name])

            # We need this env to be set for the platform util
            os.environ["PYTHON_EXE"] = str(sys.executable if not args.docker_image else "python")

            # Get Platformutil
            platform_util_obj = None or platform_util.PlatformUtil(self.args)

            # Configure num_inter_threads and num_intra_threads
            base_obj = BaseModelInitializer(args=self.args, custom_args=[], platform_util=platform_util_obj)
            base_obj.set_num_inter_intra_threads()

            # Update num_inter_threads and num_intra_threads in env dictionary
            env_var_dict["NUM_INTER_THREADS"] = self.args.num_inter_threads
            env_var_dict["NUM_INTRA_THREADS"] = self.args.num_intra_threads

            # Set OMP_NUM_THREADS
            env_var_dict["OMP_NUM_THREADS"] = self.args.num_intra_threads

        else:
            # Regular (non-serving) bare metal run: mount points are just the
            # local directories themselves.
            mount_external_models_source = args.model_source_dir
            mount_intelai_models = intelai_models
            mount_intelai_models_common = intelai_models_common

            # Add env vars with bare metal settings
            env_var_dict["MOUNT_EXTERNAL_MODELS_SOURCE"] = mount_external_models_source
            env_var_dict["MOUNT_INTELAI_MODELS_SOURCE"] = mount_intelai_models
            env_var_dict["MOUNT_INTELAI_MODELS_COMMON_SOURCE"] = mount_intelai_models_common

            if in_graph_path:
                env_var_dict["IN_GRAPH"] = in_graph_path

            if checkpoint_path:
                env_var_dict["CHECKPOINT_DIRECTORY"] = checkpoint_path

            if backbone_model_path:
                env_var_dict["BACKBONE_MODEL_DIRECTORY"] = backbone_model_path

            if dataset_path:
                env_var_dict["DATASET_LOCATION"] = dataset_path

        # if using the default output directory, get the full path
        if args.output_dir == "/models/benchmarks/common/tensorflow/logs":
            args.output_dir = os.path.join(workspace, "logs")

        # Add env vars with bare metal settings
        env_var_dict["WORKSPACE"] = workspace
        env_var_dict["MOUNT_BENCHMARK"] = mount_benchmark
        env_var_dict["OUTPUT_DIR"] = args.output_dir

        # Set env vars for bare metal
        for env_var_name in env_var_dict:
            os.environ[env_var_name] = str(env_var_dict[env_var_name])

        # Run the start script
        start_script = os.path.join(workspace, "start.sh")
        self._launch_command(["bash", start_script])
    def run_docker_container(self, benchmark_scripts, intelai_models,
                             intelai_models_common, env_var_dict):
        """
        Runs a docker container with the specified image and environment
        variables to start running the benchmarking job.

        Builds the full "docker run" command: --env flags for every setting,
        --volume flags for the benchmark/model/dataset directories, proxy
        passthrough, and finally start.sh (skipped in --debug mode, which
        drops into an interactive shell instead).
        """
        args = self.args
        # Fixed mount points inside the container; the host paths are mapped
        # onto these via the --volume flags below.
        mount_benchmark = "/workspace/benchmarks"
        mount_external_models_source = "/workspace/models"
        mount_intelai_models = "/workspace/intelai_models"
        mount_intelai_models_common = "/workspace/intelai_models_common"
        workspace = os.path.join(mount_benchmark, "common", args.framework)

        mount_output_dir = False
        output_dir = os.path.join(workspace, 'logs')
        if args.output_dir != "/models/benchmarks/common/tensorflow/logs":
            # we don't need to mount log dir otherwise since default is workspace folder
            mount_output_dir = True
            output_dir = args.output_dir

        in_graph_dir = os.path.dirname(args.input_graph) if args.input_graph \
            else ""
        in_graph_filename = os.path.basename(args.input_graph) if \
            args.input_graph else ""

        # env vars with docker settings
        env_vars = ["--env", "WORKSPACE={}".format(workspace),
                    "--env", "MOUNT_BENCHMARK={}".format(mount_benchmark),
                    "--env", "MOUNT_EXTERNAL_MODELS_SOURCE={}".format(mount_external_models_source),
                    "--env", "MOUNT_INTELAI_MODELS_SOURCE={}".format(mount_intelai_models),
                    "--env", "MOUNT_INTELAI_MODELS_COMMON_SOURCE={}".format(mount_intelai_models_common),
                    "--env", "OUTPUT_DIR={}".format(output_dir)]

        if args.input_graph:
            env_vars += ["--env", "IN_GRAPH=/in_graph/{}".format(in_graph_filename)]

        if args.data_location:
            env_vars += ["--env", "DATASET_LOCATION=/dataset"]

        if args.checkpoint:
            env_vars += ["--env", "CHECKPOINT_DIRECTORY=/checkpoints"]

        if args.backbone_model:
            env_vars += ["--env", "BACKBONE_MODEL_DIRECTORY=/backbone_model"]

        # Add env vars with common settings
        for env_var_name in env_var_dict:
            env_vars += ["--env", "{}={}".format(env_var_name, env_var_dict[env_var_name])]

        # Add proxy to env variables if any set on host
        for environment_proxy_setting in [
            "http_proxy",
            "ftp_proxy",
            "https_proxy",
            "no_proxy",
        ]:
            if not os.environ.get(environment_proxy_setting):
                continue
            env_vars.append("--env")
            env_vars.append("{}={}".format(
                environment_proxy_setting,
                os.environ.get(environment_proxy_setting)
            ))

        volume_mounts = ["--volume", "{}:{}".format(benchmark_scripts, mount_benchmark),
                         "--volume", "{}:{}".format(args.model_source_dir, mount_external_models_source),
                         "--volume", "{}:{}".format(intelai_models, mount_intelai_models),
                         "--volume", "{}:{}".format(intelai_models_common, mount_intelai_models_common)]

        if mount_output_dir:
            # Custom output dir is mounted at the same path inside the container.
            volume_mounts.extend([
                "--volume", "{}:{}".format(output_dir, output_dir)])

        if args.data_location:
            volume_mounts.extend([
                "--volume", "{}:{}".format(args.data_location, "/dataset")])

        if args.checkpoint:
            volume_mounts.extend([
                "--volume", "{}:{}".format(args.checkpoint, "/checkpoints")])

        if args.backbone_model:
            volume_mounts.extend([
                "--volume", "{}:{}".format(args.backbone_model, "/backbone_model")])

        if in_graph_dir:
            volume_mounts.extend([
                "--volume", "{}:{}".format(in_graph_dir, "/in_graph")])

        if args.custom_volumes:
            for custom_volume in args.custom_volumes:
                volume_mounts.extend(["--volume", custom_volume])

        docker_run_cmd = ["docker", "run"]
        # only use -it when debugging, otherwise we might get TTY error
        if args.debug:
            docker_run_cmd.append("-it")

        docker_shm_size = "--shm-size={}".format(args.shm_size)
        docker_run_cmd = docker_run_cmd + env_vars + volume_mounts + [
            docker_shm_size, "--privileged", "-u", "root:root", "-w",
            workspace, args.docker_image, "/bin/bash"]

        # In debug mode the container gets an interactive shell instead of
        # immediately executing start.sh.
        if not args.debug:
            docker_run_cmd.append("start.sh")

        if args.verbose:
            print("Docker run command:\n{}".format(docker_run_cmd))

        self._launch_command(docker_run_cmd)
def _launch_command(self, run_cmd):
"""runs command that runs the start script in a container or on bare metal and exits on ctrl c"""
p = subprocess.Popen(run_cmd, preexec_fn=os.setsid)
try:
p.communicate()
except KeyboardInterrupt:
os.killpg(os.getpgid(p.pid), signal.SIGKILL)
if __name__ == "__main__":
    # Entry point: parse CLI args and launch the benchmark run
    # (docker container or bare metal).
    util = LaunchBenchmark()
    util.main()
| 43.756696 | 139 | 0.631179 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import signal
import subprocess
import sys
from argparse import ArgumentParser
from common import base_benchmark_util
from common import platform_util
from common.utils.validators import check_no_spaces, check_volume_mount, check_shm_size
from common.base_model_init import BaseModelInitializer
class LaunchBenchmark(base_benchmark_util.BaseBenchmarkUtil):
    def __init__(self, *args, **kwargs):
        """Parse CLI args and validate them, exiting with a clean error message on failure."""
        super(LaunchBenchmark, self).__init__(*args, **kwargs)

        self.args, self.unknown_args = self.parse_args()
        try:
            self.validate_args()
        except (IOError, ValueError) as e:
            # Surface validation problems as a CLI error instead of a traceback.
            sys.exit("\nError: {}".format(e))
    def main(self):
        """
        Top-level flow: locate the model code and its use case, assemble
        the env var dict, then dispatch the run into a docker container
        or onto bare metal.
        """
        benchmark_scripts = os.path.dirname(os.path.realpath(__file__))
        use_case = self.get_model_use_case(benchmark_scripts)
        intelai_models = self.get_model_dir(benchmark_scripts, use_case)
        intelai_models_common = self.get_model_dir(benchmark_scripts, "common")
        env_var_dict = self.get_env_vars(benchmark_scripts, use_case, intelai_models,
                                         intelai_models_common)
        if self.args.docker_image:
            # NOTE(review): tensorflow_serving runs bare metal even when a
            # docker image is given, and frameworks other than these two
            # silently do nothing in this branch -- confirm both are intended.
            if self.args.framework == 'tensorflow_serving':
                self.run_bare_metal(benchmark_scripts, intelai_models,
                                    intelai_models_common, env_var_dict)
            elif self.args.framework == 'tensorflow':
                self.run_docker_container(benchmark_scripts, intelai_models,
                                          intelai_models_common, env_var_dict)
        else:
            self.run_bare_metal(benchmark_scripts, intelai_models,
                                intelai_models_common, env_var_dict)
    def parse_args(self):
        """
        Build the launcher's arg parser on top of the shared benchmark args
        and return (known_args, unknown_args) from parse_known_args();
        unknown args are later forwarded as custom model args.
        """
        arg_parser = ArgumentParser(
            parents=[self._common_arg_parser],
            description="Parse args for benchmark interface")

        arg_parser.add_argument(
            "--docker-image",
            help="Specify the docker image/tag to use when running benchmarking within a container."
                 "If no docker image is specified, then no docker container will be used.",
            dest="docker_image", default=None, type=check_no_spaces)

        arg_parser.add_argument(
            "--volume",
            help="Specify a custom volume to mount in the container, which follows the same format as the "
                 "docker --volume flag (https://docs.docker.com/storage/volumes/). "
                 "This argument can only be used in conjunction with a --docker-image.",
            action="append", dest="custom_volumes", type=check_volume_mount)

        arg_parser.add_argument(
            "--shm-size",
            help="Specify the size of docker /dev/shm. The format is <number><unit>. "
                 "number must be greater than 0. Unit is optional and can be b (bytes), k (kilobytes), "
                 "m (megabytes), or g (gigabytes).",
            dest="shm_size", default="64m", type=check_shm_size)

        arg_parser.add_argument(
            "--debug", help="Launches debug mode which doesn't execute "
                            "start.sh when running in a docker container.", action="store_true")

        # default=None lets validate_args/get_env_vars distinguish "not given"
        # from an explicit True.
        arg_parser.add_argument(
            "--noinstall",
            help="whether to install packages for a given model when running in docker "
                 "(default --noinstall='False') or on bare metal (default --noinstall='True')",
            dest="noinstall", action="store_true", default=None)

        return arg_parser.parse_known_args()
    def validate_args(self):
        """
        Validate parsed args and fill in defaults.

        Raises:
            ValueError: for an unsupported framework or for --volume
                without --docker-image.
        """
        # validate that we support this framework by checking folder names
        benchmark_dir = os.path.dirname(os.path.realpath(__file__))
        if glob.glob("{}/*/{}".format(benchmark_dir, self.args.framework)) == []:
            raise ValueError("The specified framework is not supported: {}".
                             format(self.args.framework))

        # if neither benchmark_only or accuracy_only are specified, then enable
        # benchmark_only as the default
        if not self.args.benchmark_only and not self.args.accuracy_only:
            self.args.benchmark_only = True

        # default disable_tcmalloc=False for int8 and disable_tcmalloc=True
        # for other precisions
        if not self.args.disable_tcmalloc:
            self.args.disable_tcmalloc = str(self.args.precision != "int8")

        if self.args.custom_volumes and not self.args.docker_image:
            raise ValueError("Volume mounts can only be used when running in a docker container "
                             "(a --docker-image must be specified when using --volume).")

        if self.args.mode == "inference" and self.args.checkpoint:
            print("Warning: The --checkpoint argument is being deprecated in favor of using frozen graphs.")
def get_model_use_case(self, benchmark_scripts):
args = self.args
# find the path to the model's benchmarks folder
search_path = os.path.join(
benchmark_scripts, "*", args.framework, args.model_name,
args.mode, args.precision)
matches = glob.glob(search_path)
error_str = ""
if len(matches) > 1:
error_str = "Found multiple model locations for {} {} {}"
elif len(matches) == 0:
error_str = "No model was found for {} {} {}"
if error_str:
raise ValueError(error_str.format(args.framework, args.model_name, args.precision))
dir_list = matches[0].split("/")
return next(dir_list[elem - 1] for elem in range(len(dir_list) - 1, -1, -1)
if dir_list[elem] == args.framework)
def get_model_dir(self, benchmark_scripts, use_case):
intelai_models = os.path.join(benchmark_scripts, os.pardir, "models")
if use_case == "common":
return os.path.join(intelai_models, "common", self.args.framework)
args = self.args
optimized_model_dir = os.path.join(
benchmark_scripts, os.pardir, "models", use_case,
args.framework, args.model_name)
if os.path.isdir(optimized_model_dir):
intelai_models = optimized_model_dir
return intelai_models
def get_env_vars(self, benchmark_scripts, use_case, intelai_models,
intelai_models_common):
args = self.args
env_var_dict = {
"ACCURACY_ONLY": args.accuracy_only,
"BACKBONE_MODEL_DIRECTORY_VOL": args.backbone_model,
"BATCH_SIZE": args.batch_size,
"BENCHMARK_ONLY": args.benchmark_only,
"BENCHMARK_SCRIPTS": benchmark_scripts,
"CHECKPOINT_DIRECTORY_VOL": args.checkpoint,
"DATASET_LOCATION_VOL": args.data_location,
"DATA_NUM_INTER_THREADS": args.data_num_inter_threads,
"DATA_NUM_INTRA_THREADS": args.data_num_intra_threads,
"DISABLE_TCMALLOC": args.disable_tcmalloc,
"DOCKER": args.docker_image or str(args.docker_image is not None),
"EXTERNAL_MODELS_SOURCE_DIRECTORY": args.model_source_dir,
"FRAMEWORK": args.framework,
"INTELAI_MODELS": intelai_models,
"INTELAI_MODELS_COMMON": intelai_models_common,
"MODE": args.mode,
"MODEL_NAME": args.model_name,
"MPI_HOSTNAMES": args.mpi_hostnames,
"MPI_NUM_PROCESSES": args.mpi,
"MPI_NUM_PROCESSES_PER_SOCKET": args.num_mpi,
"NOINSTALL": str(args.noinstall) if args.noinstall is not None else "True" if not args.docker_image else "False",
"NUM_CORES": args.num_cores,
"NUM_INTER_THREADS": args.num_inter_threads,
"NUM_INTRA_THREADS": args.num_intra_threads,
"NUM_TRAIN_STEPS": args.num_train_steps,
"OUTPUT_RESULTS": args.output_results,
"PRECISION": args.precision,
"PYTHON_EXE": sys.executable if not args.docker_image else "python",
"SOCKET_ID": args.socket_id,
"TCMALLOC_LARGE_ALLOC_REPORT_THRESHOLD": args.tcmalloc_large_alloc_report_threshold,
"TF_SERVING_VERSION": args.tf_serving_version,
"USE_CASE": use_case,
"VERBOSE": args.verbose
}
for custom_arg in args.model_args + self.unknown_args:
if "=" not in custom_arg:
raise ValueError("Expected model args in the format "
"`name=value` but received: {}".
format(custom_arg))
split_arg = custom_arg.split("=")
split_arg[0] = split_arg[0].replace("-", "_").lstrip('_')
env_var_dict[split_arg[0]] = split_arg[1]
return env_var_dict
def run_bare_metal(self, benchmark_scripts, intelai_models,
intelai_models_common, env_var_dict):
# mounting volumes when running bare metal, but start.sh expects these args
args = self.args
workspace = os.path.join(benchmark_scripts, "common", args.framework)
mount_benchmark = benchmark_scripts
in_graph_path = args.input_graph
checkpoint_path = args.checkpoint
backbone_model_path = args.backbone_model
dataset_path = args.data_location
mount_external_models_source = args.model_source_dir
mount_intelai_models = intelai_models
# To Launch Tensorflow Serving benchmark we need only --in-graph arg.
# It does not support checkpoint files.
if args.framework == "tensorflow_serving":
if checkpoint_path:
raise ValueError("--checkpoint-path arg is not supported with tensorflow serving benchmarking")
if args.mode != "inference":
raise ValueError("--mode arg should be set to inference")
if in_graph_path:
env_var_dict["IN_GRAPH"] = in_graph_path
else:
raise ValueError("--in-graph arg is required to run tensorflow serving benchmarking")
for env_var_name in env_var_dict:
os.environ[env_var_name] = str(env_var_dict[env_var_name])
# We need this env to be set for the platform util
os.environ["PYTHON_EXE"] = str(sys.executable if not args.docker_image else "python")
# Get Platformutil
platform_util_obj = None or platform_util.PlatformUtil(self.args)
# Configure num_inter_threads and num_intra_threads
base_obj = BaseModelInitializer(args=self.args, custom_args=[], platform_util=platform_util_obj)
base_obj.set_num_inter_intra_threads()
# Update num_inter_threads and num_intra_threads in env dictionary
env_var_dict["NUM_INTER_THREADS"] = self.args.num_inter_threads
env_var_dict["NUM_INTRA_THREADS"] = self.args.num_intra_threads
# Set OMP_NUM_THREADS
env_var_dict["OMP_NUM_THREADS"] = self.args.num_intra_threads
else:
mount_external_models_source = args.model_source_dir
mount_intelai_models = intelai_models
mount_intelai_models_common = intelai_models_common
# Add env vars with bare metal settings
env_var_dict["MOUNT_EXTERNAL_MODELS_SOURCE"] = mount_external_models_source
env_var_dict["MOUNT_INTELAI_MODELS_SOURCE"] = mount_intelai_models
env_var_dict["MOUNT_INTELAI_MODELS_COMMON_SOURCE"] = mount_intelai_models_common
if in_graph_path:
env_var_dict["IN_GRAPH"] = in_graph_path
if checkpoint_path:
env_var_dict["CHECKPOINT_DIRECTORY"] = checkpoint_path
if backbone_model_path:
env_var_dict["BACKBONE_MODEL_DIRECTORY"] = backbone_model_path
if dataset_path:
env_var_dict["DATASET_LOCATION"] = dataset_path
# if using the default output directory, get the full path
if args.output_dir == "/models/benchmarks/common/tensorflow/logs":
args.output_dir = os.path.join(workspace, "logs")
# Add env vars with bare metal settings
env_var_dict["WORKSPACE"] = workspace
env_var_dict["MOUNT_BENCHMARK"] = mount_benchmark
env_var_dict["OUTPUT_DIR"] = args.output_dir
# Set env vars for bare metal
for env_var_name in env_var_dict:
os.environ[env_var_name] = str(env_var_dict[env_var_name])
# Run the start script
start_script = os.path.join(workspace, "start.sh")
self._launch_command(["bash", start_script])
def run_docker_container(self, benchmark_scripts, intelai_models,
intelai_models_common, env_var_dict):
args = self.args
mount_benchmark = "/workspace/benchmarks"
mount_external_models_source = "/workspace/models"
mount_intelai_models = "/workspace/intelai_models"
mount_intelai_models_common = "/workspace/intelai_models_common"
workspace = os.path.join(mount_benchmark, "common", args.framework)
mount_output_dir = False
output_dir = os.path.join(workspace, 'logs')
if args.output_dir != "/models/benchmarks/common/tensorflow/logs":
# we don't need to mount log dir otherwise since default is workspace folder
mount_output_dir = True
output_dir = args.output_dir
in_graph_dir = os.path.dirname(args.input_graph) if args.input_graph \
else ""
in_graph_filename = os.path.basename(args.input_graph) if \
args.input_graph else ""
env_vars = ["--env", "WORKSPACE={}".format(workspace),
"--env", "MOUNT_BENCHMARK={}".format(mount_benchmark),
"--env", "MOUNT_EXTERNAL_MODELS_SOURCE={}".format(mount_external_models_source),
"--env", "MOUNT_INTELAI_MODELS_SOURCE={}".format(mount_intelai_models),
"--env", "MOUNT_INTELAI_MODELS_COMMON_SOURCE={}".format(mount_intelai_models_common),
"--env", "OUTPUT_DIR={}".format(output_dir)]
if args.input_graph:
env_vars += ["--env", "IN_GRAPH=/in_graph/{}".format(in_graph_filename)]
if args.data_location:
env_vars += ["--env", "DATASET_LOCATION=/dataset"]
if args.checkpoint:
env_vars += ["--env", "CHECKPOINT_DIRECTORY=/checkpoints"]
if args.backbone_model:
env_vars += ["--env", "BACKBONE_MODEL_DIRECTORY=/backbone_model"]
for env_var_name in env_var_dict:
env_vars += ["--env", "{}={}".format(env_var_name, env_var_dict[env_var_name])]
for environment_proxy_setting in [
"http_proxy",
"ftp_proxy",
"https_proxy",
"no_proxy",
]:
if not os.environ.get(environment_proxy_setting):
continue
env_vars.append("--env")
env_vars.append("{}={}".format(
environment_proxy_setting,
os.environ.get(environment_proxy_setting)
))
volume_mounts = ["--volume", "{}:{}".format(benchmark_scripts, mount_benchmark),
"--volume", "{}:{}".format(args.model_source_dir, mount_external_models_source),
"--volume", "{}:{}".format(intelai_models, mount_intelai_models),
"--volume", "{}:{}".format(intelai_models_common, mount_intelai_models_common)]
if mount_output_dir:
volume_mounts.extend([
"--volume", "{}:{}".format(output_dir, output_dir)])
if args.data_location:
volume_mounts.extend([
"--volume", "{}:{}".format(args.data_location, "/dataset")])
if args.checkpoint:
volume_mounts.extend([
"--volume", "{}:{}".format(args.checkpoint, "/checkpoints")])
if args.backbone_model:
volume_mounts.extend([
"--volume", "{}:{}".format(args.backbone_model, "/backbone_model")])
if in_graph_dir:
volume_mounts.extend([
"--volume", "{}:{}".format(in_graph_dir, "/in_graph")])
if args.custom_volumes:
for custom_volume in args.custom_volumes:
volume_mounts.extend(["--volume", custom_volume])
docker_run_cmd = ["docker", "run"]
if args.debug:
docker_run_cmd.append("-it")
docker_shm_size = "--shm-size={}".format(args.shm_size)
docker_run_cmd = docker_run_cmd + env_vars + volume_mounts + [
docker_shm_size, "--privileged", "-u", "root:root", "-w",
workspace, args.docker_image, "/bin/bash"]
if not args.debug:
docker_run_cmd.append("start.sh")
if args.verbose:
print("Docker run command:\n{}".format(docker_run_cmd))
self._launch_command(docker_run_cmd)
    def _launch_command(self, run_cmd):
        """Run the start script in its own process group; kill the whole group on Ctrl-C."""
        p = subprocess.Popen(run_cmd, preexec_fn=os.setsid)
        try:
            p.communicate()
        except KeyboardInterrupt:
            os.killpg(os.getpgid(p.pid), signal.SIGKILL)


if __name__ == "__main__":
    # Entry point: parse CLI args and launch the benchmark run.
    util = LaunchBenchmark()
    util.main()
| true | true |
f72e56127d3745b7e19a89a5d8f92b2706b59d1b | 31,530 | py | Python | dvc/remote/base.py | e3bo/dvc | 05b9f425863f259fd72e6c83e31326e4aab27826 | [
"Apache-2.0"
] | null | null | null | dvc/remote/base.py | e3bo/dvc | 05b9f425863f259fd72e6c83e31326e4aab27826 | [
"Apache-2.0"
] | null | null | null | dvc/remote/base.py | e3bo/dvc | 05b9f425863f259fd72e6c83e31326e4aab27826 | [
"Apache-2.0"
] | null | null | null | from __future__ import unicode_literals
from dvc.utils.compat import basestring, FileNotFoundError, str, urlparse
import itertools
import json
import logging
import tempfile
from concurrent.futures import ThreadPoolExecutor
from copy import copy
from functools import partial
from multiprocessing import cpu_count
from operator import itemgetter
from shortuuid import uuid
import dvc.prompt as prompt
from dvc.config import Config
from dvc.exceptions import (
DvcException,
ConfirmRemoveError,
DvcIgnoreInCollectedDirError,
)
from dvc.ignore import DvcIgnore
from dvc.path_info import PathInfo, URLInfo
from dvc.progress import Tqdm
from dvc.remote.slow_link_detection import slow_link_guard
from dvc.state import StateNoop
from dvc.utils import makedirs, relpath, tmp_fname
from dvc.utils.fs import move
from dvc.utils.http import open_url
logger = logging.getLogger(__name__)

# Status codes describing how a local checksum relates to its remote copy.
STATUS_OK = 1
STATUS_MISSING = 2
STATUS_NEW = 3
STATUS_DELETED = 4

STATUS_MAP = {
    # (local_exists, remote_exists) -> status
    (True, True): STATUS_OK,
    (False, False): STATUS_MISSING,
    (True, False): STATUS_NEW,
    (False, True): STATUS_DELETED,
}
class RemoteCmdError(DvcException):
    """Raised when a command executed against a remote exits non-zero."""

    def __init__(self, remote, cmd, ret, err):
        # NOTE: message previously contained a stray unbalanced quote
        # after the return code ("{ret}':").
        super(RemoteCmdError, self).__init__(
            "{remote} command '{cmd}' finished with non-zero return code"
            " {ret}: {err}".format(remote=remote, cmd=cmd, ret=ret, err=err)
        )
class RemoteActionNotImplemented(DvcException):
    """Raised when a remote does not support the requested operation."""

    def __init__(self, action, scheme):
        m = "{} is not supported by {} remote".format(action, scheme)
        super(RemoteActionNotImplemented, self).__init__(m)


class RemoteMissingDepsError(DvcException):
    """Raised when optional dependencies required by a remote are not installed."""

    pass
class DirCacheError(DvcException):
    """Raised when a .dir cache file cannot be read or parsed."""

    def __init__(self, checksum, cause=None):
        super(DirCacheError, self).__init__(
            "Failed to load dir cache for checksum: '{}'.".format(checksum),
            cause=cause,
        )
class RemoteBASE(object):
    """Base class for remotes/caches; subclasses override the attributes below."""

    scheme = "base"  # URL scheme this remote handles (e.g. "s3", "ssh")
    path_cls = URLInfo  # path class used for locations on this remote
    REQUIRES = {}  # optional deps: {pip_package_name: import_module_name}
    JOBS = 4 * cpu_count()  # default parallelism for transfer operations

    PARAM_RELPATH = "relpath"  # key used in .dir cache entries
    CHECKSUM_DIR_SUFFIX = ".dir"  # suffix marking a directory checksum
    CHECKSUM_JOBS = max(1, min(4, cpu_count() // 2))  # default checksum workers
    DEFAULT_CACHE_TYPES = ["copy"]  # link strategies tried in order

    state = StateNoop()  # checksum state DB; no-op unless replaced
    def __init__(self, repo, config):
        """
        Args:
            repo: the Repo instance this remote belongs to.
            config: parsed config dict for this remote/cache section.
        """
        self.repo = repo
        self._check_requires(config)

        core = config.get(Config.SECTION_CORE, {})
        self.checksum_jobs = core.get(
            Config.SECTION_CORE_CHECKSUM_JOBS, self.CHECKSUM_JOBS
        )
        self.protected = False
        self.no_traverse = config.get(Config.SECTION_REMOTE_NO_TRAVERSE, True)
        self._dir_info = {}  # memoized {dir_checksum: list of entries}

        types = config.get(Config.SECTION_CACHE_TYPE, None)
        if types:
            if isinstance(types, str):
                # the cache type setting may be a comma-separated string
                types = [t.strip() for t in types.split(",")]
            self.cache_types = types
        else:
            self.cache_types = copy(self.DEFAULT_CACHE_TYPES)

        self.cache_type_confirmed = False

    def _check_requires(self, config):
        """Verify that this remote's optional deps import; raise with install hints otherwise."""
        import importlib

        missing = []

        for package, module in self.REQUIRES.items():
            try:
                importlib.import_module(module)
            except ImportError:
                missing.append(package)

        if not missing:
            return

        url = config.get(
            Config.SECTION_REMOTE_URL, "{}://".format(self.scheme)
        )
        msg = (
            "URL '{}' is supported but requires these missing "
            "dependencies: {}. If you have installed dvc using pip, "
            "choose one of these options to proceed: \n"
            "\n"
            "    1) Install specific missing dependencies:\n"
            "        pip install {}\n"
            "    2) Install dvc package that includes those missing "
            "dependencies: \n"
            "        pip install 'dvc[{}]'\n"
            "    3) Install dvc package with all possible "
            "dependencies included: \n"
            "        pip install 'dvc[all]'\n"
            "\n"
            "If you have installed dvc from a binary package and you "
            "are still seeing this message, please report it to us "
            "using https://github.com/iterative/dvc/issues. Thank you!"
        ).format(url, missing, " ".join(missing), self.scheme)

        raise RemoteMissingDepsError(msg)
    def __repr__(self):
        return "{class_name}: '{path_info}'".format(
            class_name=type(self).__name__,
            path_info=self.path_info or "No path",
        )

    @classmethod
    def supported(cls, config):
        """Return True when this remote class handles the config URL's scheme."""
        if isinstance(config, basestring):
            url = config
        else:
            url = config[Config.SECTION_REMOTE_URL]

        # NOTE: silently skipping remote, calling code should handle that
        parsed = urlparse(url)
        return parsed.scheme == cls.scheme

    @property
    def cache(self):
        # The repo-level cache object matching this remote's scheme.
        return getattr(self.repo.cache, self.scheme)

    def get_file_checksum(self, path_info):
        # Per-file checksum computation is remote-specific; subclasses implement it.
        raise NotImplementedError

    def _calculate_checksums(self, file_infos):
        """Compute checksums for *file_infos* in a thread pool; returns {path_info: checksum}."""
        file_infos = list(file_infos)
        with ThreadPoolExecutor(max_workers=self.checksum_jobs) as executor:
            tasks = executor.map(self.get_file_checksum, file_infos)

            with Tqdm(
                tasks,
                total=len(file_infos),
                unit="md5",
                desc="Computing hashes (only done once)",
            ) as tasks:
                checksums = dict(zip(file_infos, tasks))
        return checksums
    def _collect_dir(self, path_info):
        """
        Walk a directory and build its .dir manifest: a list of
        {checksum, relpath} dicts sorted by relpath for reproducibility.
        """
        file_infos = set()

        for fname in self.walk_files(path_info):
            if DvcIgnore.DVCIGNORE_FILE == fname.name:
                raise DvcIgnoreInCollectedDirError(fname.parent)

            file_infos.add(fname)

        # reuse checksums already recorded in the state DB; only compute
        # the ones that are missing
        checksums = {fi: self.state.get(fi) for fi in file_infos}
        not_in_state = {
            fi for fi, checksum in checksums.items() if checksum is None
        }

        new_checksums = self._calculate_checksums(not_in_state)

        checksums.update(new_checksums)

        result = [
            {
                self.PARAM_CHECKSUM: checksums[fi],
                # NOTE: this is lossy transformation:
                #   "hey\there" -> "hey/there"
                #   "hey/there" -> "hey/there"
                # The latter is fine filename on Windows, which
                # will transform to dir/file on back transform.
                #
                # Yes, this is a BUG, as long as we permit "/" in
                # filenames on Windows and "\" on Unix
                self.PARAM_RELPATH: fi.relative_to(path_info).as_posix(),
            }
            for fi in file_infos
        ]

        # Sorting the list by path to ensure reproducibility
        return sorted(result, key=itemgetter(self.PARAM_RELPATH))

    def get_dir_checksum(self, path_info):
        """
        Compute the directory checksum: build the manifest, upload it to
        the cache under its final name, and record both paths in state.
        """
        dir_info = self._collect_dir(path_info)
        checksum, tmp_info = self._get_dir_info_checksum(dir_info)
        new_info = self.cache.checksum_to_path_info(checksum)
        if self.cache.changed_cache_file(checksum):
            self.cache.makedirs(new_info.parent)
            self.cache.move(tmp_info, new_info)

        self.state.save(path_info, checksum)
        self.state.save(new_info, checksum)

        return checksum
def _get_dir_info_checksum(self, dir_info):
tmp = tempfile.NamedTemporaryFile(delete=False).name
with open(tmp, "w+") as fobj:
json.dump(dir_info, fobj, sort_keys=True)
from_info = PathInfo(tmp)
to_info = self.cache.path_info / tmp_fname("")
self.cache.upload(from_info, to_info, no_progress_bar=True)
checksum = self.get_file_checksum(to_info) + self.CHECKSUM_DIR_SUFFIX
return checksum, to_info
def get_dir_cache(self, checksum):
assert checksum
dir_info = self._dir_info.get(checksum)
if dir_info:
return dir_info
try:
dir_info = self.load_dir_cache(checksum)
except DirCacheError:
dir_info = []
self._dir_info[checksum] = dir_info
return dir_info
    def load_dir_cache(self, checksum):
        """
        Read and parse the cached .dir file for *checksum*, converting each
        entry's posix relpath to the local path flavor.

        Raises:
            DirCacheError: when the file is missing or not valid JSON.
        """
        path_info = self.checksum_to_path_info(checksum)

        try:
            with self.cache.open(path_info, "r") as fobj:
                d = json.load(fobj)
        except (ValueError, FileNotFoundError) as exc:
            raise DirCacheError(checksum, cause=exc)

        if not isinstance(d, list):
            msg = "dir cache file format error '{}' [skipping the file]"
            logger.error(msg.format(relpath(path_info)))
            return []

        for info in d:
            # NOTE: lossy on Windows/Unix mixed separators -- see the
            # NOTE next to .as_posix() in _collect_dir
            relative_path = PathInfo.from_posix(info[self.PARAM_RELPATH])
            info[self.PARAM_RELPATH] = relative_path.fspath

        return d

    @classmethod
    def is_dir_checksum(cls, checksum):
        """True when *checksum* denotes a directory (.dir) entry."""
        return checksum.endswith(cls.CHECKSUM_DIR_SUFFIX)

    def get_checksum(self, path_info):
        """Return the checksum for a path (cached in state when possible), or None if it doesn't exist."""
        assert path_info.scheme == self.scheme

        if not self.exists(path_info):
            return None

        checksum = self.state.get(path_info)

        # If we have dir checksum in state db, but dir cache file is lost,
        # then we need to recollect the dir via .get_dir_checksum() call below,
        # see https://github.com/iterative/dvc/issues/2219 for context
        if (
            checksum
            and self.is_dir_checksum(checksum)
            and not self.exists(self.cache.checksum_to_path_info(checksum))
        ):
            checksum = None

        if checksum:
            return checksum

        if self.isdir(path_info):
            checksum = self.get_dir_checksum(path_info)
        else:
            checksum = self.get_file_checksum(path_info)

        if checksum:
            self.state.save(path_info, checksum)

        return checksum
def save_info(self, path_info):
return {self.PARAM_CHECKSUM: self.get_checksum(path_info)}
    def changed(self, path_info, checksum_info):
        """Checks if data has changed.

        A file is considered changed if:
            - It doesn't exist on the working directory (was unlinked)
            - Checksum is not computed (saving a new file)
            - The checksum stored in the State is different from the given one
            - There's no file in the cache

        Args:
            path_info: dict with path information.
            checksum: expected checksum for this data.

        Returns:
            bool: True if data has changed, False otherwise.
        """
        logger.debug(
            "checking if '{}'('{}') has changed.".format(
                path_info, checksum_info
            )
        )
        if not self.exists(path_info):
            logger.debug("'{}' doesn't exist.".format(path_info))
            return True
        checksum = checksum_info.get(self.PARAM_CHECKSUM)
        if checksum is None:
            logger.debug("checksum for '{}' is missing.".format(path_info))
            return True
        if self.changed_cache(checksum):
            logger.debug(
                "cache for '{}'('{}') has changed.".format(path_info, checksum)
            )
            return True
        actual = self.get_checksum(path_info)
        if checksum != actual:
            logger.debug(
                "checksum '{}'(actual '{}') for '{}' has changed.".format(
                    checksum, actual, path_info
                )
            )
            return True
        logger.debug("'{}' hasn't changed.".format(path_info))
        return False
def link(self, from_info, to_info):
self._link(from_info, to_info, self.cache_types)
    def _link(self, from_info, to_info, link_types):
        """Create parent dirs for *to_info*, then link it from *from_info*
        trying *link_types* in order (see _try_links).
        """
        assert self.isfile(from_info)
        self.makedirs(to_info.parent)
        self._try_links(from_info, to_info, link_types)
    @slow_link_guard
    def _try_links(self, from_info, to_info, link_types):
        """Try each link type in order until one succeeds.

        NOTE: a failed type is deleted from the *caller's* list in place
        (normally self.cache_types), so it is never retried on later
        calls for this remote instance — presumably intentional caching
        of "type X is unsupported"; confirm before changing.
        """
        while link_types:
            link_method = getattr(self, link_types[0])
            try:
                self._do_link(from_info, to_info, link_method)
                self.cache_type_confirmed = True
                return
            except DvcException as exc:
                msg = "Cache type '{}' is not supported: {}"
                logger.debug(msg.format(link_types[0], str(exc)))
                del link_types[0]
        raise DvcException("no possible cache types left to try out.")
    def _do_link(self, from_info, to_info, link_method):
        """Create one link with *link_method*, refusing to overwrite an
        existing target; protect the result when protected mode is on.
        """
        if self.exists(to_info):
            raise DvcException("Link '{}' already exists!".format(to_info))
        link_method(from_info, to_info)
        if self.protected:
            self.protect(to_info)
        msg = "Created {}'{}': {} -> {}".format(
            "protected " if self.protected else "",
            self.cache_types[0],
            from_info,
            to_info,
        )
        logger.debug(msg)
    def _save_file(self, path_info, checksum, save_link=True):
        """Move *path_info* into the cache (when not cached yet) and leave
        a link to the cached copy in its place; record both in state.
        """
        assert checksum
        cache_info = self.checksum_to_path_info(checksum)
        if self.changed_cache(checksum):
            # not in cache yet: move the workspace file in, then link back
            self.move(path_info, cache_info)
            self.link(cache_info, path_info)
        elif self.iscopy(path_info) and self._cache_is_copy(path_info):
            # Default relink procedure involves unneeded copy
            if self.protected:
                self.protect(path_info)
            else:
                self.unprotect(path_info)
        else:
            # already cached: replace the workspace file with a fresh link
            self.remove(path_info)
            self.link(cache_info, path_info)
        if save_link:
            self.state.save_link(path_info)
        # we need to update path and cache, since in case of reflink,
        # or copy cache type moving original file results in updates on
        # next executed command, which causes md5 recalculation
        self.state.save(path_info, checksum)
        self.state.save(cache_info, checksum)
    def _cache_is_copy(self, path_info):
        """Checks whether cache uses copies."""
        if self.cache_type_confirmed:
            return self.cache_types[0] == "copy"
        if set(self.cache_types) <= {"copy"}:
            return True
        # Probe: create a scratch file in cache storage and link it into
        # the workspace; link() settles which cache type actually works.
        workspace_file = path_info.with_name("." + uuid())
        test_cache_file = self.path_info / ".cache_type_test_file"
        if not self.exists(test_cache_file):
            with self.open(test_cache_file, "wb") as fobj:
                fobj.write(bytes(1))  # a single NUL byte
        try:
            self.link(test_cache_file, workspace_file)
        finally:
            self.remove(workspace_file)
            self.remove(test_cache_file)
        self.cache_type_confirmed = True
        return self.cache_types[0] == "copy"
    def _save_dir(self, path_info, checksum):
        """Save every entry of directory *path_info* into the cache and
        register the directory itself (link + checksum) in state.
        """
        cache_info = self.checksum_to_path_info(checksum)
        dir_info = self.get_dir_cache(checksum)
        for entry in dir_info:
            entry_info = path_info / entry[self.PARAM_RELPATH]
            entry_checksum = entry[self.PARAM_CHECKSUM]
            # entries are covered by the dir-level save_link below
            self._save_file(entry_info, entry_checksum, save_link=False)
        self.state.save_link(path_info)
        self.state.save(cache_info, checksum)
        self.state.save(path_info, checksum)
def is_empty(self, path_info):
return False
def isfile(self, path_info):
"""Optional: Overwrite only if the remote has a way to distinguish
between a directory and a file.
"""
return True
def isdir(self, path_info):
"""Optional: Overwrite only if the remote has a way to distinguish
between a directory and a file.
"""
return False
def iscopy(self, path_info):
"""Check if this file is an independent copy."""
return False # We can't be sure by default
def walk_files(self, path_info):
"""Return a generator with `PathInfo`s to all the files"""
raise NotImplementedError
    @staticmethod
    def protect(path_info):
        """Make *path_info* read-only; no-op for remotes without permissions."""
        pass
def save(self, path_info, checksum_info):
if path_info.scheme != self.scheme:
raise RemoteActionNotImplemented(
"save {} -> {}".format(path_info.scheme, self.scheme),
self.scheme,
)
checksum = checksum_info[self.PARAM_CHECKSUM]
self._save(path_info, checksum)
    def _save(self, path_info, checksum):
        """Dispatch saving of *path_info* to the dir or file routine."""
        to_info = self.checksum_to_path_info(checksum)
        logger.debug("Saving '{}' to '{}'.".format(path_info, to_info))
        if self.isdir(path_info):
            self._save_dir(path_info, checksum)
            return
        self._save_file(path_info, checksum)
    def upload(self, from_info, to_info, name=None, no_progress_bar=False):
        """Upload local *from_info* to *to_info* on this remote.

        Returns 0 on success, 1 on failure.  Errors are logged rather
        than raised so bulk transfers can continue past single failures.
        """
        if not hasattr(self, "_upload"):
            raise RemoteActionNotImplemented("upload", self.scheme)
        if to_info.scheme != self.scheme:
            raise NotImplementedError
        if from_info.scheme != "local":
            raise NotImplementedError
        logger.debug("Uploading '{}' to '{}'".format(from_info, to_info))
        name = name or from_info.name
        try:
            self._upload(
                from_info.fspath,
                to_info,
                name=name,
                no_progress_bar=no_progress_bar,
            )
        except Exception:
            msg = "failed to upload '{}' to '{}'"
            logger.exception(msg.format(from_info, to_info))
            return 1  # 1 fail
        return 0
def download(
self,
from_info,
to_info,
name=None,
no_progress_bar=False,
file_mode=None,
dir_mode=None,
):
if not hasattr(self, "_download"):
raise RemoteActionNotImplemented("download", self.scheme)
if from_info.scheme != self.scheme:
raise NotImplementedError
if to_info.scheme == self.scheme != "local":
self.copy(from_info, to_info)
return 0
if to_info.scheme != "local":
raise NotImplementedError
if self.isdir(from_info):
return self._download_dir(
from_info, to_info, name, no_progress_bar, file_mode, dir_mode
)
return self._download_file(
from_info, to_info, name, no_progress_bar, file_mode, dir_mode
)
    def _download_dir(
        self, from_info, to_info, name, no_progress_bar, file_mode, dir_mode
    ):
        """Download a whole directory tree with a thread pool.

        Returns the number of failed files (each _download_file yields
        0 or 1).
        """
        from_infos = list(self.walk_files(from_info))
        to_infos = (
            to_info / info.relative_to(from_info) for info in from_infos
        )
        with ThreadPoolExecutor(max_workers=self.JOBS) as executor:
            download_files = partial(
                self._download_file,
                name=name,
                no_progress_bar=True,  # one dir-level bar below instead
                file_mode=file_mode,
                dir_mode=dir_mode,
            )
            futures = executor.map(download_files, from_infos, to_infos)
            with Tqdm(
                futures,
                total=len(from_infos),
                desc="Downloading directory",
                unit="Files",
                disable=no_progress_bar,
            ) as futures:
                # sum of per-file return codes == number of failures
                return sum(futures)
    def _download_file(
        self, from_info, to_info, name, no_progress_bar, file_mode, dir_mode
    ):
        """Download one file via a temporary name, then move it into place.

        Returns 0 on success, 1 on a logged failure.
        """
        makedirs(to_info.parent, exist_ok=True, mode=dir_mode)
        logger.debug("Downloading '{}' to '{}'".format(from_info, to_info))
        name = name or to_info.name
        # download to a temp name so a partial file never shadows the target
        tmp_file = tmp_fname(to_info)
        try:
            self._download(
                from_info, tmp_file, name=name, no_progress_bar=no_progress_bar
            )
        except Exception:
            msg = "failed to download '{}' to '{}'"
            logger.exception(msg.format(from_info, to_info))
            return 1  # 1 fail
        move(tmp_file, to_info, mode=file_mode)
        return 0
    def open(self, path_info, mode="r", encoding=None):
        """Open *path_info* for reading via a generated download URL.

        Only available when the remote implements _generate_download_url.
        """
        if hasattr(self, "_generate_download_url"):
            get_url = partial(self._generate_download_url, path_info)
            return open_url(get_url, mode=mode, encoding=encoding)
        raise RemoteActionNotImplemented("open", self.scheme)
    def remove(self, path_info):
        """Delete *path_info* from the remote (unsupported in the base)."""
        raise RemoteActionNotImplemented("remove", self.scheme)
def move(self, from_info, to_info):
self.copy(from_info, to_info)
self.remove(from_info)
    def copy(self, from_info, to_info):
        """Copy within the remote (unsupported in the base)."""
        raise RemoteActionNotImplemented("copy", self.scheme)
    def symlink(self, from_info, to_info):
        """Symlink cache type (unsupported in the base)."""
        raise RemoteActionNotImplemented("symlink", self.scheme)
    def hardlink(self, from_info, to_info):
        """Hardlink cache type (unsupported in the base)."""
        raise RemoteActionNotImplemented("hardlink", self.scheme)
    def reflink(self, from_info, to_info):
        """Reflink (copy-on-write clone) cache type (unsupported in the base)."""
        raise RemoteActionNotImplemented("reflink", self.scheme)
def exists(self, path_info):
raise NotImplementedError
def path_to_checksum(self, path):
parts = self.path_cls(path).parts[-2:]
if not (len(parts) == 2 and parts[0] and len(parts[0]) == 2):
raise ValueError("Bad cache file path")
return "".join(parts)
def checksum_to_path_info(self, checksum):
return self.path_info / checksum[0:2] / checksum[2:]
    def list_cache_paths(self):
        """Yield the path of every entry in this remote's cache (abstract)."""
        raise NotImplementedError
def all(self):
# NOTE: The list might be way too big(e.g. 100M entries, md5 for each
# is 32 bytes, so ~3200Mb list) and we don't really need all of it at
# the same time, so it makes sense to use a generator to gradually
# iterate over it, without keeping all of it in memory.
for path in self.list_cache_paths():
try:
yield self.path_to_checksum(path)
except ValueError:
# We ignore all the non-cache looking files
pass
def gc(self, named_cache):
used = self.extract_used_local_checksums(named_cache)
if self.scheme != "":
used.update(named_cache[self.scheme])
removed = False
for checksum in self.all():
if checksum in used:
continue
path_info = self.checksum_to_path_info(checksum)
self.remove(path_info)
removed = True
return removed
    def changed_cache_file(self, checksum):
        """Compare the given checksum with the (corresponding) actual one.

        - Use `State` as a cache for computed checksums
            + The entries are invalidated by taking into account the following:
                * mtime
                * inode
                * size
                * checksum
        - Remove the file from cache if it doesn't match the actual checksum
        """
        cache_info = self.checksum_to_path_info(checksum)
        actual = self.get_checksum(cache_info)
        logger.debug(
            "cache '{}' expected '{}' actual '{}'".format(
                str(cache_info), checksum, actual
            )
        )
        if not checksum or not actual:
            return True
        # compare the bare checksum, ignoring any ".dir" suffix
        if actual.split(".")[0] == checksum.split(".")[0]:
            return False
        if self.exists(cache_info):
            # content no longer matches its name: drop the corrupt entry
            logger.warning("corrupted cache file '{}'.".format(cache_info))
            self.remove(cache_info)
        return True
    def _changed_dir_cache(self, checksum):
        """Whether the dir cache for *checksum*, or any file it lists,
        is missing or corrupted.
        """
        if self.changed_cache_file(checksum):
            return True
        # unpacked-dir bookkeeping lets subclasses skip per-entry checks
        if not self._changed_unpacked_dir(checksum):
            return False
        for entry in self.get_dir_cache(checksum):
            entry_checksum = entry[self.PARAM_CHECKSUM]
            if self.changed_cache_file(entry_checksum):
                return True
        self._update_unpacked_dir(checksum)
        return False
def changed_cache(self, checksum):
if self.is_dir_checksum(checksum):
return self._changed_dir_cache(checksum)
return self.changed_cache_file(checksum)
    def cache_exists(self, checksums, jobs=None, name=None):
        """Check if the given checksums are stored in the remote.

        There are two ways of performing this check:

        - Traverse: Get a list of all the files in the remote
            (traversing the cache directory) and compare it with
            the given checksums.

        - No traverse: For each given checksum, run the `exists`
            method and filter the checksums that aren't on the remote.
            This is done in parallel threads.
            It also shows a progress bar when performing the check.

        The reason for such an odd logic is that most of the remotes
        take much shorter time to just retrieve everything they have under
        a certain prefix (e.g. s3, gs, ssh, hdfs). Other remotes that can
        check if particular file exists much quicker, use their own
        implementation of cache_exists (see ssh, local).

        Returns:
            A list with checksums that were found in the remote
        """
        if not self.no_traverse:
            return list(set(checksums) & set(self.all()))
        with Tqdm(
            desc="Querying "
            + ("cache in " + name if name else "remote cache"),
            total=len(checksums),
            unit="file",
        ) as pbar:
            # closure so each worker can tick the shared progress bar
            def exists_with_progress(path_info):
                ret = self.exists(path_info)
                pbar.update_desc(str(path_info))
                return ret
            with ThreadPoolExecutor(max_workers=jobs or self.JOBS) as executor:
                path_infos = map(self.checksum_to_path_info, checksums)
                in_remote = executor.map(exists_with_progress, path_infos)
                ret = list(itertools.compress(checksums, in_remote))
                return ret
def already_cached(self, path_info):
current = self.get_checksum(path_info)
if not current:
return False
return not self.changed_cache(current)
def safe_remove(self, path_info, force=False):
if not self.exists(path_info):
return
if not force and not self.already_cached(path_info):
msg = (
"file '{}' is going to be removed."
" Are you sure you want to proceed?".format(str(path_info))
)
if not prompt.confirm(msg):
raise ConfirmRemoveError(str(path_info))
self.remove(path_info)
    def _checkout_file(
        self, path_info, checksum, force, progress_callback=None
    ):
        """The file is changed, so replace *path_info* with a fresh link
        to its cached copy and record it in state.
        """
        cache_info = self.checksum_to_path_info(checksum)
        if self.exists(path_info):
            msg = "data '{}' exists. Removing before checkout."
            logger.warning(msg.format(str(path_info)))
            self.safe_remove(path_info, force=force)
        self.link(cache_info, path_info)
        self.state.save_link(path_info)
        self.state.save(path_info, checksum)
        if progress_callback:
            progress_callback(str(path_info))
def makedirs(self, path_info):
"""Optional: Implement only if the remote needs to create
directories before copying/linking/moving data
"""
pass
    def _checkout_dir(
        self, path_info, checksum, force, progress_callback=None, relink=False
    ):
        """Link every changed entry of a cached directory into the
        workspace and drop workspace files the dir cache no longer lists.
        """
        # Create dir separately so that dir is created
        # even if there are no files in it
        if not self.exists(path_info):
            self.makedirs(path_info)
        dir_info = self.get_dir_cache(checksum)
        logger.debug("Linking directory '{}'.".format(path_info))
        for entry in dir_info:
            relative_path = entry[self.PARAM_RELPATH]
            entry_checksum = entry[self.PARAM_CHECKSUM]
            entry_cache_info = self.checksum_to_path_info(entry_checksum)
            entry_info = path_info / relative_path
            entry_checksum_info = {self.PARAM_CHECKSUM: entry_checksum}
            if relink or self.changed(entry_info, entry_checksum_info):
                self.safe_remove(entry_info, force=force)
                self.link(entry_cache_info, entry_info)
                self.state.save(entry_info, entry_checksum)
            if progress_callback:
                progress_callback(str(entry_info))
        self._remove_redundant_files(path_info, dir_info, force)
        self.state.save_link(path_info)
        self.state.save(path_info, checksum)
    def _remove_redundant_files(self, path_info, dir_info, force):
        """Remove workspace files under *path_info* that *dir_info*
        (the dir cache listing) does not mention.
        """
        existing_files = set(self.walk_files(path_info))
        needed_files = {
            path_info / entry[self.PARAM_RELPATH] for entry in dir_info
        }
        for path in existing_files - needed_files:
            self.safe_remove(path, force)
    def checkout(
        self,
        path_info,
        checksum_info,
        force=False,
        progress_callback=None,
        relink=False,
    ):
        """Materialize *path_info* in the workspace from cache.

        Skips the work when nothing changed; removes the path when the
        checksum or its cache entry is missing.  Returns *path_info* on
        failure, None otherwise.
        """
        if path_info.scheme not in ["local", self.scheme]:
            raise NotImplementedError
        checksum = checksum_info.get(self.PARAM_CHECKSUM)
        failed = None
        skip = False
        if not checksum:
            logger.warning(
                "No checksum info found for '{}'. "
                "It won't be created.".format(str(path_info))
            )
            self.safe_remove(path_info, force=force)
            failed = path_info
        elif not relink and not self.changed(path_info, checksum_info):
            msg = "Data '{}' didn't change."
            logger.debug(msg.format(str(path_info)))
            skip = True
        elif self.changed_cache(checksum):
            msg = "Cache '{}' not found. File '{}' won't be created."
            logger.warning(msg.format(checksum, str(path_info)))
            self.safe_remove(path_info, force=force)
            failed = path_info
        if failed or skip:
            if progress_callback:
                # still advance the bar by the files we are not creating
                progress_callback(
                    str(path_info), self.get_files_number(checksum)
                )
            return failed
        msg = "Checking out '{}' with cache '{}'."
        logger.debug(msg.format(str(path_info), checksum))
        self._checkout(path_info, checksum, force, progress_callback, relink)
        return None
    def _checkout(
        self,
        path_info,
        checksum,
        force=False,
        progress_callback=None,
        relink=False,
    ):
        """Dispatch checkout to the file or directory routine."""
        if not self.is_dir_checksum(checksum):
            return self._checkout_file(
                path_info, checksum, force, progress_callback=progress_callback
            )
        return self._checkout_dir(
            path_info, checksum, force, progress_callback, relink
        )
def get_files_number(self, checksum):
if not checksum:
return 0
if self.is_dir_checksum(checksum):
return len(self.get_dir_cache(checksum))
return 1
    @staticmethod
    def unprotect(path_info):
        """Undo protect(); no-op for the base remote."""
        pass
    def _get_unpacked_dir_names(self, checksums):
        """Names of unpacked-dir cache entries for *checksums*; the base
        remote keeps none.
        """
        return set()
def extract_used_local_checksums(self, named_cache):
used = set(named_cache["local"])
unpacked = self._get_unpacked_dir_names(used)
return used | unpacked
    def _changed_unpacked_dir(self, checksum):
        # Base remotes keep no unpacked-dir state, so always re-verify entries.
        return True
    def _update_unpacked_dir(self, checksum):
        # Hook for subclasses to refresh their unpacked-dir copy; no-op here.
        pass
| 32.605998 | 79 | 0.603362 | from __future__ import unicode_literals
from dvc.utils.compat import basestring, FileNotFoundError, str, urlparse
import itertools
import json
import logging
import tempfile
from concurrent.futures import ThreadPoolExecutor
from copy import copy
from functools import partial
from multiprocessing import cpu_count
from operator import itemgetter
from shortuuid import uuid
import dvc.prompt as prompt
from dvc.config import Config
from dvc.exceptions import (
DvcException,
ConfirmRemoveError,
DvcIgnoreInCollectedDirError,
)
from dvc.ignore import DvcIgnore
from dvc.path_info import PathInfo, URLInfo
from dvc.progress import Tqdm
from dvc.remote.slow_link_detection import slow_link_guard
from dvc.state import StateNoop
from dvc.utils import makedirs, relpath, tmp_fname
from dvc.utils.fs import move
from dvc.utils.http import open_url
logger = logging.getLogger(__name__)
STATUS_OK = 1
STATUS_MISSING = 2
STATUS_NEW = 3
STATUS_DELETED = 4
STATUS_MAP = {
(True, True): STATUS_OK,
(False, False): STATUS_MISSING,
(True, False): STATUS_NEW,
(False, True): STATUS_DELETED,
}
class RemoteCmdError(DvcException):
def __init__(self, remote, cmd, ret, err):
super(RemoteCmdError, self).__init__(
"{remote} command '{cmd}' finished with non-zero return code"
" {ret}': {err}".format(remote=remote, cmd=cmd, ret=ret, err=err)
)
class RemoteActionNotImplemented(DvcException):
def __init__(self, action, scheme):
m = "{} is not supported by {} remote".format(action, scheme)
super(RemoteActionNotImplemented, self).__init__(m)
class RemoteMissingDepsError(DvcException):
pass
class DirCacheError(DvcException):
def __init__(self, checksum, cause=None):
super(DirCacheError, self).__init__(
"Failed to load dir cache for checksum: '{}'.".format(checksum),
cause=cause,
)
class RemoteBASE(object):
scheme = "base"
path_cls = URLInfo
REQUIRES = {}
JOBS = 4 * cpu_count()
PARAM_RELPATH = "relpath"
CHECKSUM_DIR_SUFFIX = ".dir"
CHECKSUM_JOBS = max(1, min(4, cpu_count() // 2))
DEFAULT_CACHE_TYPES = ["copy"]
state = StateNoop()
def __init__(self, repo, config):
self.repo = repo
self._check_requires(config)
core = config.get(Config.SECTION_CORE, {})
self.checksum_jobs = core.get(
Config.SECTION_CORE_CHECKSUM_JOBS, self.CHECKSUM_JOBS
)
self.protected = False
self.no_traverse = config.get(Config.SECTION_REMOTE_NO_TRAVERSE, True)
self._dir_info = {}
types = config.get(Config.SECTION_CACHE_TYPE, None)
if types:
if isinstance(types, str):
types = [t.strip() for t in types.split(",")]
self.cache_types = types
else:
self.cache_types = copy(self.DEFAULT_CACHE_TYPES)
self.cache_type_confirmed = False
def _check_requires(self, config):
import importlib
missing = []
for package, module in self.REQUIRES.items():
try:
importlib.import_module(module)
except ImportError:
missing.append(package)
if not missing:
return
url = config.get(
Config.SECTION_REMOTE_URL, "{}://".format(self.scheme)
)
msg = (
"URL '{}' is supported but requires these missing "
"dependencies: {}. If you have installed dvc using pip, "
"choose one of these options to proceed: \n"
"\n"
" 1) Install specific missing dependencies:\n"
" pip install {}\n"
" 2) Install dvc package that includes those missing "
"dependencies: \n"
" pip install 'dvc[{}]'\n"
" 3) Install dvc package with all possible "
"dependencies included: \n"
" pip install 'dvc[all]'\n"
"\n"
"If you have installed dvc from a binary package and you "
"are still seeing this message, please report it to us "
"using https://github.com/iterative/dvc/issues. Thank you!"
).format(url, missing, " ".join(missing), self.scheme)
raise RemoteMissingDepsError(msg)
def __repr__(self):
return "{class_name}: '{path_info}'".format(
class_name=type(self).__name__,
path_info=self.path_info or "No path",
)
@classmethod
def supported(cls, config):
if isinstance(config, basestring):
url = config
else:
url = config[Config.SECTION_REMOTE_URL]
# NOTE: silently skipping remote, calling code should handle that
parsed = urlparse(url)
return parsed.scheme == cls.scheme
@property
def cache(self):
return getattr(self.repo.cache, self.scheme)
def get_file_checksum(self, path_info):
raise NotImplementedError
def _calculate_checksums(self, file_infos):
file_infos = list(file_infos)
with ThreadPoolExecutor(max_workers=self.checksum_jobs) as executor:
tasks = executor.map(self.get_file_checksum, file_infos)
with Tqdm(
tasks,
total=len(file_infos),
unit="md5",
desc="Computing hashes (only done once)",
) as tasks:
checksums = dict(zip(file_infos, tasks))
return checksums
def _collect_dir(self, path_info):
file_infos = set()
for fname in self.walk_files(path_info):
if DvcIgnore.DVCIGNORE_FILE == fname.name:
raise DvcIgnoreInCollectedDirError(fname.parent)
file_infos.add(fname)
checksums = {fi: self.state.get(fi) for fi in file_infos}
not_in_state = {
fi for fi, checksum in checksums.items() if checksum is None
}
new_checksums = self._calculate_checksums(not_in_state)
checksums.update(new_checksums)
result = [
{
self.PARAM_CHECKSUM: checksums[fi],
# NOTE: this is lossy transformation:
# "hey\there" -> "hey/there"
# "hey/there" -> "hey/there"
# The latter is fine filename on Windows, which
# will transform to dir/file on back transform.
#
# Yes, this is a BUG, as long as we permit "/" in
# filenames on Windows and "\" on Unix
self.PARAM_RELPATH: fi.relative_to(path_info).as_posix(),
}
for fi in file_infos
]
# Sorting the list by path to ensure reproducibility
return sorted(result, key=itemgetter(self.PARAM_RELPATH))
def get_dir_checksum(self, path_info):
dir_info = self._collect_dir(path_info)
checksum, tmp_info = self._get_dir_info_checksum(dir_info)
new_info = self.cache.checksum_to_path_info(checksum)
if self.cache.changed_cache_file(checksum):
self.cache.makedirs(new_info.parent)
self.cache.move(tmp_info, new_info)
self.state.save(path_info, checksum)
self.state.save(new_info, checksum)
return checksum
def _get_dir_info_checksum(self, dir_info):
tmp = tempfile.NamedTemporaryFile(delete=False).name
with open(tmp, "w+") as fobj:
json.dump(dir_info, fobj, sort_keys=True)
from_info = PathInfo(tmp)
to_info = self.cache.path_info / tmp_fname("")
self.cache.upload(from_info, to_info, no_progress_bar=True)
checksum = self.get_file_checksum(to_info) + self.CHECKSUM_DIR_SUFFIX
return checksum, to_info
def get_dir_cache(self, checksum):
assert checksum
dir_info = self._dir_info.get(checksum)
if dir_info:
return dir_info
try:
dir_info = self.load_dir_cache(checksum)
except DirCacheError:
dir_info = []
self._dir_info[checksum] = dir_info
return dir_info
def load_dir_cache(self, checksum):
path_info = self.checksum_to_path_info(checksum)
try:
with self.cache.open(path_info, "r") as fobj:
d = json.load(fobj)
except (ValueError, FileNotFoundError) as exc:
raise DirCacheError(checksum, cause=exc)
if not isinstance(d, list):
msg = "dir cache file format error '{}' [skipping the file]"
logger.error(msg.format(relpath(path_info)))
return []
for info in d:
# NOTE: here is a BUG, see comment to .as_posix() below
relative_path = PathInfo.from_posix(info[self.PARAM_RELPATH])
info[self.PARAM_RELPATH] = relative_path.fspath
return d
@classmethod
def is_dir_checksum(cls, checksum):
return checksum.endswith(cls.CHECKSUM_DIR_SUFFIX)
def get_checksum(self, path_info):
assert path_info.scheme == self.scheme
if not self.exists(path_info):
return None
checksum = self.state.get(path_info)
# If we have dir checksum in state db, but dir cache file is lost,
# then we need to recollect the dir via .get_dir_checksum() call below,
# see https://github.com/iterative/dvc/issues/2219 for context
if (
checksum
and self.is_dir_checksum(checksum)
and not self.exists(self.cache.checksum_to_path_info(checksum))
):
checksum = None
if checksum:
return checksum
if self.isdir(path_info):
checksum = self.get_dir_checksum(path_info)
else:
checksum = self.get_file_checksum(path_info)
if checksum:
self.state.save(path_info, checksum)
return checksum
def save_info(self, path_info):
return {self.PARAM_CHECKSUM: self.get_checksum(path_info)}
def changed(self, path_info, checksum_info):
logger.debug(
"checking if '{}'('{}') has changed.".format(
path_info, checksum_info
)
)
if not self.exists(path_info):
logger.debug("'{}' doesn't exist.".format(path_info))
return True
checksum = checksum_info.get(self.PARAM_CHECKSUM)
if checksum is None:
logger.debug("checksum for '{}' is missing.".format(path_info))
return True
if self.changed_cache(checksum):
logger.debug(
"cache for '{}'('{}') has changed.".format(path_info, checksum)
)
return True
actual = self.get_checksum(path_info)
if checksum != actual:
logger.debug(
"checksum '{}'(actual '{}') for '{}' has changed.".format(
checksum, actual, path_info
)
)
return True
logger.debug("'{}' hasn't changed.".format(path_info))
return False
def link(self, from_info, to_info):
self._link(from_info, to_info, self.cache_types)
def _link(self, from_info, to_info, link_types):
assert self.isfile(from_info)
self.makedirs(to_info.parent)
self._try_links(from_info, to_info, link_types)
@slow_link_guard
def _try_links(self, from_info, to_info, link_types):
while link_types:
link_method = getattr(self, link_types[0])
try:
self._do_link(from_info, to_info, link_method)
self.cache_type_confirmed = True
return
except DvcException as exc:
msg = "Cache type '{}' is not supported: {}"
logger.debug(msg.format(link_types[0], str(exc)))
del link_types[0]
raise DvcException("no possible cache types left to try out.")
def _do_link(self, from_info, to_info, link_method):
if self.exists(to_info):
raise DvcException("Link '{}' already exists!".format(to_info))
link_method(from_info, to_info)
if self.protected:
self.protect(to_info)
msg = "Created {}'{}': {} -> {}".format(
"protected " if self.protected else "",
self.cache_types[0],
from_info,
to_info,
)
logger.debug(msg)
def _save_file(self, path_info, checksum, save_link=True):
assert checksum
cache_info = self.checksum_to_path_info(checksum)
if self.changed_cache(checksum):
self.move(path_info, cache_info)
self.link(cache_info, path_info)
elif self.iscopy(path_info) and self._cache_is_copy(path_info):
# Default relink procedure involves unneeded copy
if self.protected:
self.protect(path_info)
else:
self.unprotect(path_info)
else:
self.remove(path_info)
self.link(cache_info, path_info)
if save_link:
self.state.save_link(path_info)
# we need to update path and cache, since in case of reflink,
# or copy cache type moving original file results in updates on
# next executed command, which causes md5 recalculation
self.state.save(path_info, checksum)
self.state.save(cache_info, checksum)
def _cache_is_copy(self, path_info):
if self.cache_type_confirmed:
return self.cache_types[0] == "copy"
if set(self.cache_types) <= {"copy"}:
return True
workspace_file = path_info.with_name("." + uuid())
test_cache_file = self.path_info / ".cache_type_test_file"
if not self.exists(test_cache_file):
with self.open(test_cache_file, "wb") as fobj:
fobj.write(bytes(1))
try:
self.link(test_cache_file, workspace_file)
finally:
self.remove(workspace_file)
self.remove(test_cache_file)
self.cache_type_confirmed = True
return self.cache_types[0] == "copy"
def _save_dir(self, path_info, checksum):
cache_info = self.checksum_to_path_info(checksum)
dir_info = self.get_dir_cache(checksum)
for entry in dir_info:
entry_info = path_info / entry[self.PARAM_RELPATH]
entry_checksum = entry[self.PARAM_CHECKSUM]
self._save_file(entry_info, entry_checksum, save_link=False)
self.state.save_link(path_info)
self.state.save(cache_info, checksum)
self.state.save(path_info, checksum)
def is_empty(self, path_info):
return False
def isfile(self, path_info):
return True
def isdir(self, path_info):
return False
def iscopy(self, path_info):
return False # We can't be sure by default
def walk_files(self, path_info):
raise NotImplementedError
@staticmethod
def protect(path_info):
pass
def save(self, path_info, checksum_info):
if path_info.scheme != self.scheme:
raise RemoteActionNotImplemented(
"save {} -> {}".format(path_info.scheme, self.scheme),
self.scheme,
)
checksum = checksum_info[self.PARAM_CHECKSUM]
self._save(path_info, checksum)
def _save(self, path_info, checksum):
to_info = self.checksum_to_path_info(checksum)
logger.debug("Saving '{}' to '{}'.".format(path_info, to_info))
if self.isdir(path_info):
self._save_dir(path_info, checksum)
return
self._save_file(path_info, checksum)
def upload(self, from_info, to_info, name=None, no_progress_bar=False):
if not hasattr(self, "_upload"):
raise RemoteActionNotImplemented("upload", self.scheme)
if to_info.scheme != self.scheme:
raise NotImplementedError
if from_info.scheme != "local":
raise NotImplementedError
logger.debug("Uploading '{}' to '{}'".format(from_info, to_info))
name = name or from_info.name
try:
self._upload(
from_info.fspath,
to_info,
name=name,
no_progress_bar=no_progress_bar,
)
except Exception:
msg = "failed to upload '{}' to '{}'"
logger.exception(msg.format(from_info, to_info))
return 1
return 0
def download(
self,
from_info,
to_info,
name=None,
no_progress_bar=False,
file_mode=None,
dir_mode=None,
):
if not hasattr(self, "_download"):
raise RemoteActionNotImplemented("download", self.scheme)
if from_info.scheme != self.scheme:
raise NotImplementedError
if to_info.scheme == self.scheme != "local":
self.copy(from_info, to_info)
return 0
if to_info.scheme != "local":
raise NotImplementedError
if self.isdir(from_info):
return self._download_dir(
from_info, to_info, name, no_progress_bar, file_mode, dir_mode
)
return self._download_file(
from_info, to_info, name, no_progress_bar, file_mode, dir_mode
)
def _download_dir(
self, from_info, to_info, name, no_progress_bar, file_mode, dir_mode
):
from_infos = list(self.walk_files(from_info))
to_infos = (
to_info / info.relative_to(from_info) for info in from_infos
)
with ThreadPoolExecutor(max_workers=self.JOBS) as executor:
download_files = partial(
self._download_file,
name=name,
no_progress_bar=True,
file_mode=file_mode,
dir_mode=dir_mode,
)
futures = executor.map(download_files, from_infos, to_infos)
with Tqdm(
futures,
total=len(from_infos),
desc="Downloading directory",
unit="Files",
disable=no_progress_bar,
) as futures:
return sum(futures)
def _download_file(
self, from_info, to_info, name, no_progress_bar, file_mode, dir_mode
):
makedirs(to_info.parent, exist_ok=True, mode=dir_mode)
logger.debug("Downloading '{}' to '{}'".format(from_info, to_info))
name = name or to_info.name
tmp_file = tmp_fname(to_info)
try:
self._download(
from_info, tmp_file, name=name, no_progress_bar=no_progress_bar
)
except Exception:
msg = "failed to download '{}' to '{}'"
logger.exception(msg.format(from_info, to_info))
return 1
move(tmp_file, to_info, mode=file_mode)
return 0
def open(self, path_info, mode="r", encoding=None):
if hasattr(self, "_generate_download_url"):
get_url = partial(self._generate_download_url, path_info)
return open_url(get_url, mode=mode, encoding=encoding)
raise RemoteActionNotImplemented("open", self.scheme)
def remove(self, path_info):
raise RemoteActionNotImplemented("remove", self.scheme)
def move(self, from_info, to_info):
self.copy(from_info, to_info)
self.remove(from_info)
def copy(self, from_info, to_info):
raise RemoteActionNotImplemented("copy", self.scheme)
def symlink(self, from_info, to_info):
raise RemoteActionNotImplemented("symlink", self.scheme)
def hardlink(self, from_info, to_info):
raise RemoteActionNotImplemented("hardlink", self.scheme)
def reflink(self, from_info, to_info):
raise RemoteActionNotImplemented("reflink", self.scheme)
def exists(self, path_info):
raise NotImplementedError
def path_to_checksum(self, path):
parts = self.path_cls(path).parts[-2:]
if not (len(parts) == 2 and parts[0] and len(parts[0]) == 2):
raise ValueError("Bad cache file path")
return "".join(parts)
def checksum_to_path_info(self, checksum):
    """Map a checksum to its cache location: first two chars form a subdir."""
    prefix, remainder = checksum[:2], checksum[2:]
    return self.path_info / prefix / remainder
def list_cache_paths(self):
    # Cache enumeration is remote-specific; subclasses must implement.
    raise NotImplementedError
def all(self):
    """Yield the checksum of every entry in the cache.

    The cache listing can be huge, so entries are produced lazily by a
    generator instead of being accumulated in memory.
    """
    for cache_path in self.list_cache_paths():
        try:
            yield self.path_to_checksum(cache_path)
        except ValueError:
            # Ignore files that do not follow the cache layout.
            continue
def gc(self, named_cache):
    """Remove cache entries not referenced by `named_cache`.

    Returns True if at least one entry was removed.
    """
    used = self.extract_used_local_checksums(named_cache)
    if self.scheme != "":
        used.update(named_cache[self.scheme])

    removed = False
    for checksum in self.all():
        if checksum not in used:
            self.remove(self.checksum_to_path_info(checksum))
            removed = True
    return removed
def changed_cache_file(self, checksum):
    """Return True when the cache entry for `checksum` is missing or bad.

    A corrupted entry (checksum mismatch) is deleted as a side effect.
    """
    cache_info = self.checksum_to_path_info(checksum)
    actual = self.get_checksum(cache_info)

    logger.debug(
        "cache '{}' expected '{}' actual '{}'".format(
            str(cache_info), checksum, actual
        )
    )

    if not (checksum and actual):
        return True

    # Compare ignoring any suffix after the first dot (e.g. '.dir').
    if actual.split(".")[0] == checksum.split(".")[0]:
        return False

    if self.exists(cache_info):
        logger.warning("corrupted cache file '{}'.".format(cache_info))
        self.remove(cache_info)

    return True
def _changed_dir_cache(self, checksum):
    """Check a directory cache entry together with all of its file entries."""
    if self.changed_cache_file(checksum):
        return True

    if not self._changed_unpacked_dir(checksum):
        return False

    if any(
        self.changed_cache_file(entry[self.PARAM_CHECKSUM])
        for entry in self.get_dir_cache(checksum)
    ):
        return True

    # Everything present: refresh the unpacked-dir bookkeeping.
    self._update_unpacked_dir(checksum)
    return False
def changed_cache(self, checksum):
    """Dispatch to the directory or file cache check by checksum kind."""
    if not self.is_dir_checksum(checksum):
        return self.changed_cache_file(checksum)
    return self._changed_dir_cache(checksum)
def cache_exists(self, checksums, jobs=None, name=None):
    # Return the subset of `checksums` already present in the remote cache.
    # Fast path: when traversal is allowed, list the whole cache once and
    # intersect it with the requested checksums.
    if not self.no_traverse:
        return list(set(checksums) & set(self.all()))

    # Otherwise issue one existence query per checksum, in parallel, with
    # a single progress bar wrapped around the whole operation.
    with Tqdm(
        desc="Querying "
        + ("cache in " + name if name else "remote cache"),
        total=len(checksums),
        unit="file",
    ) as pbar:

        def exists_with_progress(path_info):
            ret = self.exists(path_info)
            pbar.update_desc(str(path_info))
            return ret

        # executor.map is lazy; list(...) below drains it fully before the
        # executor and progress-bar contexts exit.
        with ThreadPoolExecutor(max_workers=jobs or self.JOBS) as executor:
            path_infos = map(self.checksum_to_path_info, checksums)
            in_remote = executor.map(exists_with_progress, path_infos)
            ret = list(itertools.compress(checksums, in_remote))
            return ret
def already_cached(self, path_info):
    """True when the file's current checksum is fully present in cache."""
    current = self.get_checksum(path_info)
    return bool(current) and not self.changed_cache(current)
def safe_remove(self, path_info, force=False):
    """Remove `path_info`, prompting first when data could be lost.

    Raises ConfirmRemoveError if the user declines the prompt.
    """
    if not self.exists(path_info):
        return

    # Only ask when removal is not forced and the content is not cached.
    needs_confirmation = not (force or self.already_cached(path_info))
    if needs_confirmation:
        question = (
            "file '{}' is going to be removed."
            " Are you sure you want to proceed?".format(str(path_info))
        )
        if not prompt.confirm(question):
            raise ConfirmRemoveError(str(path_info))

    self.remove(path_info)
def _checkout_file(
    self, path_info, checksum, force, progress_callback=None
):
    # Link a single cached file into the workspace at `path_info`.
    cache_info = self.checksum_to_path_info(checksum)
    if self.exists(path_info):
        msg = "data '{}' exists. Removing before checkout."
        logger.warning(msg.format(str(path_info)))
        # safe_remove prompts unless the content is cached or force=True.
        self.safe_remove(path_info, force=force)

    self.link(cache_info, path_info)
    # Record the created link and checksum so later runs can detect changes.
    self.state.save_link(path_info)
    self.state.save(path_info, checksum)
    if progress_callback:
        progress_callback(str(path_info))
def makedirs(self, path_info):
    # No-op by default; remotes with real directories override this.
    pass
def _checkout_dir(
    self, path_info, checksum, force, progress_callback=None, relink=False
):
    # Link every file of a cached directory into the workspace.
    # Create dir separately so that dir is created
    # even if there are no files in it
    if not self.exists(path_info):
        self.makedirs(path_info)

    dir_info = self.get_dir_cache(checksum)

    logger.debug("Linking directory '{}'.".format(path_info))

    for entry in dir_info:
        relative_path = entry[self.PARAM_RELPATH]
        entry_checksum = entry[self.PARAM_CHECKSUM]
        entry_cache_info = self.checksum_to_path_info(entry_checksum)
        entry_info = path_info / relative_path

        entry_checksum_info = {self.PARAM_CHECKSUM: entry_checksum}
        # Only (re)link entries that changed, unless relink was requested.
        if relink or self.changed(entry_info, entry_checksum_info):
            self.safe_remove(entry_info, force=force)
            self.link(entry_cache_info, entry_info)
            self.state.save(entry_info, entry_checksum)
        if progress_callback:
            progress_callback(str(entry_info))

    # Drop workspace files no longer belonging to the directory.
    self._remove_redundant_files(path_info, dir_info, force)

    self.state.save_link(path_info)
    self.state.save(path_info, checksum)
def _remove_redundant_files(self, path_info, dir_info, force):
    """Delete workspace files that are not part of the directory cache."""
    needed = {
        path_info / entry[self.PARAM_RELPATH] for entry in dir_info
    }
    # Materialize the walk before removing anything so deletions cannot
    # disturb the traversal.
    present = set(self.walk_files(path_info))

    for redundant in present - needed:
        self.safe_remove(redundant, force)
def checkout(
    self,
    path_info,
    checksum_info,
    force=False,
    progress_callback=None,
    relink=False,
):
    # Materialize `path_info` in the workspace from cache, based on the
    # checksum recorded in `checksum_info`. Returns path_info on failure
    # and None on success (or when nothing needed to be done).
    if path_info.scheme not in ["local", self.scheme]:
        raise NotImplementedError

    checksum = checksum_info.get(self.PARAM_CHECKSUM)
    failed = None
    skip = False
    if not checksum:
        # Nothing to check out; remove any stale data in the workspace.
        logger.warning(
            "No checksum info found for '{}'. "
            "It won't be created.".format(str(path_info))
        )
        self.safe_remove(path_info, force=force)
        failed = path_info

    elif not relink and not self.changed(path_info, checksum_info):
        # Workspace already matches the expected checksum.
        msg = "Data '{}' didn't change."
        logger.debug(msg.format(str(path_info)))
        skip = True

    elif self.changed_cache(checksum):
        # Cache entry missing or corrupted; the file cannot be created.
        msg = "Cache '{}' not found. File '{}' won't be created."
        logger.warning(msg.format(checksum, str(path_info)))
        self.safe_remove(path_info, force=force)
        failed = path_info

    if failed or skip:
        # Still advance the progress for every file we did not touch.
        if progress_callback:
            progress_callback(
                str(path_info), self.get_files_number(checksum)
            )
        return failed

    msg = "Checking out '{}' with cache '{}'."
    logger.debug(msg.format(str(path_info), checksum))

    self._checkout(path_info, checksum, force, progress_callback, relink)
    return None
def _checkout(
    self,
    path_info,
    checksum,
    force=False,
    progress_callback=None,
    relink=False,
):
    """Dispatch checkout to the file or directory implementation."""
    if self.is_dir_checksum(checksum):
        return self._checkout_dir(
            path_info, checksum, force, progress_callback, relink
        )
    return self._checkout_file(
        path_info, checksum, force, progress_callback=progress_callback
    )
def get_files_number(self, checksum):
    """Number of files represented by `checksum` (0 for empty/missing)."""
    if not checksum:
        return 0
    if not self.is_dir_checksum(checksum):
        return 1
    return len(self.get_dir_cache(checksum))
@staticmethod
def unprotect(path_info):
    # Default: nothing to unprotect; the local remote overrides this.
    pass
def _get_unpacked_dir_names(self, checksums):
    # No unpacked-dir support by default; subclasses may override.
    return set()
def extract_used_local_checksums(self, named_cache):
    """Checksums in the 'local' scheme plus their unpacked-dir variants."""
    checksums = set(named_cache["local"])
    return checksums | self._get_unpacked_dir_names(checksums)
def _changed_unpacked_dir(self, checksum):
    # Conservative default: always treat the unpacked dir as changed.
    return True
def _update_unpacked_dir(self, checksum):
    # No unpacked-dir bookkeeping by default; subclasses may override.
    pass
| true | true |
f72e562b024f7ff13e8d304233d4cd6194d7de63 | 17,604 | py | Python | source/extensions/filters/network/kafka/protocol/generator.py | jaricftw/envoy | 766f3fb8dbdafce402631c43c16fda46ed003462 | [
"Apache-2.0"
] | 1 | 2021-12-10T23:58:57.000Z | 2021-12-10T23:58:57.000Z | source/extensions/filters/network/kafka/protocol/generator.py | jaricftw/envoy | 766f3fb8dbdafce402631c43c16fda46ed003462 | [
"Apache-2.0"
] | 30 | 2022-02-17T02:28:37.000Z | 2022-03-31T02:31:02.000Z | source/extensions/filters/network/kafka/protocol/generator.py | jaricftw/envoy | 766f3fb8dbdafce402631c43c16fda46ed003462 | [
"Apache-2.0"
] | 1 | 2020-03-28T12:23:29.000Z | 2020-03-28T12:23:29.000Z | #!/usr/bin/python
# Main library file containing all the protocol generation logic.
def generate_main_code(type, main_header_file, resolver_cc_file, input_files):
    """
    Render the main header and resolver files for Kafka messages.

    The input specification files are parsed into structures representing a
    Kafka message (request or response), which are used to generate:
    - main_header_file - Kafka structure definitions and their deserializers
    - resolver_cc_file - request api key & version mapping to deserializers
    """
    messages = parse_messages(input_files)

    complex_type_template = RenderingHelper.get_template('complex_type_template.j2')
    parsers_template = RenderingHelper.get_template("%s_parser.j2" % type)

    # Render every dependency of every message, then the message's parsers.
    main_header_contents = ''
    for message in messages:
        for dependency in message.declaration_chain:
            main_header_contents += complex_type_template.render(complex_type=dependency)
        main_header_contents += parsers_template.render(complex_type=message)

    # Wrap the rendered structures with headers, namespace declaration etc.
    header_template = RenderingHelper.get_template("%ss_h.j2" % type)
    with open(main_header_file, 'w') as fd:
        fd.write(header_template.render(contents=main_header_contents))

    resolver_template = RenderingHelper.get_template("kafka_%s_resolver_cc.j2" % type)
    with open(resolver_cc_file, 'w') as fd:
        fd.write(resolver_template.render(message_types=messages))
def generate_test_code(type, header_test_cc_file, codec_test_cc_file, input_files):
    """
    Render the test files for Kafka messages.

    The parsed message structures are used to generate:
    - header_test_cc_file - basic serialization/deserialization tests
    - codec_test_cc_file - tests involving codec and Request/ResponseParserResolver
    """
    messages = parse_messages(input_files)

    header_template = RenderingHelper.get_template("%ss_test_cc.j2" % type)
    with open(header_test_cc_file, 'w') as fd:
        fd.write(header_template.render(message_types=messages))

    codec_template = RenderingHelper.get_template("%s_codec_%s_test_cc.j2" % (type, type))
    with open(codec_test_cc_file, 'w') as fd:
        fd.write(codec_template.render(message_types=messages))
def parse_messages(input_files):
    """
    Parse request/response structures from the provided spec files,
    returned sorted by api key.
    """
    import json
    import re

    messages = []
    for input_file in input_files:
        with open(input_file, 'r') as fd:
            raw_contents = fd.read()
        # Spec files may contain '//' comments, which json cannot parse.
        stripped = re.sub(r'//.*\n', '', raw_contents)
        messages.append(parse_top_level_element(json.loads(stripped)))

    messages.sort(key=lambda message: message.get_extra('api_key'))
    return messages
def parse_top_level_element(spec):
    """
    Parse a given structure into a request/response.
    Request/response is just a complex type, that has name & version information kept in differently
    named fields, compared to sub-structures in a message.
    """
    type_name = spec['name']
    # NOTE(review): '2 << 16 - 1' parses as '2 << 15' (65536) due to operator
    # precedence; presumably just intended as an arbitrarily-high upper bound
    # for open-ended version ranges. Confirm before "fixing" - changing the
    # value would change the generated version ranges.
    versions = Statics.parse_version_string(spec['validVersions'], 2 << 16 - 1)
    return parse_complex_type(type_name, spec, versions).with_extra('api_key', spec['apiKey'])
def parse_complex_type(type_name, field_spec, versions):
    """
    Build a Complex structure holding the type's name, its parsed child
    fields, and the versions in which it is allowed.
    """
    children = [
        parse_field(child, versions[-1]) for child in field_spec['fields']
    ]
    return Complex(type_name, children, versions)
def parse_field(field_spec, highest_possible_version):
    """
    Parse a single field: its name, type, and the version ranges in which the
    field is used at all / used as nullable. A field cannot be used in a
    version higher than its type's usage.
    """
    used_versions = Statics.parse_version_string(
        field_spec['versions'], highest_possible_version)

    if 'nullableVersions' in field_spec:
        nullable_versions = Statics.parse_version_string(
            field_spec['nullableVersions'], highest_possible_version)
    else:
        # range(-1) is empty: the field is never nullable.
        nullable_versions = range(-1)

    field_type = parse_type(field_spec['type'], field_spec, highest_possible_version)
    return FieldSpec(field_spec['name'], field_type, used_versions, nullable_versions)
def parse_type(type_name, field_spec, highest_possible_version):
    """
    Resolve a type name into an Array, a Primitive, or a Complex type.
    """
    # Array types are spelled '[]underlying_type' in the spec files instead of
    # having a dedicated element with the type inside.
    if type_name.startswith('[]'):
        element_type = parse_type(type_name[2:], field_spec, highest_possible_version)
        return Array(element_type)

    if type_name in Primitive.PRIMITIVE_TYPE_NAMES:
        return Primitive(type_name, field_spec.get('default'))

    versions = Statics.parse_version_string(field_spec['versions'], highest_possible_version)
    return parse_complex_type(type_name, field_spec, versions)
class Statics:

    @staticmethod
    def parse_version_string(raw_versions, highest_possible_version):
        """
        Translate a spec version string ('N', 'N-M' or 'N+') into the matching
        integer range.
        """
        if raw_versions.endswith('+'):
            # Open-ended: from N up to the highest possible version.
            lower = int(raw_versions[:-1])
            return range(lower, highest_possible_version + 1)
        if '-' in raw_versions:
            lower, upper = raw_versions.split('-', 1)
            return range(int(lower), int(upper) + 1)
        exact = int(raw_versions)
        return range(exact, exact + 1)
class FieldList:
    """
    The fields of a structure at one specific message version (field sets can
    differ across versions, as Kafka adds or removes fields over time).
    """

    def __init__(self, version, fields):
        self.version = version
        self.fields = fields

    def used_fields(self):
        """Fields that are actually present in this version of the structure."""
        return filter(lambda field: field.used_in_version(self.version), self.fields)

    def constructor_signature(self):
        """
        Constructor signature for this version. Different versions can share
        an identical signature (Kafka bumps versions without changes).
        """
        declarations = [
            field.parameter_declaration(self.version) for field in self.used_fields()
        ]
        return ', '.join(declarations)

    def constructor_init_list(self):
        """
        Member initializer list, taking care of T -> optional<T> promotions
        (a field can be T in one version but optional<T> in another).
        """
        items = []
        for field in self.fields:
            if not field.used_in_version(self.version):
                # Unused in this version: initialize with the default value.
                items.append('%s_{%s}' % (field.name, field.default_value()))
            elif field.is_nullable() and not field.is_nullable_in_version(self.version):
                # Member is optional<T>, but the parameter is a plain T here.
                items.append('%s_{absl::make_optional(%s)}' % (field.name, field.name))
            else:
                # Member and parameter have matching types.
                items.append('%s_{%s}' % (field.name, field.name))
        return ', '.join(items)

    def field_count(self):
        return len(list(self.used_fields()))

    def example_value(self):
        return ', '.join(
            field.example_value_for_test(self.version) for field in self.used_fields())
class FieldSpec:
    """
    A single field of a structure: snake_case'd name, type, and the version
    ranges in which it is used / used as nullable.
    """

    def __init__(self, name, type, version_usage, version_usage_as_nullable):
        import re
        # Convert camelCase spec names into snake_case C++ member names.
        step_one = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
        self.name = re.sub('([a-z0-9])([A-Z])', r'\1_\2', step_one).lower()
        self.type = type
        self.version_usage = version_usage
        self.version_usage_as_nullable = version_usage_as_nullable

    def is_nullable(self):
        return len(self.version_usage_as_nullable) > 0

    def is_nullable_in_version(self, version):
        """
        Whether the field is nullable in the given version; fields can be
        non-nullable in earlier versions.
        See https://github.com/apache/kafka/tree/2.2.0-rc0/clients/src/main/resources/common/message#nullable-fields
        """
        return version in self.version_usage_as_nullable

    def used_in_version(self, version):
        return version in self.version_usage

    def field_declaration(self):
        template = 'absl::optional<%s> %s' if self.is_nullable() else '%s %s'
        return template % (self.type.name, self.name)

    def parameter_declaration(self, version):
        template = (
            'absl::optional<%s> %s'
            if self.is_nullable_in_version(version) else '%s %s')
        return template % (self.type.name, self.name)

    def default_value(self):
        if self.is_nullable():
            return '{%s}' % self.type.default_value()
        return str(self.type.default_value())

    def example_value_for_test(self, version):
        if self.is_nullable():
            return 'absl::make_optional<%s>(%s)' % (
                self.type.name, self.type.example_value_for_test(version))
        return str(self.type.example_value_for_test(version))

    def deserializer_name_in_version(self, version):
        base = self.type.deserializer_name_in_version(version)
        if self.is_nullable_in_version(version):
            return 'Nullable%s' % base
        return base

    def is_printable(self):
        return self.type.is_printable()
class TypeSpecification:
    # Interface implemented by the Array, Primitive and Complex descriptors.

    def deserializer_name_in_version(self, version):
        """
        Renders the deserializer name of given type, in message with given version.
        """
        raise NotImplementedError()

    def default_value(self):
        """
        Returns a default value for given type.
        """
        raise NotImplementedError()

    def example_value_for_test(self, version):
        # Renders an example C++ value of this type, used in generated tests.
        raise NotImplementedError()

    def is_printable(self):
        # Whether the generated C++ type can be printed.
        raise NotImplementedError()
class Array(TypeSpecification):
    """
    An array complex type. Using Array<Foo> requires that the underlying type
    is declared first (`struct Foo {...}`), hence the shared declaration chain.
    """

    def __init__(self, underlying):
        self.underlying = underlying
        self.declaration_chain = self.underlying.declaration_chain

    @property
    def name(self):
        return 'std::vector<%s>' % self.underlying.name

    def deserializer_name_in_version(self, version):
        element_deserializer = self.underlying.deserializer_name_in_version(version)
        return 'ArrayDeserializer<%s, %s>' % (self.underlying.name, element_deserializer)

    def default_value(self):
        return 'std::vector<%s>{}' % (self.underlying.name)

    def example_value_for_test(self, version):
        element_example = self.underlying.example_value_for_test(version)
        return 'std::vector<%s>{ %s }' % (self.underlying.name, element_example)

    def is_printable(self):
        return self.underlying.is_printable()
class Primitive(TypeSpecification):
    """
    A Kafka primitive value and its mapping to C++ types & deserializers.
    """

    PRIMITIVE_TYPE_NAMES = ['bool', 'int8', 'int16', 'int32', 'int64', 'string', 'bytes']

    # Kafka primitive -> C++ type used in generated structures.
    KAFKA_TYPE_TO_ENVOY_TYPE = {
        'string': 'std::string',
        'bool': 'bool',
        'int8': 'int8_t',
        'int16': 'int16_t',
        'int32': 'int32_t',
        'int64': 'int64_t',
        'bytes': 'Bytes',
    }

    KAFKA_TYPE_TO_DESERIALIZER = {
        'string': 'StringDeserializer',
        'bool': 'BooleanDeserializer',
        'int8': 'Int8Deserializer',
        'int16': 'Int16Deserializer',
        'int32': 'Int32Deserializer',
        'int64': 'Int64Deserializer',
        'bytes': 'BytesDeserializer',
    }

    # Defaults per Kafka's message spec, see
    # https://github.com/apache/kafka/tree/trunk/clients/src/main/resources/common/message#deserializing-messages
    KAFKA_TYPE_TO_DEFAULT_VALUE = {
        'string': '""',
        'bool': 'false',
        'int8': '0',
        'int16': '0',
        'int32': '0',
        'int64': '0',
        'bytes': '{}',
    }

    # Custom values chosen to make the generated test code more readable.
    KAFKA_TYPE_TO_EXAMPLE_VALUE_FOR_TEST = {
        'string': '"string"',
        'bool': 'false',
        'int8': 'static_cast<int8_t>(8)',
        'int16': 'static_cast<int16_t>(16)',
        'int32': 'static_cast<int32_t>(32)',
        'int64': 'static_cast<int64_t>(64)',
        'bytes': 'Bytes({0, 1, 2, 3})',
    }

    def __init__(self, name, custom_default_value):
        self.original_name = name
        self.name = Primitive.compute(name, Primitive.KAFKA_TYPE_TO_ENVOY_TYPE)
        self.custom_default_value = custom_default_value
        # Primitives need no extra struct declarations.
        self.declaration_chain = []
        self.deserializer_name = Primitive.compute(name, Primitive.KAFKA_TYPE_TO_DESERIALIZER)

    @staticmethod
    def compute(name, map):
        """Look up `name` in `map`, raising ValueError for unknown primitives."""
        if name not in map:
            raise ValueError(name)
        return map[name]

    def deserializer_name_in_version(self, version):
        # Primitive deserializers do not depend on the message version.
        return self.deserializer_name

    def default_value(self):
        if self.custom_default_value is None:
            return Primitive.compute(self.original_name, Primitive.KAFKA_TYPE_TO_DEFAULT_VALUE)
        return self.custom_default_value

    def example_value_for_test(self, version):
        return Primitive.compute(self.original_name, Primitive.KAFKA_TYPE_TO_EXAMPLE_VALUE_FOR_TEST)

    def is_printable(self):
        return self.name not in ['Bytes']
class Complex(TypeSpecification):
    """
    An aggregate of multiple types; maps to a generated C++ struct.
    """

    def __init__(self, name, fields, versions):
        self.name = name
        self.fields = fields
        self.versions = versions
        self.declaration_chain = self.__compute_declaration_chain()
        self.attributes = {}

    def __compute_declaration_chain(self):
        """
        All non-primitive types used by this type; they must be declared
        before this struct's own declaration.
        """
        chain = []
        for field in self.fields:
            chain.extend(field.type.declaration_chain)
        chain.append(self)
        return chain

    def with_extra(self, key, value):
        self.attributes[key] = value
        return self

    def get_extra(self, key):
        return self.attributes[key]

    def compute_constructors(self):
        """
        One constructor per unique signature. Field lists for different
        versions may be identical (Kafka bumps versions without changes), so
        duplicates are merged while tracking every version they cover.
        """
        by_signature = {}
        for field_list in self.compute_field_lists():
            signature = field_list.constructor_signature()
            existing = by_signature.get(signature)
            if existing is not None:
                existing['versions'].append(field_list.version)
                continue
            if signature:
                declaration = '%s(%s): %s {};' % (
                    self.name, signature, field_list.constructor_init_list())
            else:
                declaration = '%s() {};' % self.name
            by_signature[signature] = {
                'versions': [field_list.version],
                'signature': signature,
                'full_declaration': declaration,
            }
        return sorted(by_signature.values(), key=lambda entry: entry['versions'][0])

    def compute_field_lists(self):
        """Field list for every version of this structure."""
        return [FieldList(version, self.fields) for version in self.versions]

    def deserializer_name_in_version(self, version):
        return '%sV%dDeserializer' % (self.name, version)

    def default_value(self):
        # There is no sensible zero value for a generated struct.
        raise NotImplementedError('unable to create default value of complex type')

    def example_value_for_test(self, version):
        field_list = next(
            fl for fl in self.compute_field_lists() if fl.version == version)
        example_values = [
            field.example_value_for_test(version) for field in field_list.used_fields()
        ]
        return '%s(%s)' % (self.name, ', '.join(example_values))

    def is_printable(self):
        return True
class RenderingHelper:
    """
    Jinja template lookup helper.
    """

    @staticmethod
    def get_template(template):
        import os
        import sys

        import jinja2

        # Templates are resolved relative to the main start script, as main &
        # test templates are stored in different directories.
        search_path = os.path.dirname(os.path.abspath(sys.argv[0]))
        environment = jinja2.Environment(
            loader=jinja2.FileSystemLoader(searchpath=search_path))
        return environment.get_template(template)
| 33.984556 | 115 | 0.696603 |
def generate_main_code(type, main_header_file, resolver_cc_file, input_files):
messages = parse_messages(input_files)
complex_type_template = RenderingHelper.get_template('complex_type_template.j2')
parsers_template = RenderingHelper.get_template("%s_parser.j2" % type)
main_header_contents = ''
for message in messages:
for dependency in message.declaration_chain:
main_header_contents += complex_type_template.render(complex_type=dependency)
main_header_contents += parsers_template.render(complex_type=message)
template = RenderingHelper.get_template("%ss_h.j2" % type)
contents = template.render(contents=main_header_contents)
with open(main_header_file, 'w') as fd:
fd.write(contents)
template = RenderingHelper.get_template("kafka_%s_resolver_cc.j2" % type)
contents = template.render(message_types=messages)
with open(resolver_cc_file, 'w') as fd:
fd.write(contents)
def generate_test_code(type, header_test_cc_file, codec_test_cc_file, input_files):
messages = parse_messages(input_files)
template = RenderingHelper.get_template("%ss_test_cc.j2" % type)
contents = template.render(message_types=messages)
with open(header_test_cc_file, 'w') as fd:
fd.write(contents)
template = RenderingHelper.get_template("%s_codec_%s_test_cc.j2" % (type, type))
contents = template.render(message_types=messages)
with open(codec_test_cc_file, 'w') as fd:
fd.write(contents)
def parse_messages(input_files):
import re
import json
messages = []
for input_file in input_files:
with open(input_file, 'r') as fd:
raw_contents = fd.read()
without_comments = re.sub(r'//.*\n', '', raw_contents)
message_spec = json.loads(without_comments)
message = parse_top_level_element(message_spec)
messages.append(message)
messages.sort(key=lambda x: x.get_extra('api_key'))
return messages
def parse_top_level_element(spec):
type_name = spec['name']
versions = Statics.parse_version_string(spec['validVersions'], 2 << 16 - 1)
return parse_complex_type(type_name, spec, versions).with_extra('api_key', spec['apiKey'])
def parse_complex_type(type_name, field_spec, versions):
fields = []
for child_field in field_spec['fields']:
child = parse_field(child_field, versions[-1])
fields.append(child)
return Complex(type_name, fields, versions)
def parse_field(field_spec, highest_possible_version):
version_usage = Statics.parse_version_string(field_spec['versions'], highest_possible_version)
version_usage_as_nullable = Statics.parse_version_string(
field_spec['nullableVersions'],
highest_possible_version) if 'nullableVersions' in field_spec else range(-1)
parsed_type = parse_type(field_spec['type'], field_spec, highest_possible_version)
return FieldSpec(field_spec['name'], parsed_type, version_usage, version_usage_as_nullable)
def parse_type(type_name, field_spec, highest_possible_version):
if (type_name.startswith('[]')):
underlying_type = parse_type(type_name[2:], field_spec, highest_possible_version)
return Array(underlying_type)
else:
if (type_name in Primitive.PRIMITIVE_TYPE_NAMES):
return Primitive(type_name, field_spec.get('default'))
else:
versions = Statics.parse_version_string(field_spec['versions'], highest_possible_version)
return parse_complex_type(type_name, field_spec, versions)
class Statics:
@staticmethod
def parse_version_string(raw_versions, highest_possible_version):
if raw_versions.endswith('+'):
return range(int(raw_versions[:-1]), highest_possible_version + 1)
else:
if '-' in raw_versions:
tokens = raw_versions.split('-', 1)
return range(int(tokens[0]), int(tokens[1]) + 1)
else:
single_version = int(raw_versions)
return range(single_version, single_version + 1)
class FieldList:
def __init__(self, version, fields):
self.version = version
self.fields = fields
def used_fields(self):
return filter(lambda x: x.used_in_version(self.version), self.fields)
def constructor_signature(self):
parameter_spec = map(lambda x: x.parameter_declaration(self.version), self.used_fields())
return ', '.join(parameter_spec)
def constructor_init_list(self):
init_list = []
for field in self.fields:
if field.used_in_version(self.version):
if field.is_nullable():
if field.is_nullable_in_version(self.version):
init_list_item = '%s_{%s}' % (field.name, field.name)
init_list.append(init_list_item)
else:
init_list_item = '%s_{absl::make_optional(%s)}' % (field.name, field.name)
init_list.append(init_list_item)
else:
init_list_item = '%s_{%s}' % (field.name, field.name)
init_list.append(init_list_item)
else:
init_list_item = '%s_{%s}' % (field.name, field.default_value())
init_list.append(init_list_item)
pass
return ', '.join(init_list)
def field_count(self):
return len(list(self.used_fields()))
def example_value(self):
return ', '.join(map(lambda x: x.example_value_for_test(self.version), self.used_fields()))
class FieldSpec:
def __init__(self, name, type, version_usage, version_usage_as_nullable):
import re
separated = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
self.name = re.sub('([a-z0-9])([A-Z])', r'\1_\2', separated).lower()
self.type = type
self.version_usage = version_usage
self.version_usage_as_nullable = version_usage_as_nullable
def is_nullable(self):
return len(self.version_usage_as_nullable) > 0
def is_nullable_in_version(self, version):
return version in self.version_usage_as_nullable
def used_in_version(self, version):
return version in self.version_usage
def field_declaration(self):
if self.is_nullable():
return 'absl::optional<%s> %s' % (self.type.name, self.name)
else:
return '%s %s' % (self.type.name, self.name)
def parameter_declaration(self, version):
if self.is_nullable_in_version(version):
return 'absl::optional<%s> %s' % (self.type.name, self.name)
else:
return '%s %s' % (self.type.name, self.name)
def default_value(self):
if self.is_nullable():
return '{%s}' % self.type.default_value()
else:
return str(self.type.default_value())
def example_value_for_test(self, version):
if self.is_nullable():
return 'absl::make_optional<%s>(%s)' % (self.type.name,
self.type.example_value_for_test(version))
else:
return str(self.type.example_value_for_test(version))
def deserializer_name_in_version(self, version):
if self.is_nullable_in_version(version):
return 'Nullable%s' % self.type.deserializer_name_in_version(version)
else:
return self.type.deserializer_name_in_version(version)
def is_printable(self):
return self.type.is_printable()
class TypeSpecification:
def deserializer_name_in_version(self, version):
raise NotImplementedError()
def default_value(self):
raise NotImplementedError()
def example_value_for_test(self, version):
raise NotImplementedError()
def is_printable(self):
raise NotImplementedError()
class Array(TypeSpecification):
def __init__(self, underlying):
self.underlying = underlying
self.declaration_chain = self.underlying.declaration_chain
@property
def name(self):
return 'std::vector<%s>' % self.underlying.name
def deserializer_name_in_version(self, version):
return 'ArrayDeserializer<%s, %s>' % (self.underlying.name,
self.underlying.deserializer_name_in_version(version))
def default_value(self):
return 'std::vector<%s>{}' % (self.underlying.name)
def example_value_for_test(self, version):
return 'std::vector<%s>{ %s }' % (self.underlying.name,
self.underlying.example_value_for_test(version))
def is_printable(self):
return self.underlying.is_printable()
class Primitive(TypeSpecification):
    """A Kafka primitive value and its mapping to C++ types & deserializers."""

    PRIMITIVE_TYPE_NAMES = ['bool', 'int8', 'int16', 'int32', 'int64', 'string', 'bytes']

    # Kafka primitive -> C++ type used in generated structures.
    KAFKA_TYPE_TO_ENVOY_TYPE = {
        'string': 'std::string',
        'bool': 'bool',
        'int8': 'int8_t',
        'int16': 'int16_t',
        'int32': 'int32_t',
        'int64': 'int64_t',
        'bytes': 'Bytes',
    }

    KAFKA_TYPE_TO_DESERIALIZER = {
        'string': 'StringDeserializer',
        'bool': 'BooleanDeserializer',
        'int8': 'Int8Deserializer',
        'int16': 'Int16Deserializer',
        'int32': 'Int32Deserializer',
        'int64': 'Int64Deserializer',
        'bytes': 'BytesDeserializer',
    }

    # BUG FIX: this mapping was named 'T_VALUE', but default_value() below
    # reads Primitive.KAFKA_TYPE_TO_DEFAULT_VALUE, which raised
    # AttributeError for any primitive without a custom default.
    KAFKA_TYPE_TO_DEFAULT_VALUE = {
        'string': '""',
        'bool': 'false',
        'int8': '0',
        'int16': '0',
        'int32': '0',
        'int64': '0',
        'bytes': '{}',
    }

    # Custom values that make the generated test code more readable.
    KAFKA_TYPE_TO_EXAMPLE_VALUE_FOR_TEST = {
        'string': '"string"',
        'bool': 'false',
        'int8': 'static_cast<int8_t>(8)',
        'int16': 'static_cast<int16_t>(16)',
        'int32': 'static_cast<int32_t>(32)',
        'int64': 'static_cast<int64_t>(64)',
        'bytes': 'Bytes({0, 1, 2, 3})',
    }

    def __init__(self, name, custom_default_value):
        self.original_name = name
        self.name = Primitive.compute(name, Primitive.KAFKA_TYPE_TO_ENVOY_TYPE)
        self.custom_default_value = custom_default_value
        # Primitives need no extra struct declarations.
        self.declaration_chain = []
        self.deserializer_name = Primitive.compute(name, Primitive.KAFKA_TYPE_TO_DESERIALIZER)

    @staticmethod
    def compute(name, map):
        """Look up `name` in `map`, raising ValueError for unknown primitives."""
        if name in map:
            return map[name]
        else:
            raise ValueError(name)

    def deserializer_name_in_version(self, version):
        # Primitive deserializers do not depend on the message version.
        return self.deserializer_name

    def default_value(self):
        if self.custom_default_value is not None:
            return self.custom_default_value
        else:
            return Primitive.compute(self.original_name, Primitive.KAFKA_TYPE_TO_DEFAULT_VALUE)

    def example_value_for_test(self, version):
        return Primitive.compute(self.original_name, Primitive.KAFKA_TYPE_TO_EXAMPLE_VALUE_FOR_TEST)

    def is_printable(self):
        return self.name not in ['Bytes']
class Complex(TypeSpecification):
    """Composite Kafka protocol structure built from typed fields.

    Knows which api versions it appears in and can render its C++
    constructors, deserializer names and example values for the generator.
    """

    def __init__(self, name, fields, versions):
        self.name = name
        self.fields = fields
        self.versions = versions
        self.declaration_chain = self.__compute_declaration_chain()
        self.attributes = {}

    def __compute_declaration_chain(self):
        # Field types must be declared before the structure that uses them.
        chain = []
        for field in self.fields:
            chain.extend(field.type.declaration_chain)
        chain.append(self)
        return chain

    def with_extra(self, key, value):
        """Attach an arbitrary attribute; returns self for chaining."""
        self.attributes[key] = value
        return self

    def get_extra(self, key):
        """Return a previously attached attribute (KeyError if absent)."""
        return self.attributes[key]

    def compute_constructors(self):
        """Render one C++ constructor per distinct signature.

        Versions that share a signature share a constructor; the result is
        ordered by the first version using each signature.
        """
        by_signature = {}
        for field_list in self.compute_field_lists():
            signature = field_list.constructor_signature()
            existing = by_signature.get(signature)
            if existing is not None:
                existing['versions'].append(field_list.version)
                continue
            if len(signature) > 0:
                declaration = '%s(%s): %s {};' % (
                    self.name, signature, field_list.constructor_init_list())
            else:
                declaration = '%s() {};' % self.name
            by_signature[signature] = {
                'versions': [field_list.version],
                'signature': signature,
                'full_declaration': declaration,
            }
        return sorted(by_signature.values(), key=lambda entry: entry['versions'][0])

    def compute_field_lists(self):
        """Return a FieldList for every version of this structure."""
        return [FieldList(version, self.fields) for version in self.versions]

    def deserializer_name_in_version(self, version):
        return '%sV%dDeserializer' % (self.name, version)

    def default_value(self):
        raise NotImplementedError('unable to create default value of complex type')

    def example_value_for_test(self, version):
        field_list = next(fl for fl in self.compute_field_lists() if fl.version == version)
        arguments = [field.example_value_for_test(version) for field in field_list.used_fields()]
        return '%s(%s)' % (self.name, ', '.join(arguments))

    def is_printable(self):
        return True
class RenderingHelper:
    """Loads Jinja2 templates located next to the running script."""

    @staticmethod
    def get_template(template):
        """Return the named template, searched in the directory of sys.argv[0]."""
        import os
        import sys
        import jinja2
        script_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
        loader = jinja2.FileSystemLoader(searchpath=script_dir)
        return jinja2.Environment(loader=loader).get_template(template)
| true | true |
f72e569409170b725e2459305ed08163426eefec | 157 | py | Python | testmeuk/test_enum.py | Jhsmit/AtomicPlot | 551a79b899008408e1126e67ee690a87b0aa6e15 | [
"MIT"
] | null | null | null | testmeuk/test_enum.py | Jhsmit/AtomicPlot | 551a79b899008408e1126e67ee690a87b0aa6e15 | [
"MIT"
] | 1 | 2018-06-07T09:40:19.000Z | 2018-06-07T09:40:19.000Z | testmeuk/test_enum.py | Jhsmit/AtomicPlot | 551a79b899008408e1126e67ee690a87b0aa6e15 | [
"MIT"
] | null | null | null | from atom.api import Atom, Enum, Int, Str
class EnumTest(Atom):
    # 'att' is declared to accept only the literal values 5 (int) and '4' (str).
    att = Enum(5, '4')
# Exercise assignments against the declared enum members.
et = EnumTest()
et.att = 5
# NOTE(review): '5' is not one of the declared members — presumably atom
# rejects it at assignment time; confirm against the atom.api docs.
et.att = '5'
et.att = 3.4 | 12.076923 | 42 | 0.547771 | from atom.api import Atom, Enum, Int, Str
class EnumTest(Atom):
att = Enum(5, '4')
et = EnumTest()
et.att = 5
et.att = '5'
et.att = 3.4 | true | true |
f72e57a4c0cf9726978343096b637c3248cc5657 | 829 | py | Python | yoapi/extensions/flask_sendgrid.py | YoApp/yo-api | a162e51804ab91724cc7ad3e7608410329da6789 | [
"MIT"
] | 1 | 2021-12-17T03:25:34.000Z | 2021-12-17T03:25:34.000Z | yoapi/extensions/flask_sendgrid.py | YoApp/yo-api | a162e51804ab91724cc7ad3e7608410329da6789 | [
"MIT"
] | null | null | null | yoapi/extensions/flask_sendgrid.py | YoApp/yo-api | a162e51804ab91724cc7ad3e7608410329da6789 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Flask extension pacakge for Sendgrid"""
from . import FlaskExtension
from sendgrid import SendGridClient, Mail
class SendGrid(FlaskExtension):
    """Flask extension wrapping the SendGrid mail API client."""

    EXTENSION_NAME = 'sendgrid'

    def __init__(self, app=None):
        super(SendGrid, self).__init__(app=app)

    def _create_instance(self, app):
        # Credentials are read from the Flask application config.
        username = app.config.get('SENDGRID_USERNAME')
        password = app.config.get('SENDGRID_PASSWORD')
        return SendGridClient(username, password)

    def send_mail(self, body=None, subject=None, recipient=None, sender=None):
        """Build and dispatch a single email message."""
        message = Mail(to=recipient,
                       from_email=sender,
                       subject=subject,
                       text=body)
        self.instance.send(message)
| 25.90625 | 78 | 0.615199 |
from . import FlaskExtension
from sendgrid import SendGridClient, Mail
class SendGrid(FlaskExtension):
EXTENSION_NAME = 'sendgrid'
def __init__(self, app=None):
super(SendGrid, self).__init__(app=app)
def _create_instance(self, app):
client = SendGridClient(
app.config.get('SENDGRID_USERNAME'),
app.config.get('SENDGRID_PASSWORD'))
return client
def send_mail(self, body=None, subject=None, recipient=None, sender=None):
mail = Mail(to=recipient,
from_email=sender,
subject=subject,
text=body)
self.instance.send(mail)
| true | true |
f72e5883e8bc74d074a7e160b876d184fda48193 | 1,596 | py | Python | src/an_FilterS1.py | mbonnema/SWAV | d5dd4dd1a88de008f27b0232c536491c7dc84623 | [
"CNRI-Python"
] | null | null | null | src/an_FilterS1.py | mbonnema/SWAV | d5dd4dd1a88de008f27b0232c536491c7dc84623 | [
"CNRI-Python"
] | null | null | null | src/an_FilterS1.py | mbonnema/SWAV | d5dd4dd1a88de008f27b0232c536491c7dc84623 | [
"CNRI-Python"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 28 11:35:01 2021
@author: mbonnema
"""
import numpy as np
def FilterS1(D, A, WE, LE):
    """Filter Sentinel-1 surface-area time series.

    D/A/WE/LE map each key to parallel lists of dates, areas, water-edge
    errors and land-edge errors. Two passes are applied per key:
      1. Drop observations whose (clamped non-negative) edge error exceeds
         10% of the observed area.
      2. Drop area outliers more than three standard deviations from the
         mean of the surviving areas; the same mask is applied to every
         parallel series.

    Returns a tuple (D_f, A_f, WE_f, LE_f) of dicts of numpy arrays.
    """
    D_f = {}
    A_f = {}
    WE_f = {}
    LE_f = {}
    for key in D:
        d_f = []
        a_f = []
        we_f = []
        le_f = []
        for d, a, we, le in zip(D[key], A[key], WE[key], LE[key]):
            # Negative error estimates are treated as zero error.
            we = max(we, 0)
            le = max(le, 0)
            # Reject observations with >10% relative error on either edge.
            if a > 0 and (we / a > 0.1 or le / a > 0.1):
                continue
            d_f.append(d)
            a_f.append(a)
            we_f.append(we)
            le_f.append(le)
        # 3-sigma outlier removal on area; compute the boolean mask once
        # instead of re-evaluating the comparison for every output series.
        a_arr = np.array(a_f)
        a_mean = np.mean(a_arr)
        a_std = np.std(a_arr)
        keep = (a_arr <= a_mean + 3 * a_std) & (a_arr >= a_mean - 3 * a_std)
        D_f[key] = np.array(d_f)[keep]
        WE_f[key] = np.array(we_f)[keep]
        LE_f[key] = np.array(le_f)[keep]
        A_f[key] = a_arr[keep]
    return (D_f, A_f, WE_f, LE_f)
| 27.050847 | 98 | 0.431704 |
import numpy as np
def FilterS1(D,A,WE,LE):
D_f = {}
A_f = {}
WE_f = {}
LE_f = {}
for key in D:
dates = D[key]
areas = A[key]
werrors = WE[key]
lerrors = LE[key]
d_f = []
a_f = []
we_f = []
le_f = []
for d,a,we,le in zip(dates,areas,werrors,lerrors):
if we < 0:
we = 0
if le < 0:
le = 0
if a > 0:
if we/a > 0.1:
continue
if a > 0:
if le/a > 0.1:
continue
d_f.append(d)
a_f.append(a)
we_f.append(we)
le_f.append(le)
a_std = np.std(np.array(a_f))
a_mean = np.mean(np.array(a_f))
d_f = np.array(d_f)[np.array([a_f<=(a_mean+a_std*3),a_f>=(a_mean-a_std*3)]).all(axis=0)]
we_f = np.array(we_f)[np.array([a_f<=(a_mean+a_std*3),a_f>=(a_mean-a_std*3)]).all(axis=0)]
le_f = np.array(le_f)[np.array([a_f<=(a_mean+a_std*3),a_f>=(a_mean-a_std*3)]).all(axis=0)]
a_f = np.array(a_f)[np.array([a_f<=(a_mean+a_std*3),a_f>=(a_mean-a_std*3)]).all(axis=0)]
D_f[key] = d_f
A_f[key] = a_f
WE_f[key] = we_f
LE_f[key] = le_f
return(D_f,A_f,WE_f,LE_f)
| true | true |
f72e58af1326ae193bd57bd14e31f871dea6eebf | 6,316 | py | Python | src/mpi4py/bench.py | renefritze/mpi4py | 23d3635574eeb5eb7ebf4fb51f91f0604306d848 | [
"BSD-2-Clause"
] | null | null | null | src/mpi4py/bench.py | renefritze/mpi4py | 23d3635574eeb5eb7ebf4fb51f91f0604306d848 | [
"BSD-2-Clause"
] | null | null | null | src/mpi4py/bench.py | renefritze/mpi4py | 23d3635574eeb5eb7ebf4fb51f91f0604306d848 | [
"BSD-2-Clause"
] | null | null | null | # Author: Lisandro Dalcin
# Contact: dalcinl@gmail.com
"""Run MPI benchmarks and tests."""
import sys as _sys
def helloworld(comm, args=None, verbose=True):
    """Hello, World! using MPI.

    Each rank prints a greeting; a byte token is passed along the ranks so
    the greetings appear in rank order. Returns the formatted message.
    """
    # pylint: disable=import-outside-toplevel
    from argparse import ArgumentParser
    parser = ArgumentParser(prog=__name__ + " helloworld")
    parser.add_argument("-q", "--quiet", action="store_false",
                        dest="verbose", default=verbose)
    options = parser.parse_args(args)
    from . import MPI
    size = comm.Get_size()
    rank = comm.Get_rank()
    name = MPI.Get_processor_name()
    # %*d left-pads the rank to the width of the largest rank number.
    message = ("Hello, World! I am process %*d of %d on %s.\n"
               % (len(str(size - 1)), rank, size, name))
    comm.Barrier()
    # Token passing: wait for the predecessor before printing, then signal
    # the successor, so output is serialized in rank order.
    if rank > 0:
        comm.Recv([None, 'B'], rank - 1)
    if options.verbose:
        _sys.stdout.write(message)
        _sys.stdout.flush()
    if rank < size - 1:
        comm.Send([None, 'B'], rank + 1)
    comm.Barrier()
    return message
def ringtest(comm, args=None, verbose=True):
    """Time a message going around the ring of processes.

    Sends an n-byte message once around all ranks per iteration and returns
    the elapsed wall time (warm-up iterations excluded).
    """
    # pylint: disable=too-many-locals
    # pylint: disable=too-many-statements
    # pylint: disable=import-outside-toplevel
    from argparse import ArgumentParser
    parser = ArgumentParser(prog=__name__ + " ringtest")
    parser.add_argument("-q", "--quiet", action="store_false",
                        dest="verbose", default=verbose)
    parser.add_argument("-n", "--size", type=int, default=1, dest="size",
                        help="message size")
    parser.add_argument("-s", "--skip", type=int, default=0, dest="skip",
                        help="number of warm-up iterations")
    parser.add_argument("-l", "--loop", type=int, default=1, dest="loop",
                        help="number of iterations")
    options = parser.parse_args(args)
    def ring(comm, n=1, loop=1, skip=0):
        # Pass an n-byte message around the ring `loop` times after `skip`
        # warm-up laps; timing starts at iteration index == skip.
        # pylint: disable=invalid-name
        # pylint: disable=missing-docstring
        from array import array
        from . import MPI
        iterations = list(range((loop + skip)))
        size = comm.Get_size()
        rank = comm.Get_rank()
        source = (rank - 1) % size
        dest = (rank + 1) % size
        # Bind bound methods to locals to keep per-iteration overhead low.
        Sendrecv = comm.Sendrecv
        Send = comm.Send
        Recv = comm.Recv
        Wtime = MPI.Wtime
        sendmsg = array('B', [+42]) * n
        recvmsg = array('B', [0x0]) * n
        if size == 1:
            # Single process: exchange with itself via Sendrecv.
            for i in iterations:
                if i == skip:
                    tic = Wtime()
                Sendrecv(sendmsg, dest, 0,
                         recvmsg, source, 0)
        else:
            if rank == 0:
                # Rank 0 originates the message and receives it back.
                for i in iterations:
                    if i == skip:
                        tic = Wtime()
                    Send(sendmsg, dest, 0)
                    Recv(recvmsg, source, 0)
            else:
                # Other ranks forward whatever they received.
                sendmsg = recvmsg
                for i in iterations:
                    if i == skip:
                        tic = Wtime()
                    Recv(recvmsg, source, 0)
                    Send(sendmsg, dest, 0)
        toc = Wtime()
        if comm.rank == 0 and sendmsg != recvmsg: # pragma: no cover
            # Data corruption on the ring: warn and abort the whole job.
            import warnings
            import traceback
            try:
                warnings.warn("received message does not match!")
            except UserWarning:
                traceback.print_exc()
            comm.Abort(2)
        return toc - tic
    size = getattr(options, 'size', 1)
    loop = getattr(options, 'loop', 1)
    skip = getattr(options, 'skip', 0)
    comm.Barrier()
    elapsed = ring(comm, size, loop, skip)
    if options.verbose and comm.rank == 0:
        message = ("time for %d loops = %g seconds (%d processes, %d bytes)\n"
                   % (loop, elapsed, comm.size, size))
        _sys.stdout.write(message)
        _sys.stdout.flush()
    return elapsed
def main(args=None):
    """Entry-point for ``python -m mpi4py.bench``.

    Parses global options, configures MPI runtime settings and profiling
    before importing MPI, then dispatches to the requested benchmark.
    """
    # pylint: disable=import-outside-toplevel
    from argparse import ArgumentParser, REMAINDER
    parser = ArgumentParser(prog=__name__,
                            usage="%(prog)s [options] <command> [args]")
    parser.add_argument("--threads",
                        action="store_true", dest="threads", default=None,
                        help="initialize MPI with thread support")
    parser.add_argument("--no-threads",
                        action="store_false", dest="threads", default=None,
                        help="initialize MPI without thread support")
    parser.add_argument("--thread-level",
                        dest="thread_level", default=None,
                        action="store", metavar="LEVEL",
                        choices="single funneled serialized multiple".split(),
                        help="initialize MPI with required thread level")
    parser.add_argument("--mpe",
                        action="store_true", dest="mpe", default=False,
                        help="use MPE for MPI profiling")
    parser.add_argument("--vt",
                        action="store_true", dest="vt", default=False,
                        help="use VampirTrace for MPI profiling")
    parser.add_argument("command",
                        action="store", metavar="<command>",
                        help="benchmark command to run")
    parser.add_argument("args",
                        nargs=REMAINDER, metavar="[args]",
                        help="arguments for benchmark command")
    options = parser.parse_args(args)
    # rc/profile must be configured BEFORE `from . import MPI`, which
    # triggers MPI initialization.
    from . import rc, profile
    if options.threads is not None:
        rc.threads = options.threads
    if options.thread_level is not None:
        rc.thread_level = options.thread_level
    if options.mpe:
        profile('mpe', logfile='mpi4py')
    if options.vt:
        profile('vt', logfile='mpi4py')
    from . import MPI
    comm = MPI.COMM_WORLD
    if options.command not in main.commands:
        # Only rank 0 prints the error; every rank exits with status 2.
        if comm.rank == 0:
            parser.error("unknown command '%s'" % options.command)
        parser.exit(2)
    command = main.commands[options.command]
    command(comm, options.args)
    parser.exit()
# Registry of available benchmark commands, attached to main() itself.
main.commands = {  # type: ignore[attr-defined]
    'helloworld': helloworld,
    'ringtest': ringtest,
}
if __name__ == '__main__':
    main()
| 37.152941 | 78 | 0.547498 |
import sys as _sys
def helloworld(comm, args=None, verbose=True):
from argparse import ArgumentParser
parser = ArgumentParser(prog=__name__ + " helloworld")
parser.add_argument("-q", "--quiet", action="store_false",
dest="verbose", default=verbose)
options = parser.parse_args(args)
from . import MPI
size = comm.Get_size()
rank = comm.Get_rank()
name = MPI.Get_processor_name()
message = ("Hello, World! I am process %*d of %d on %s.\n"
% (len(str(size - 1)), rank, size, name))
comm.Barrier()
if rank > 0:
comm.Recv([None, 'B'], rank - 1)
if options.verbose:
_sys.stdout.write(message)
_sys.stdout.flush()
if rank < size - 1:
comm.Send([None, 'B'], rank + 1)
comm.Barrier()
return message
def ringtest(comm, args=None, verbose=True):
from argparse import ArgumentParser
parser = ArgumentParser(prog=__name__ + " ringtest")
parser.add_argument("-q", "--quiet", action="store_false",
dest="verbose", default=verbose)
parser.add_argument("-n", "--size", type=int, default=1, dest="size",
help="message size")
parser.add_argument("-s", "--skip", type=int, default=0, dest="skip",
help="number of warm-up iterations")
parser.add_argument("-l", "--loop", type=int, default=1, dest="loop",
help="number of iterations")
options = parser.parse_args(args)
def ring(comm, n=1, loop=1, skip=0):
from array import array
from . import MPI
iterations = list(range((loop + skip)))
size = comm.Get_size()
rank = comm.Get_rank()
source = (rank - 1) % size
dest = (rank + 1) % size
Sendrecv = comm.Sendrecv
Send = comm.Send
Recv = comm.Recv
Wtime = MPI.Wtime
sendmsg = array('B', [+42]) * n
recvmsg = array('B', [0x0]) * n
if size == 1:
for i in iterations:
if i == skip:
tic = Wtime()
Sendrecv(sendmsg, dest, 0,
recvmsg, source, 0)
else:
if rank == 0:
for i in iterations:
if i == skip:
tic = Wtime()
Send(sendmsg, dest, 0)
Recv(recvmsg, source, 0)
else:
sendmsg = recvmsg
for i in iterations:
if i == skip:
tic = Wtime()
Recv(recvmsg, source, 0)
Send(sendmsg, dest, 0)
toc = Wtime()
if comm.rank == 0 and sendmsg != recvmsg:
import warnings
import traceback
try:
warnings.warn("received message does not match!")
except UserWarning:
traceback.print_exc()
comm.Abort(2)
return toc - tic
size = getattr(options, 'size', 1)
loop = getattr(options, 'loop', 1)
skip = getattr(options, 'skip', 0)
comm.Barrier()
elapsed = ring(comm, size, loop, skip)
if options.verbose and comm.rank == 0:
message = ("time for %d loops = %g seconds (%d processes, %d bytes)\n"
% (loop, elapsed, comm.size, size))
_sys.stdout.write(message)
_sys.stdout.flush()
return elapsed
def main(args=None):
from argparse import ArgumentParser, REMAINDER
parser = ArgumentParser(prog=__name__,
usage="%(prog)s [options] <command> [args]")
parser.add_argument("--threads",
action="store_true", dest="threads", default=None,
help="initialize MPI with thread support")
parser.add_argument("--no-threads",
action="store_false", dest="threads", default=None,
help="initialize MPI without thread support")
parser.add_argument("--thread-level",
dest="thread_level", default=None,
action="store", metavar="LEVEL",
choices="single funneled serialized multiple".split(),
help="initialize MPI with required thread level")
parser.add_argument("--mpe",
action="store_true", dest="mpe", default=False,
help="use MPE for MPI profiling")
parser.add_argument("--vt",
action="store_true", dest="vt", default=False,
help="use VampirTrace for MPI profiling")
parser.add_argument("command",
action="store", metavar="<command>",
help="benchmark command to run")
parser.add_argument("args",
nargs=REMAINDER, metavar="[args]",
help="arguments for benchmark command")
options = parser.parse_args(args)
from . import rc, profile
if options.threads is not None:
rc.threads = options.threads
if options.thread_level is not None:
rc.thread_level = options.thread_level
if options.mpe:
profile('mpe', logfile='mpi4py')
if options.vt:
profile('vt', logfile='mpi4py')
from . import MPI
comm = MPI.COMM_WORLD
if options.command not in main.commands:
if comm.rank == 0:
parser.error("unknown command '%s'" % options.command)
parser.exit(2)
command = main.commands[options.command]
command(comm, options.args)
parser.exit()
main.commands = {
'helloworld': helloworld,
'ringtest': ringtest,
}
if __name__ == '__main__':
main()
| true | true |
f72e5997f478141aba757b99a8e4ba2d3b188609 | 1,487 | py | Python | tests/io/test_audit_log.py | oklymenok/pyTenable | 73475e37034608afa5e9c7b20c9cec33a2818622 | [
"MIT"
] | 1 | 2020-05-22T12:08:52.000Z | 2020-05-22T12:08:52.000Z | tests/io/test_audit_log.py | oklymenok/pyTenable | 73475e37034608afa5e9c7b20c9cec33a2818622 | [
"MIT"
] | null | null | null | tests/io/test_audit_log.py | oklymenok/pyTenable | 73475e37034608afa5e9c7b20c9cec33a2818622 | [
"MIT"
] | null | null | null | from .fixtures import *
from tenable.errors import *
def test_event_field_name_typeerror(api):
    # Filter field name must be a string; an int is rejected.
    with pytest.raises(TypeError):
        api.audit_log.events((1, 'gt', '2018-01-01'))
def test_event_filter_operator_typeerror(api):
    # Filter operator must be a string; an int is rejected.
    with pytest.raises(TypeError):
        api.audit_log.events(('date', 1, '2018-01-01'))
def test_event_filter_value_typeerror(api):
    # Filter value must be a string; an int is rejected.
    with pytest.raises(TypeError):
        api.audit_log.events(('date', 'gt', 1))
def test_event_limit_typeerror(api):
    # The limit keyword must be an int; a string is rejected.
    with pytest.raises(TypeError):
        api.audit_log.events(limit='nope')
def test_events_standard_user_permissionerror(stdapi):
    # A standard (non-admin) user may not read the audit log.
    with pytest.raises(PermissionError):
        stdapi.audit_log.events()
def test_events(api):
    # Fetch up to 100 events after 2018-01-01 and validate the schema of
    # the last returned record via the shared `check` fixture helper.
    events = api.audit_log.events(('date', 'gt', '2018-01-01'), limit=100)
    assert isinstance(events, list)
    e = events[-1]
    check(e, 'action', str)
    check(e, 'actor', dict)
    check(e['actor'], 'id', 'uuid')
    check(e['actor'], 'name', str, allow_none=True)
    check(e, 'crud', str)
    check(e, 'description', str, allow_none=True)
    check(e, 'fields', list)
    for d in e['fields']:
        check(d, 'key', str)
        check(d, 'value', str)
    check(e, 'id', str)
    check(e, 'is_anonymous', bool, allow_none=True)
    check(e, 'is_failure', bool, allow_none=True)
    check(e, 'received', 'datetime')
    check(e, 'target', dict)
    check(e['target'], 'id', 'uuid')
    check(e['target'], 'name', str)
    check(e['target'], 'type', str) | 33.044444 | 74 | 0.64425 | from .fixtures import *
from tenable.errors import *
def test_event_field_name_typeerror(api):
with pytest.raises(TypeError):
api.audit_log.events((1, 'gt', '2018-01-01'))
def test_event_filter_operator_typeerror(api):
with pytest.raises(TypeError):
api.audit_log.events(('date', 1, '2018-01-01'))
def test_event_filter_value_typeerror(api):
with pytest.raises(TypeError):
api.audit_log.events(('date', 'gt', 1))
def test_event_limit_typeerror(api):
with pytest.raises(TypeError):
api.audit_log.events(limit='nope')
def test_events_standard_user_permissionerror(stdapi):
with pytest.raises(PermissionError):
stdapi.audit_log.events()
def test_events(api):
events = api.audit_log.events(('date', 'gt', '2018-01-01'), limit=100)
assert isinstance(events, list)
e = events[-1]
check(e, 'action', str)
check(e, 'actor', dict)
check(e['actor'], 'id', 'uuid')
check(e['actor'], 'name', str, allow_none=True)
check(e, 'crud', str)
check(e, 'description', str, allow_none=True)
check(e, 'fields', list)
for d in e['fields']:
check(d, 'key', str)
check(d, 'value', str)
check(e, 'id', str)
check(e, 'is_anonymous', bool, allow_none=True)
check(e, 'is_failure', bool, allow_none=True)
check(e, 'received', 'datetime')
check(e, 'target', dict)
check(e['target'], 'id', 'uuid')
check(e['target'], 'name', str)
check(e['target'], 'type', str) | true | true |
f72e59ce85863fc225c1c1258f4ce23a596cff3e | 2,330 | py | Python | ttp/src/lightsail_enum_load_balancers_src.py | blackbotinc/AWS-Attack | ad4668ab60173aabce3c6b9c7685160be5e3f14d | [
"Apache-2.0",
"BSD-3-Clause"
] | 26 | 2021-03-29T13:39:28.000Z | 2022-03-21T10:57:58.000Z | ttp/src/lightsail_enum_load_balancers_src.py | blackbotinc/AWS-Attack | ad4668ab60173aabce3c6b9c7685160be5e3f14d | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | ttp/src/lightsail_enum_load_balancers_src.py | blackbotinc/AWS-Attack | ad4668ab60173aabce3c6b9c7685160be5e3f14d | [
"Apache-2.0",
"BSD-3-Clause"
] | 8 | 2021-02-23T12:17:04.000Z | 2022-02-25T13:28:14.000Z | #!/usr/bin/env python3
import datetime
#'description': "This module examines Lightsail data fields and automatically enumerates them for all available regions. Available fields can be passed upon execution to only look at certain types of data. By default, all Lightsail fields will be captured.",
import argparse
from botocore.exceptions import ClientError
def setup_storage(fields):
    """Return a dict mapping every requested field name to an empty list."""
    return {field: [] for field in fields}
# Converts snake_case to camelcase.
def camelCase(name):
    """Convert snake_case to camelCase, e.g. 'load_balancers' -> 'loadBalancers'."""
    first, *rest = name.split('_')
    return first + ''.join(word[0].upper() + word[1:] for word in rest)
def fetch_lightsail_data(client, func, print):
    """Fetch all pages of one Lightsail resource type.

    `func` is the snake_case resource name; the boto3 call is 'get_' + func
    and the response key is its camelCase form. Returns a list of resources
    (tagged with their region), or [] on access errors.
    """
    # Adding 'get_' portion to each field to build command.
    caller = getattr(client, 'get_' + func)
    try:
        response = caller()
        data = response[camelCase(func)]
        # Follow pagination until no nextPageToken is returned.
        while 'nextPageToken' in response:
            response = caller(pageToken=response['nextPageToken'])
            data.extend(response[camelCase(func)])
        print(' Found {} {}'.format(len(data), func))
        # active_names entries are plain strings, so they cannot carry a
        # region annotation.
        if func != 'active_names':
            for resource in data:
                resource['region'] = client.meta.region_name
        return data
    except ClientError as error:
        if error.response['Error']['Code'] == 'AccessDeniedException':
            print(' {}'.format(func))
            print(' FAILURE: MISSING REQUIRED AWS PERMISSIONS')
        else:
            print('Unknown Error:\n{}'.format(error))
        return []
def main(args, awsattack_main):
    """Enumerate Lightsail load balancers across all available regions.

    Stores the collected data in the active session and returns a summary
    dict with per-field counts and the region list.
    """
    fields = ['load_balancers']
    session = awsattack_main.get_active_session()
    print = awsattack_main.print
    get_regions = awsattack_main.get_regions
    lightsail_data = setup_storage(fields)
    regions = get_regions('lightsail')
    for region in regions:
        print('Starting region {}...'.format(region))
        client = awsattack_main.get_boto3_client('lightsail', region)
        for field in fields:
            lightsail_data[field].extend(fetch_lightsail_data(client, field, print))
    # Summarize counts per field for the caller's report.
    summary_data = {'regions': regions}
    for field in lightsail_data:
        summary_data[field] = len(lightsail_data[field])
    session.update(awsattack_main.database, Lightsail=lightsail_data)
    return summary_data
| 32.816901 | 258 | 0.660944 |
import datetime
import argparse
from botocore.exceptions import ClientError
def setup_storage(fields):
out = {}
for field in fields:
out[field] = []
return out
def camelCase(name):
splitted = name.split('_')
out = splitted[0]
for word in splitted[1:]:
out += word[0].upper() + word[1:]
return out
def fetch_lightsail_data(client, func, print):
caller = getattr(client, 'get_' + func)
try:
response = caller()
data = response[camelCase(func)]
while 'nextPageToken' in response:
response = caller(pageToken=response['nextPageToken'])
data.extend(response[camelCase(func)])
print(' Found {} {}'.format(len(data), func))
if func != 'active_names':
for resource in data:
resource['region'] = client.meta.region_name
return data
except ClientError as error:
if error.response['Error']['Code'] == 'AccessDeniedException':
print(' {}'.format(func))
print(' FAILURE: MISSING REQUIRED AWS PERMISSIONS')
else:
print('Unknown Error:\n{}'.format(error))
return []
def main(args, awsattack_main):
fields = ['load_balancers']
session = awsattack_main.get_active_session()
print = awsattack_main.print
get_regions = awsattack_main.get_regions
lightsail_data = setup_storage(fields)
regions = get_regions('lightsail')
for region in regions:
print('Starting region {}...'.format(region))
client = awsattack_main.get_boto3_client('lightsail', region)
for field in fields:
lightsail_data[field].extend(fetch_lightsail_data(client, field, print))
summary_data = {'regions': regions}
for field in lightsail_data:
summary_data[field] = len(lightsail_data[field])
session.update(awsattack_main.database, Lightsail=lightsail_data)
return summary_data
| true | true |
f72e5a152dd6b8508cca8dbff2620816eff039c7 | 2,550 | py | Python | main.py | DethCount/usb-gamepad | d33dbc851e73f4c3702d50b3ef0e42e0aef39725 | [
"MIT"
] | null | null | null | main.py | DethCount/usb-gamepad | d33dbc851e73f4c3702d50b3ef0e42e0aef39725 | [
"MIT"
] | null | null | null | main.py | DethCount/usb-gamepad | d33dbc851e73f4c3702d50b3ef0e42e0aef39725 | [
"MIT"
] | null | null | null | from inputs import get_gamepad
from asyncio import run
from bleak import BleakClient
from bluetooth_telescope import BluetoothTelescope
async def main():
    """Read gamepad events forever and translate them into telescope moves
    sent over a Bluetooth LE serial characteristic."""
    # Hard-coded MAC address of the telescope's BLE adapter.
    bluetoothClient = BleakClient('D8:A9:8B:7E:1E:D2')
    is_connected = await bluetoothClient.connect()
    if not is_connected:
        raise Exception('Device not connected')
    telescope = BluetoothTelescope(
        bluetoothClient,
        '0000ffe1-0000-1000-8000-00805f9b34fb',
        isEquatorial=True,
        lookAt=[[0,0],[0,0],[0,0]],
        destination=None
    )
    # Analog stick axes report signed int16 values; dividing by maxInt
    # normalizes them to roughly [-1, 1].
    maxInt = 2**15 # signed int16
    debug = True
    while True:
        events = get_gamepad()
        if debug: print(str(events))
        for event in events:
            if debug: print(event.timestamp, event.ev_type, event.code, event.state)
            if event.ev_type == 'Key':
                # Button presses: stick clicks stop motion, shoulder
                # buttons change the direction of the third axis.
                if debug: print('Key event')
                if event.code == 'BTN_THUMBL':
                    if debug: print('BTN_THUMBL')
                    await telescope.emergencyStop(0)
                elif event.code == 'BTN_THUMBR':
                    if debug: print('BTN_THUMBR')
                    await telescope.emergencyStop(1)
                elif event.code == 'BTN_TL':
                    if debug: print('BTN_TL')
                    await telescope.changeDir(2, 0)
                elif event.code == 'BTN_TR':
                    if debug: print('BTN_TR')
                    await telescope.changeDir(2, 1)
            elif event.ev_type == 'Absolute':
                # Analog axes: sticks drive axes 0 and 1; the triggers
                # (ABS_Z/ABS_RZ, range 0-1023) drive axis 2.
                if debug: print('Absolute event')
                if event.code == 'ABS_X':
                    if debug: print('ABS_X')
                    await telescope.move(0, 0, event.state / maxInt)
                elif event.code == 'ABS_Y':
                    if debug: print('ABS_Y')
                    await telescope.move(0, 1, event.state / maxInt)
                elif event.code == 'ABS_Z':
                    if debug: print('ABS_Z')
                    await telescope.move(2, 0, event.state / 1024)
                elif event.code == 'ABS_RX':
                    if debug: print('ABS_RX')
                    await telescope.move(1, 0, event.state / maxInt)
                elif event.code == 'ABS_RY':
                    if debug: print('ABS_RY')
                    await telescope.move(1, 1, event.state / maxInt)
                elif event.code == 'ABS_RZ':
                    if debug: print('ABS_RZ')
                    await telescope.move(2, 1, event.state / 1024)
if __name__ == "__main__":
run(main())
| 38.636364 | 84 | 0.521176 | from inputs import get_gamepad
from asyncio import run
from bleak import BleakClient
from bluetooth_telescope import BluetoothTelescope
async def main():
bluetoothClient = BleakClient('D8:A9:8B:7E:1E:D2')
is_connected = await bluetoothClient.connect()
if not is_connected:
raise Exception('Device not connected')
telescope = BluetoothTelescope(
bluetoothClient,
'0000ffe1-0000-1000-8000-00805f9b34fb',
isEquatorial=True,
lookAt=[[0,0],[0,0],[0,0]],
destination=None
)
maxInt = 2**15
debug = True
while True:
events = get_gamepad()
if debug: print(str(events))
for event in events:
if debug: print(event.timestamp, event.ev_type, event.code, event.state)
if event.ev_type == 'Key':
if debug: print('Key event')
if event.code == 'BTN_THUMBL':
if debug: print('BTN_THUMBL')
await telescope.emergencyStop(0)
elif event.code == 'BTN_THUMBR':
if debug: print('BTN_THUMBR')
await telescope.emergencyStop(1)
elif event.code == 'BTN_TL':
if debug: print('BTN_TL')
await telescope.changeDir(2, 0)
elif event.code == 'BTN_TR':
if debug: print('BTN_TR')
await telescope.changeDir(2, 1)
elif event.ev_type == 'Absolute':
if debug: print('Absolute event')
if event.code == 'ABS_X':
if debug: print('ABS_X')
await telescope.move(0, 0, event.state / maxInt)
elif event.code == 'ABS_Y':
if debug: print('ABS_Y')
await telescope.move(0, 1, event.state / maxInt)
elif event.code == 'ABS_Z':
if debug: print('ABS_Z')
await telescope.move(2, 0, event.state / 1024)
elif event.code == 'ABS_RX':
if debug: print('ABS_RX')
await telescope.move(1, 0, event.state / maxInt)
elif event.code == 'ABS_RY':
if debug: print('ABS_RY')
await telescope.move(1, 1, event.state / maxInt)
elif event.code == 'ABS_RZ':
if debug: print('ABS_RZ')
await telescope.move(2, 1, event.state / 1024)
if __name__ == "__main__":
run(main())
| true | true |
f72e5ab7b72290d97a6c8c05e9dbdf1113f6f9dd | 630 | py | Python | __scraping__/snapdeal.com - scrapy/main.py | whitmans-max/python-examples | 881a8f23f0eebc76816a0078e19951893f0daaaa | [
"MIT"
] | 140 | 2017-02-21T22:49:04.000Z | 2022-03-22T17:51:58.000Z | __scraping__/snapdeal.com - scrapy/main.py | whitmans-max/python-examples | 881a8f23f0eebc76816a0078e19951893f0daaaa | [
"MIT"
] | 5 | 2017-12-02T19:55:00.000Z | 2021-09-22T23:18:39.000Z | __scraping__/snapdeal.com - scrapy/main.py | whitmans-max/python-examples | 881a8f23f0eebc76816a0078e19951893f0daaaa | [
"MIT"
] | 79 | 2017-01-25T10:53:33.000Z | 2022-03-11T16:13:57.000Z | #!/usr/bin/env python3
#
# https://stackoverflow.com/a/48035123/1832058
#
import scrapy
from scrapy.commands.view import open_in_browser
class MySpider(scrapy.Spider):
    # Spider identifier used by the CrawlerProcess runner below.
    name = 'myspider'
    start_urls = ['https://www.snapdeal.com/']

    def parse(self, response):
        # Print every category label found on the landing page.
        print('url:', response.url)
        #open_in_browser(response)
        for item in response.xpath('//*[@class="catText"]/text()').extract():
            print(item)
# --- it runs without project ---
from scrapy.crawler import CrawlerProcess

# Run the spider in-process; start() blocks until the crawl finishes.
c = CrawlerProcess({
    'USER_AGENT': 'Mozilla/5.0',
})
c.crawl(MySpider)
c.start()
| 19.090909 | 77 | 0.636508 |
import scrapy
from scrapy.commands.view import open_in_browser
class MySpider(scrapy.Spider):
name = 'myspider'
start_urls = ['https://www.snapdeal.com/']
def parse(self, response):
print('url:', response.url)
for item in response.xpath('//*[@class="catText"]/text()').extract():
print(item)
from scrapy.crawler import CrawlerProcess
c = CrawlerProcess({
'USER_AGENT': 'Mozilla/5.0',
})
c.crawl(MySpider)
c.start()
| true | true |
f72e5bf02fe24be9c7d26524c942c1a103093359 | 4,882 | py | Python | flora/daemon/client.py | fugginoob/flora-blockchain | e57733fdc5ca3627ac5c4aa9a4a5707fb91267be | [
"Apache-2.0"
] | null | null | null | flora/daemon/client.py | fugginoob/flora-blockchain | e57733fdc5ca3627ac5c4aa9a4a5707fb91267be | [
"Apache-2.0"
] | null | null | null | flora/daemon/client.py | fugginoob/flora-blockchain | e57733fdc5ca3627ac5c4aa9a4a5707fb91267be | [
"Apache-2.0"
] | null | null | null | import asyncio
import json
import ssl
from pathlib import Path
from typing import Any, Dict, Optional
import websockets
from flora.server.server import ssl_context_for_client
from flora.types.blockchain_format.sized_bytes import bytes32
from flora.util.config import load_config
from flora.util.json_util import dict_to_json_str
from flora.util.ws_message import WsRpcMessage, create_payload_dict
class DaemonProxy:
    """Websocket RPC client for the local daemon.

    Requests and responses are correlated via their request_id: `_get`
    registers an asyncio.Event per request, a background listener task
    stores incoming responses and sets the matching event.
    """

    def __init__(self, uri: str, ssl_context: Optional[ssl.SSLContext]):
        self._uri = uri
        # request_id -> Event signalled when the response (or timeout) arrives.
        self._request_dict: Dict[bytes32, asyncio.Event] = {}
        # request_id -> decoded response message.
        self.response_dict: Dict[bytes32, Any] = {}
        self.ssl_context = ssl_context

    def format_request(self, command: str, data: Dict[str, Any]) -> WsRpcMessage:
        """Wrap command+data in the daemon's RPC payload envelope."""
        request = create_payload_dict(command, data, "client", "daemon")
        return request

    async def start(self):
        """Open the websocket and spawn the background response listener."""
        self.websocket = await websockets.connect(self._uri, max_size=None, ssl=self.ssl_context)

        async def listener():
            # Dispatch every incoming message to the waiter registered for
            # its request_id; exit quietly on a clean connection close.
            while True:
                try:
                    message = await self.websocket.recv()
                except websockets.exceptions.ConnectionClosedOK:
                    return None
                decoded = json.loads(message)
                id = decoded["request_id"]
                # NOTE(review): the membership test is duplicated in the
                # original code; kept as-is (harmless but redundant).
                if id in self._request_dict:
                    if id in self._request_dict:
                        self.response_dict[id] = decoded
                        self._request_dict[id].set()

        asyncio.create_task(listener())
        await asyncio.sleep(1)

    async def _get(self, request: WsRpcMessage) -> WsRpcMessage:
        """Send one request and wait for its response (30 s timeout).

        Returns the decoded response, or None if the timeout fired first.
        """
        request_id = request["request_id"]
        self._request_dict[request_id] = asyncio.Event()
        string = dict_to_json_str(request)
        # Fire-and-forget send; the listener task collects the reply.
        asyncio.create_task(self.websocket.send(string))

        async def timeout():
            await asyncio.sleep(30)
            if request_id in self._request_dict:
                print("Error, timeout.")
                self._request_dict[request_id].set()

        asyncio.create_task(timeout())
        await self._request_dict[request_id].wait()
        if request_id in self.response_dict:
            response = self.response_dict[request_id]
            self.response_dict.pop(request_id)
        else:
            # Timeout path: no response was ever stored.
            response = None
        self._request_dict.pop(request_id)
        return response

    async def start_service(self, service_name: str) -> WsRpcMessage:
        """Ask the daemon to start the named service."""
        data = {"service": service_name}
        request = self.format_request("start_service", data)
        response = await self._get(request)
        return response

    async def stop_service(self, service_name: str, delay_before_kill: int = 15) -> WsRpcMessage:
        """Ask the daemon to stop the named service.

        NOTE(review): delay_before_kill is accepted but not forwarded in
        the request payload — confirm against the daemon API.
        """
        data = {"service": service_name}
        request = self.format_request("stop_service", data)
        response = await self._get(request)
        return response

    async def is_running(self, service_name: str) -> bool:
        """Return True iff the daemon reports the service as running."""
        data = {"service": service_name}
        request = self.format_request("is_running", data)
        response = await self._get(request)
        if "is_running" in response["data"]:
            return bool(response["data"]["is_running"])
        return False

    async def ping(self) -> WsRpcMessage:
        """Round-trip liveness check against the daemon."""
        request = self.format_request("ping", {})
        response = await self._get(request)
        return response

    async def close(self) -> None:
        """Close the underlying websocket connection."""
        await self.websocket.close()

    async def exit(self) -> WsRpcMessage:
        """Ask the daemon process to shut itself down."""
        request = self.format_request("exit", {})
        return await self._get(request)
async def connect_to_daemon(self_hostname: str, daemon_port: int, ssl_context: Optional[ssl.SSLContext]) -> DaemonProxy:
    """Open a websocket connection to the local daemon and return the proxy."""
    uri = f"wss://{self_hostname}:{daemon_port}"
    proxy = DaemonProxy(uri, ssl_context)
    await proxy.start()
    return proxy
async def connect_to_daemon_and_validate(root_path: Path) -> Optional[DaemonProxy]:
    """
    Connect to the local daemon and do a ping to ensure that something is really
    there and running.
    """
    try:
        net_config = load_config(root_path, "config.yaml")
        # Mutual-TLS material: client certificate/key plus the private CA used
        # to authenticate the daemon.
        crt_path = root_path / net_config["daemon_ssl"]["private_crt"]
        key_path = root_path / net_config["daemon_ssl"]["private_key"]
        ca_crt_path = root_path / net_config["private_ssl_ca"]["crt"]
        ca_key_path = root_path / net_config["private_ssl_ca"]["key"]
        ssl_context = ssl_context_for_client(ca_crt_path, ca_key_path, crt_path, key_path)
        connection = await connect_to_daemon(net_config["self_hostname"], net_config["daemon_port"], ssl_context)
        r = await connection.ping()
        # Only hand the connection back if the daemon actually answered the ping.
        if "value" in r["data"] and r["data"]["value"] == "pong":
            return connection
    # Broad on purpose: any failure (missing config, TLS error, connection
    # refused) is treated as "daemon not running".
    except Exception:
        print("Daemon not started yet")
        return None
    return None
| 36.432836 | 120 | 0.648505 | import asyncio
import json
import ssl
from pathlib import Path
from typing import Any, Dict, Optional
import websockets
from flora.server.server import ssl_context_for_client
from flora.types.blockchain_format.sized_bytes import bytes32
from flora.util.config import load_config
from flora.util.json_util import dict_to_json_str
from flora.util.ws_message import WsRpcMessage, create_payload_dict
class DaemonProxy:
    """Websocket RPC client for the local flora daemon; responses are matched
    to requests via ``request_id`` and per-request asyncio events."""
    def __init__(self, uri: str, ssl_context: Optional[ssl.SSLContext]):
        # _request_dict: request_id -> event set once a response/timeout occurs
        # response_dict: request_id -> decoded response message
        self._uri = uri
        self._request_dict: Dict[bytes32, asyncio.Event] = {}
        self.response_dict: Dict[bytes32, Any] = {}
        self.ssl_context = ssl_context
    def format_request(self, command: str, data: Dict[str, Any]) -> WsRpcMessage:
        """Build an RPC payload addressed from "client" to "daemon"."""
        request = create_payload_dict(command, data, "client", "daemon")
        return request
    async def start(self):
        """Connect the websocket and spawn the background response listener."""
        self.websocket = await websockets.connect(self._uri, max_size=None, ssl=self.ssl_context)
        async def listener():
            while True:
                try:
                    message = await self.websocket.recv()
                except websockets.exceptions.ConnectionClosedOK:
                    return None
                decoded = json.loads(message)
                id = decoded["request_id"]
                # NOTE(review): the nested membership test below repeats the
                # outer condition (redundant), and `id` shadows a builtin.
                if id in self._request_dict:
                    if id in self._request_dict:
                        self.response_dict[id] = decoded
                        self._request_dict[id].set()
        asyncio.create_task(listener())
        await asyncio.sleep(1)
    async def _get(self, request: WsRpcMessage) -> WsRpcMessage:
        """Send *request* and wait up to 30 s for the matching response;
        returns None on timeout."""
        request_id = request["request_id"]
        self._request_dict[request_id] = asyncio.Event()
        string = dict_to_json_str(request)
        asyncio.create_task(self.websocket.send(string))
        async def timeout():
            await asyncio.sleep(30)
            if request_id in self._request_dict:
                print("Error, timeout.")
                self._request_dict[request_id].set()
        asyncio.create_task(timeout())
        await self._request_dict[request_id].wait()
        if request_id in self.response_dict:
            response = self.response_dict[request_id]
            self.response_dict.pop(request_id)
        else:
            response = None
        self._request_dict.pop(request_id)
        return response
    async def start_service(self, service_name: str) -> WsRpcMessage:
        """Ask the daemon to start the named service."""
        data = {"service": service_name}
        request = self.format_request("start_service", data)
        response = await self._get(request)
        return response
    async def stop_service(self, service_name: str, delay_before_kill: int = 15) -> WsRpcMessage:
        """Ask the daemon to stop the named service.
        NOTE(review): ``delay_before_kill`` is accepted but not forwarded."""
        data = {"service": service_name}
        request = self.format_request("stop_service", data)
        response = await self._get(request)
        return response
    async def is_running(self, service_name: str) -> bool:
        """Return True if the daemon reports the named service as running."""
        data = {"service": service_name}
        request = self.format_request("is_running", data)
        response = await self._get(request)
        if "is_running" in response["data"]:
            return bool(response["data"]["is_running"])
        return False
    async def ping(self) -> WsRpcMessage:
        """Round-trip liveness check ("ping" -> "pong")."""
        request = self.format_request("ping", {})
        response = await self._get(request)
        return response
    async def close(self) -> None:
        """Close the underlying websocket connection."""
        await self.websocket.close()
    async def exit(self) -> WsRpcMessage:
        """Ask the daemon process itself to shut down."""
        request = self.format_request("exit", {})
        return await self._get(request)
async def connect_to_daemon(self_hostname: str, daemon_port: int, ssl_context: Optional[ssl.SSLContext]) -> DaemonProxy:
    """Open a websocket connection to the local daemon and return the proxy."""
    client = DaemonProxy(f"wss://{self_hostname}:{daemon_port}", ssl_context)
    await client.start()
    return client
async def connect_to_daemon_and_validate(root_path: Path) -> Optional[DaemonProxy]:
    """Connect to the local daemon and ping it; return None if unreachable."""
    try:
        net_config = load_config(root_path, "config.yaml")
        # Mutual-TLS material: client cert/key plus the private CA.
        crt_path = root_path / net_config["daemon_ssl"]["private_crt"]
        key_path = root_path / net_config["daemon_ssl"]["private_key"]
        ca_crt_path = root_path / net_config["private_ssl_ca"]["crt"]
        ca_key_path = root_path / net_config["private_ssl_ca"]["key"]
        ssl_context = ssl_context_for_client(ca_crt_path, ca_key_path, crt_path, key_path)
        connection = await connect_to_daemon(net_config["self_hostname"], net_config["daemon_port"], ssl_context)
        r = await connection.ping()
        if "value" in r["data"] and r["data"]["value"] == "pong":
            return connection
    # Broad by design: any failure means "daemon not running".
    except Exception:
        print("Daemon not started yet")
        return None
    return None
| true | true |
f72e5d21c42211531f0a523940feab958c3bc38b | 5,561 | py | Python | nipype/info.py | effigies/nipype | 18fe222557cf3b9627e06b2a66fba589feaca581 | [
"Apache-2.0"
] | null | null | null | nipype/info.py | effigies/nipype | 18fe222557cf3b9627e06b2a66fba589feaca581 | [
"Apache-2.0"
] | 2 | 2017-10-05T21:08:38.000Z | 2018-10-09T23:01:23.000Z | nipype/info.py | effigies/nipype | 18fe222557cf3b9627e06b2a66fba589feaca581 | [
"Apache-2.0"
] | null | null | null | """ This file contains defines parameters for nipy that we use to fill
settings in setup.py, the nipy top-level docstring, and for building the
docs. In setup.py in particular, we exec this file, so it cannot import nipy
"""
# nipype version information. An empty _version_extra corresponds to a
# full release. '.dev' as a _version_extra string means this is a development
# version (the suffix is replaced below with the exact git revision when one
# is available).
_version_major = 0
_version_minor = 13
_version_micro = 0
_version_extra = '-dev'  # Remove -dev for release
def get_nipype_gitversion():
"""Nipype version as reported by the last commit in git
Returns
-------
None or str
Version of NiPype according to git.
"""
import os
import subprocess
try:
import nipype
gitpath = os.path.realpath(os.path.join(os.path.dirname(nipype.__file__),
os.path.pardir))
except:
gitpath = os.getcwd()
gitpathgit = os.path.join(gitpath, '.git')
if not os.path.exists(gitpathgit):
return None
ver = None
try:
o, _ = subprocess.Popen('git describe', shell=True, cwd=gitpath,
stdout=subprocess.PIPE).communicate()
except Exception:
pass
else:
ver = o.decode().strip().split('-')[-1]
return ver
# For development versions, append the exact git revision (from
# `git describe`) so builds are uniquely identifiable.
if '-dev' in _version_extra:
    gitversion = get_nipype_gitversion()
    if gitversion:
        _version_extra = '-' + gitversion + '.dev'
# Format expected by setup.py and doc/source/conf.py: string of form "X.Y.Z"
__version__ = "%s.%s.%s%s" % (_version_major,
                              _version_minor,
                              _version_micro,
                              _version_extra)
# PyPI trove classifiers.
CLASSIFIERS = ["Development Status :: 5 - Production/Stable",
               "Environment :: Console",
               "Intended Audience :: Science/Research",
               "License :: OSI Approved :: Apache Software License",
               "Operating System :: MacOS :: MacOS X",
               "Operating System :: POSIX :: Linux",
               "Programming Language :: Python :: 2.7",
               "Programming Language :: Python :: 3.4",
               "Programming Language :: Python :: 3.5",
               "Topic :: Scientific/Engineering"]
description = 'Neuroimaging in Python: Pipelines and Interfaces'
# Note: this long_description is actually a copy/paste from the top-level
# README.txt, so that it shows up nicely on PyPI. So please remember to edit
# it only in one place and sync it correctly.
long_description = \
    """
========================================================
NIPYPE: Neuroimaging in Python: Pipelines and Interfaces
========================================================
Current neuroimaging software offer users an incredible opportunity to
analyze data using a variety of different algorithms. However, this has
resulted in a heterogeneous collection of specialized applications
without transparent interoperability or a uniform operating interface.
*Nipype*, an open-source, community-developed initiative under the
umbrella of NiPy_, is a Python project that provides a uniform interface
to existing neuroimaging software and facilitates interaction between
these packages within a single workflow. Nipype provides an environment
that encourages interactive exploration of algorithms from different
packages (e.g., AFNI, ANTS, BRAINS, BrainSuite, Camino, FreeSurfer, FSL, MNE,
MRtrix, MNE, Nipy, Slicer, SPM), eases the design of workflows within and
between packages, and reduces the learning curve necessary to use different
packages. Nipype is creating a collaborative platform for neuroimaging software
development in a high-level language and addressing limitations of existing
pipeline systems.
*Nipype* allows you to:
* easily interact with tools from different software packages
* combine processing steps from different software packages
* develop new workflows faster by reusing common steps from old ones
* process data faster by running it in parallel on many cores/machines
* make your research easily reproducible
* share your processing workflows with the community
"""
# versions -- minimum supported versions of the runtime dependencies below.
NIBABEL_MIN_VERSION = '2.0.1'
NETWORKX_MIN_VERSION = '1.7'
NUMPY_MIN_VERSION = '1.6.2'
SCIPY_MIN_VERSION = '0.11'
TRAITS_MIN_VERSION = '4.3'
DATEUTIL_MIN_VERSION = '1.5'
NOSE_MIN_VERSION = '1.2'
FUTURE_MIN_VERSION = '0.15.2'
SIMPLEJSON_MIN_VERSION = '3.8.0'
PROV_MIN_VERSION = '1.4.0'
# Package metadata consumed by setup.py.
NAME = 'nipype'
MAINTAINER = "nipype developers"
MAINTAINER_EMAIL = "neuroimaging@python.org"
DESCRIPTION = description
LONG_DESCRIPTION = long_description
URL = "http://nipy.org/nipype"
DOWNLOAD_URL = "http://github.com/nipy/nipype/archives/master"
LICENSE = "Apache License, 2.0"
CLASSIFIERS = CLASSIFIERS
AUTHOR = "nipype developers"
AUTHOR_EMAIL = "neuroimaging@python.org"
PLATFORMS = "OS Independent"
MAJOR = _version_major
MINOR = _version_minor
MICRO = _version_micro
ISRELEASE = _version_extra == ''
VERSION = __version__
PROVIDES = ['nipype']
REQUIRES = ["nibabel>=%s" % NIBABEL_MIN_VERSION,
            "networkx>=%s" % NETWORKX_MIN_VERSION,
            "numpy>=%s" % NUMPY_MIN_VERSION,
            "python-dateutil>=%s" % DATEUTIL_MIN_VERSION,
            "scipy>=%s" % SCIPY_MIN_VERSION,
            "traits>=%s" % TRAITS_MIN_VERSION,
            "nose>=%s" % NOSE_MIN_VERSION,
            "future>=%s" % FUTURE_MIN_VERSION,
            "simplejson>=%s" % SIMPLEJSON_MIN_VERSION,
            "prov>=%s" % PROV_MIN_VERSION,
            "mock",
            "xvfbwrapper"]
STATUS = 'stable'
| 37.574324 | 81 | 0.670203 |
# Version components; '-dev' marks a development (pre-release) build.
_version_major = 0
_version_minor = 13
_version_micro = 0
_version_extra = '-dev'
def get_nipype_gitversion():
    """Return the version suffix from `git describe`, or None when not
    running from a git checkout."""
    import os
    import subprocess
    try:
        import nipype
        gitpath = os.path.realpath(os.path.join(os.path.dirname(nipype.__file__),
                                                os.path.pardir))
    # NOTE(review): bare `except:` also catches SystemExit/KeyboardInterrupt;
    # `except ImportError:` (or `except Exception:`) would be safer here.
    except:
        gitpath = os.getcwd()
    gitpathgit = os.path.join(gitpath, '.git')
    if not os.path.exists(gitpathgit):
        # Not a git checkout (e.g. installed from a release tarball).
        return None
    ver = None
    try:
        o, _ = subprocess.Popen('git describe', shell=True, cwd=gitpath,
                                stdout=subprocess.PIPE).communicate()
    except Exception:
        pass
    else:
        # Keep only the last '-'-separated token of the `git describe` output.
        ver = o.decode().strip().split('-')[-1]
    return ver
# For dev builds, substitute the git-derived revision into the version suffix.
if '-dev' in _version_extra:
    gitversion = get_nipype_gitversion()
    if gitversion:
        _version_extra = '-' + gitversion + '.dev'
# Full version string of the form "X.Y.Z<extra>".
__version__ = "%s.%s.%s%s" % (_version_major,
                              _version_minor,
                              _version_micro,
                              _version_extra)
CLASSIFIERS = ["Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Topic :: Scientific/Engineering"]
description = 'Neuroimaging in Python: Pipelines and Interfaces'
long_description = \
"""
========================================================
NIPYPE: Neuroimaging in Python: Pipelines and Interfaces
========================================================
Current neuroimaging software offer users an incredible opportunity to
analyze data using a variety of different algorithms. However, this has
resulted in a heterogeneous collection of specialized applications
without transparent interoperability or a uniform operating interface.
*Nipype*, an open-source, community-developed initiative under the
umbrella of NiPy_, is a Python project that provides a uniform interface
to existing neuroimaging software and facilitates interaction between
these packages within a single workflow. Nipype provides an environment
that encourages interactive exploration of algorithms from different
packages (e.g., AFNI, ANTS, BRAINS, BrainSuite, Camino, FreeSurfer, FSL, MNE,
MRtrix, MNE, Nipy, Slicer, SPM), eases the design of workflows within and
between packages, and reduces the learning curve necessary to use different
packages. Nipype is creating a collaborative platform for neuroimaging software
development in a high-level language and addressing limitations of existing
pipeline systems.
*Nipype* allows you to:
* easily interact with tools from different software packages
* combine processing steps from different software packages
* develop new workflows faster by reusing common steps from old ones
* process data faster by running it in parallel on many cores/machines
* make your research easily reproducible
* share your processing workflows with the community
"""
NIBABEL_MIN_VERSION = '2.0.1'
NETWORKX_MIN_VERSION = '1.7'
NUMPY_MIN_VERSION = '1.6.2'
SCIPY_MIN_VERSION = '0.11'
TRAITS_MIN_VERSION = '4.3'
DATEUTIL_MIN_VERSION = '1.5'
NOSE_MIN_VERSION = '1.2'
FUTURE_MIN_VERSION = '0.15.2'
SIMPLEJSON_MIN_VERSION = '3.8.0'
PROV_MIN_VERSION = '1.4.0'
NAME = 'nipype'
MAINTAINER = "nipype developers"
MAINTAINER_EMAIL = "neuroimaging@python.org"
DESCRIPTION = description
LONG_DESCRIPTION = long_description
URL = "http://nipy.org/nipype"
DOWNLOAD_URL = "http://github.com/nipy/nipype/archives/master"
LICENSE = "Apache License, 2.0"
CLASSIFIERS = CLASSIFIERS
AUTHOR = "nipype developers"
AUTHOR_EMAIL = "neuroimaging@python.org"
PLATFORMS = "OS Independent"
MAJOR = _version_major
MINOR = _version_minor
MICRO = _version_micro
ISRELEASE = _version_extra == ''
VERSION = __version__
PROVIDES = ['nipype']
REQUIRES = ["nibabel>=%s" % NIBABEL_MIN_VERSION,
"networkx>=%s" % NETWORKX_MIN_VERSION,
"numpy>=%s" % NUMPY_MIN_VERSION,
"python-dateutil>=%s" % DATEUTIL_MIN_VERSION,
"scipy>=%s" % SCIPY_MIN_VERSION,
"traits>=%s" % TRAITS_MIN_VERSION,
"nose>=%s" % NOSE_MIN_VERSION,
"future>=%s" % FUTURE_MIN_VERSION,
"simplejson>=%s" % SIMPLEJSON_MIN_VERSION,
"prov>=%s" % PROV_MIN_VERSION,
"mock",
"xvfbwrapper"]
STATUS = 'stable'
| true | true |
f72e5d9a801e6d998e27b90743043c49babb6526 | 575 | py | Python | jumpscale/packages/vdc_dashboard/services/provision_wallet_billing.py | threefoldtech/js-sdk | 811f783ac34a60225175bab2d806802a87b9d5c7 | [
"Apache-2.0"
] | 13 | 2020-09-02T09:05:08.000Z | 2022-03-12T02:43:24.000Z | jumpscale/packages/vdc_dashboard/services/provision_wallet_billing.py | threefoldtech/js-sdk | 811f783ac34a60225175bab2d806802a87b9d5c7 | [
"Apache-2.0"
] | 1,998 | 2020-06-15T11:46:10.000Z | 2022-03-24T22:12:41.000Z | jumpscale/packages/vdc_dashboard/services/provision_wallet_billing.py | threefoldtech/js-sdk | 811f783ac34a60225175bab2d806802a87b9d5c7 | [
"Apache-2.0"
] | 8 | 2020-09-29T06:50:35.000Z | 2021-06-14T03:30:52.000Z | from jumpscale.loader import j
from jumpscale.packages.vdc.billing import auto_extend_billing
from jumpscale.tools.servicemanager.servicemanager import BackgroundService
class AutoExtendbillingService(BackgroundService):
    """Background service that periodically extends the VDC pool billing."""
    def __init__(self, interval=60 * 60, *args, **kwargs):
        """Provisioning wallet service that will run every hour to extend the VDC pool
        """
        super().__init__(interval, *args, **kwargs)
    def job(self):
        # One billing-extension pass per scheduled run (hourly by default,
        # per the constructor's docstring).
        auto_extend_billing()
        j.logger.info("Auto extend billing service")
# Module-level instance -- presumably discovered by the package's service
# manager; confirm before renaming.
service = AutoExtendbillingService()
| 31.944444 | 86 | 0.735652 | from jumpscale.loader import j
from jumpscale.packages.vdc.billing import auto_extend_billing
from jumpscale.tools.servicemanager.servicemanager import BackgroundService
class AutoExtendbillingService(BackgroundService):
    """Background service (hourly by default: interval=3600 s) that extends
    the VDC pool billing."""
    def __init__(self, interval=60 * 60, *args, **kwargs):
        super().__init__(interval, *args, **kwargs)
    def job(self):
        # One billing-extension pass per scheduled run.
        auto_extend_billing()
        j.logger.info("Auto extend billing service")
# Module-level instance -- presumably discovered by the service manager; confirm.
service = AutoExtendbillingService()
| true | true |
f72e5e2b889876badd696ef1800deb4ada0a2f13 | 4,955 | py | Python | timeeval_experiments/generator/param_config_gen.py | HPI-Information-Systems/TimeEval | 9b2717b89decd57dd09e04ad94c120f13132d7b8 | [
"MIT"
] | 2 | 2022-01-29T03:46:31.000Z | 2022-02-14T14:06:35.000Z | timeeval_experiments/generator/param_config_gen.py | HPI-Information-Systems/TimeEval | 9b2717b89decd57dd09e04ad94c120f13132d7b8 | [
"MIT"
] | null | null | null | timeeval_experiments/generator/param_config_gen.py | HPI-Information-Systems/TimeEval | 9b2717b89decd57dd09e04ad94c120f13132d7b8 | [
"MIT"
] | null | null | null | import json
import os
import warnings
from pathlib import Path
from typing import Union, Dict, Any
from .parameter_matrix_parsing import ParameterMatrixProxy
class ParamConfigGenerator:
    """Generates (and re-generates) the TimeEval parameter-configuration JSON
    from the parameter matrix, preserving manually maintained entries where
    they are marked as such.
    """

    # Top-level keys of the generated JSON document.
    FIXED_KEY = "fixed_params"
    SHARED_KEY = "shared_params"
    DEPENDENT_KEY = "dependent_params"
    OPTIMIZED_KEY = "optimized_params"
    HEURISTIC_MAPPING_KEY = "__heuristic_function_mapping"
    OVERWRITES_KEY = "__algorithm_overwrites"

    def __init__(self, matrix_path: Union[str, Path]):
        self.pmp = ParameterMatrixProxy(matrix_path)

    def generate_template(self, target: Union[str, Path]) -> None:
        """Write a fresh config to *target*, discarding any existing content."""
        target = Path(target)
        config = {
            self.FIXED_KEY: self.pmp.fixed_params(),
            self.SHARED_KEY: self.pmp.shared_params(),
            self.DEPENDENT_KEY: self.pmp.dependent_params(),
            self.OPTIMIZED_KEY: self.pmp.optimized_params(),
            self.HEURISTIC_MAPPING_KEY: {},
            self.OVERWRITES_KEY: {}
        }
        self._write(config, target)

    def generate(self, target: Union[str, Path], overwrite: bool = False) -> None:
        """Regenerate *target*, merging with its current content unless
        *overwrite* is set or the file does not exist yet."""
        target = Path(target)
        if overwrite or not target.exists():
            self.generate_template(target)
            return
        config = {}
        if target.exists() and target.is_file():
            with target.open("r") as fh:
                config = json.load(fh)
        # Fixed and dependent parameters are always taken from the matrix;
        # shared and optimized parameters are merged when already present.
        config[self.FIXED_KEY] = self.pmp.fixed_params()
        if self.SHARED_KEY in config:
            self._merge_shared(config)
        else:
            config[self.SHARED_KEY] = self.pmp.shared_params()
        config[self.DEPENDENT_KEY] = self.pmp.dependent_params()
        if self.OPTIMIZED_KEY in config:
            self._merge_optimized(config)
        else:
            config[self.OPTIMIZED_KEY] = self.pmp.optimized_params()
        self._write(config, target)

    def _merge_shared(self, config: Dict[str, Any]) -> None:
        """Refresh shared params from the matrix, dropping stale entries."""
        shared_params = config[self.SHARED_KEY]
        new_shared_params = self.pmp.shared_params()
        params = set(list(shared_params.keys()) + list(new_shared_params.keys()))
        for param in params:
            if param in shared_params and param in new_shared_params:
                # Keep the existing entry but refresh the matrix-driven fields.
                shared_params[param]["algorithms"] = new_shared_params[param]["algorithms"]
                shared_params[param]["search_space"] = new_shared_params[param]["search_space"]
            elif param not in shared_params:
                shared_params[param] = new_shared_params[param]
            else:  # param not in new_shared_params
                del shared_params[param]
        config[self.SHARED_KEY] = shared_params

    def _merge_optimized(self, config: Dict[str, Any]) -> None:
        """Refresh optimized params, honoring 'MANUAL' markers where possible."""
        optim_params = config[self.OPTIMIZED_KEY]
        new_optim_params = self.pmp.optimized_params()
        params = set(list(optim_params.keys()) + list(new_optim_params.keys()))
        for param in params:
            if param not in new_optim_params:
                # No longer present in the matrix: drop it.
                del optim_params[param]
                continue
            # (The original re-checked `param in new_optim_params` here, which
            # is always true after the guard above -- check removed.)
            new_param_config = new_optim_params[param]
            if isinstance(new_param_config, dict) and "MANUAL" in new_param_config.values():
                if param in optim_params and isinstance(optim_params[param], dict):
                    warnings.warn(f"{self.OPTIMIZED_KEY}: Found 'MANUAL' marker for parameter {param}. "
                                  "Using existing value(s).")
                    param_config = optim_params[param]
                    # Copy over only the MANUAL algorithms that have no
                    # existing (manually maintained) value yet.
                    to_change_algos = []
                    for algo in new_param_config:
                        if new_param_config[algo] == "MANUAL" and algo not in param_config:
                            to_change_algos.append(algo)
                    for algo in to_change_algos:
                        param_config[algo] = new_param_config[algo]
                        warnings.warn(f"{self.OPTIMIZED_KEY}: Found 'MANUAL' marker for parameter {param} and "
                                      f"algorithm {algo}. Please set value(s) after the generation step manually!")
                    continue
                else:
                    warnings.warn(f"{self.OPTIMIZED_KEY}: Found 'MANUAL' marker for parameter {param}. Please "
                                  "set value(s) after the generation step manually!")
            # For everything else: take the value from the matrix as-is.
            optim_params[param] = new_optim_params[param]
        config[self.OPTIMIZED_KEY] = optim_params

    @staticmethod
    def _write(config: Dict[str, Any], target: Path) -> None:
        """Dump *config* as pretty-printed JSON (sorted keys, trailing newline)."""
        with target.open("w") as fh:
            json.dump(config, fh, sort_keys=True, indent=2)
            fh.write(os.linesep)
if __name__ == "__main__":
p = ParamConfigGenerator("timeeval_experiments/parameter-matrix.csv")
p.generate("timeeval_experiments/params.json")
| 43.086957 | 119 | 0.605247 | import json
import os
import warnings
from pathlib import Path
from typing import Union, Dict, Any
from .parameter_matrix_parsing import ParameterMatrixProxy
class ParamConfigGenerator:
    """Generates (and re-generates) the TimeEval parameter-configuration JSON
    from the parameter matrix, preserving manually maintained entries where
    they are marked as such."""
    # Top-level keys of the generated JSON document.
    FIXED_KEY = "fixed_params"
    SHARED_KEY = "shared_params"
    DEPENDENT_KEY = "dependent_params"
    OPTIMIZED_KEY = "optimized_params"
    HEURISTIC_MAPPING_KEY = "__heuristic_function_mapping"
    OVERWRITES_KEY = "__algorithm_overwrites"
    def __init__(self, matrix_path: Union[str, Path]):
        self.pmp = ParameterMatrixProxy(matrix_path)
    def generate_template(self, target: Union[str, Path]) -> None:
        """Write a fresh config to *target*, discarding any existing content."""
        target = Path(target)
        config = {
            self.FIXED_KEY: self.pmp.fixed_params(),
            self.SHARED_KEY: self.pmp.shared_params(),
            self.DEPENDENT_KEY: self.pmp.dependent_params(),
            self.OPTIMIZED_KEY: self.pmp.optimized_params(),
            self.HEURISTIC_MAPPING_KEY: {},
            self.OVERWRITES_KEY: {}
        }
        self._write(config, target)
    def generate(self, target: Union[str, Path], overwrite: bool = False) -> None:
        """Regenerate *target*, merging with its current content unless
        *overwrite* is set or the file does not exist yet."""
        target = Path(target)
        if overwrite or not target.exists():
            self.generate_template(target)
            return
        config = {}
        if target.exists() and target.is_file():
            with target.open("r") as fh:
                config = json.load(fh)
        # Fixed/dependent params always come from the matrix; shared and
        # optimized params are merged when already present.
        config[self.FIXED_KEY] = self.pmp.fixed_params()
        if self.SHARED_KEY in config:
            self._merge_shared(config)
        else:
            config[self.SHARED_KEY] = self.pmp.shared_params()
        config[self.DEPENDENT_KEY] = self.pmp.dependent_params()
        if self.OPTIMIZED_KEY in config:
            self._merge_optimized(config)
        else:
            config[self.OPTIMIZED_KEY] = self.pmp.optimized_params()
        self._write(config, target)
    def _merge_shared(self, config: Dict[str, Any]) -> None:
        """Refresh shared params from the matrix, dropping stale entries."""
        shared_params = config[self.SHARED_KEY]
        new_shared_params = self.pmp.shared_params()
        params = set(list(shared_params.keys()) + list(new_shared_params.keys()))
        for param in params:
            if param in shared_params and param in new_shared_params:
                shared_params[param]["algorithms"] = new_shared_params[param]["algorithms"]
                shared_params[param]["search_space"] = new_shared_params[param]["search_space"]
            elif param not in shared_params:
                shared_params[param] = new_shared_params[param]
            else:  # param not in new_shared_params
                del shared_params[param]
        config[self.SHARED_KEY] = shared_params
    def _merge_optimized(self, config: Dict[str, Any]) -> None:
        """Refresh optimized params, honoring 'MANUAL' markers where possible."""
        optim_params = config[self.OPTIMIZED_KEY]
        new_optim_params = self.pmp.optimized_params()
        params = set(list(optim_params.keys()) + list(new_optim_params.keys()))
        for param in params:
            if param not in new_optim_params:
                del optim_params[param]
                continue
            # NOTE(review): this check is redundant -- always true after the
            # `continue` guard above.
            if param in new_optim_params:
                new_param_config = new_optim_params[param]
                if isinstance(new_param_config, dict) and "MANUAL" in new_param_config.values():
                    if param in optim_params and isinstance(optim_params[param], dict):
                        warnings.warn(f"{self.OPTIMIZED_KEY}: Found 'MANUAL' marker for parameter {param}. "
                                      "Using existing value(s).")
                        param_config = optim_params[param]
                        to_change_algos = []
                        for algo in new_param_config:
                            if new_param_config[algo] == "MANUAL" and algo not in param_config:
                                to_change_algos.append(algo)
                        for algo in to_change_algos:
                            param_config[algo] = new_param_config[algo]
                            warnings.warn(f"{self.OPTIMIZED_KEY}: Found 'MANUAL' marker for parameter {param} and "
                                          f"algorithm {algo}. Please set value(s) after the generation step manually!")
                        continue
                    else:
                        warnings.warn(f"{self.OPTIMIZED_KEY}: Found 'MANUAL' marker for parameter {param}. Please "
                                      "set value(s) after the generation step manually!")
                # For everything else: take the value from the matrix as-is.
                optim_params[param] = new_optim_params[param]
        config[self.OPTIMIZED_KEY] = optim_params
    @staticmethod
    def _write(config: Dict[str, Any], target: Path) -> None:
        """Dump *config* as pretty-printed JSON (sorted keys, trailing newline)."""
        with target.open("w") as fh:
            json.dump(config, fh, sort_keys=True, indent=2)
            fh.write(os.linesep)
if __name__ == "__main__":
p = ParamConfigGenerator("timeeval_experiments/parameter-matrix.csv")
p.generate("timeeval_experiments/params.json")
| true | true |
f72e5e5572e81441c9fb52541a31b92eb5db8aeb | 983 | py | Python | setup.py | UdbhavPrasad072300/GANs-Implementations | 60aee8a48dc3cf3a6f1240f44ff9bf6c138e3e38 | [
"MIT"
] | 4 | 2021-01-24T02:43:02.000Z | 2021-09-10T01:26:27.000Z | setup.py | UdbhavPrasad072300/GANs-Implementations | 60aee8a48dc3cf3a6f1240f44ff9bf6c138e3e38 | [
"MIT"
] | null | null | null | setup.py | UdbhavPrasad072300/GANs-Implementations | 60aee8a48dc3cf3a6f1240f44ff9bf6c138e3e38 | [
"MIT"
] | 1 | 2021-09-11T07:53:45.000Z | 2021-09-11T07:53:45.000Z | from setuptools import setup, find_packages
from os import path
# Use the README next to this setup.py as the long description shown on PyPI.
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
# PyPI trove classifiers for the package.
classifiers = [
    'Development Status :: 3 - Alpha',
    'Intended Audience :: Science/Research',
    'Operating System :: OS Independent',
    'License :: OSI Approved :: MIT License',
    'Topic :: Scientific/Engineering :: Artificial Intelligence',
    'Programming Language :: Python :: 3 :: Only',
]
setup(
    name='gans_implementations',
    version='0.1.0',
    description='A bunch of GAN implementations',
    long_description=long_description,
    long_description_content_type='text/markdown',
    author='Udbhav Prasad',
    author_email='udbhavprasad072300@gmail.com',
    url='https://github.com/UdbhavPrasad072300/GANs-Implementations',
    license='MIT',
    # NOTE(review): py_modules=[""] (an empty module name) looks wrong --
    # confirm whether this should list real modules or be dropped in favor
    # of packages=find_packages() below.
    py_modules=[""],
    classifiers=classifiers,
    packages=find_packages(),
)
| 31.709677 | 73 | 0.704985 | from setuptools import setup, find_packages
from os import path
# Use the README next to this setup.py as the long description shown on PyPI.
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
# PyPI trove classifiers for the package.
classifiers = [
    'Development Status :: 3 - Alpha',
    'Intended Audience :: Science/Research',
    'Operating System :: OS Independent',
    'License :: OSI Approved :: MIT License',
    'Topic :: Scientific/Engineering :: Artificial Intelligence',
    'Programming Language :: Python :: 3 :: Only',
]
setup(
    name='gans_implementations',
    version='0.1.0',
    description='A bunch of GAN implementations',
    long_description=long_description,
    long_description_content_type='text/markdown',
    author='Udbhav Prasad',
    author_email='udbhavprasad072300@gmail.com',
    url='https://github.com/UdbhavPrasad072300/GANs-Implementations',
    license='MIT',
    # NOTE(review): py_modules=[""] (an empty module name) looks wrong; confirm.
    py_modules=[""],
    classifiers=classifiers,
    packages=find_packages(),
)
| true | true |
f72e5e5770402f322f58ec67f33bddc036118b03 | 4,641 | py | Python | yandex/cloud/ydb/v1/location_service_pb2_grpc.py | korsar182/python-sdk | 873bf2a9b136a8f2faae72e86fae1f5b5c3d896a | [
"MIT"
] | 36 | 2018-12-23T13:51:50.000Z | 2022-03-25T07:48:24.000Z | yandex/cloud/ydb/v1/location_service_pb2_grpc.py | korsar182/python-sdk | 873bf2a9b136a8f2faae72e86fae1f5b5c3d896a | [
"MIT"
] | 15 | 2019-02-28T04:55:09.000Z | 2022-03-06T23:17:24.000Z | yandex/cloud/ydb/v1/location_service_pb2_grpc.py | korsar182/python-sdk | 873bf2a9b136a8f2faae72e86fae1f5b5c3d896a | [
"MIT"
] | 18 | 2019-02-23T07:10:57.000Z | 2022-03-28T14:41:08.000Z | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from yandex.cloud.ydb.v1 import location_pb2 as yandex_dot_cloud_dot_ydb_dot_v1_dot_location__pb2
from yandex.cloud.ydb.v1 import location_service_pb2 as yandex_dot_cloud_dot_ydb_dot_v1_dot_location__service__pb2
class LocationServiceStub(object):
    """Client-side stub for yandex.cloud.ydb.v1.LocationService.

    Generated by the gRPC Python plugin -- do not edit by hand.
    """
    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        self.Get = channel.unary_unary(
                '/yandex.cloud.ydb.v1.LocationService/Get',
                request_serializer=yandex_dot_cloud_dot_ydb_dot_v1_dot_location__service__pb2.GetLocationRequest.SerializeToString,
                response_deserializer=yandex_dot_cloud_dot_ydb_dot_v1_dot_location__pb2.Location.FromString,
                )
        self.List = channel.unary_unary(
                '/yandex.cloud.ydb.v1.LocationService/List',
                request_serializer=yandex_dot_cloud_dot_ydb_dot_v1_dot_location__service__pb2.ListLocationsRequest.SerializeToString,
                response_deserializer=yandex_dot_cloud_dot_ydb_dot_v1_dot_location__service__pb2.ListLocationsResponse.FromString,
                )
class LocationServiceServicer(object):
    """Service-side interface for yandex.cloud.ydb.v1.LocationService.

    Subclass and override the methods, then register an instance via
    add_LocationServiceServicer_to_server. Generated code -- do not edit.
    """
    def Get(self, request, context):
        """Returns the specified location."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
    def List(self, request, context):
        """Returns the list of available locations."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_LocationServiceServicer_to_server(servicer, server):
    """Register *servicer*'s Get/List handlers on the given gRPC server."""
    # Map RPC method names to handlers backed by the servicer implementation.
    rpc_method_handlers = {
            'Get': grpc.unary_unary_rpc_method_handler(
                    servicer.Get,
                    request_deserializer=yandex_dot_cloud_dot_ydb_dot_v1_dot_location__service__pb2.GetLocationRequest.FromString,
                    response_serializer=yandex_dot_cloud_dot_ydb_dot_v1_dot_location__pb2.Location.SerializeToString,
            ),
            'List': grpc.unary_unary_rpc_method_handler(
                    servicer.List,
                    request_deserializer=yandex_dot_cloud_dot_ydb_dot_v1_dot_location__service__pb2.ListLocationsRequest.FromString,
                    response_serializer=yandex_dot_cloud_dot_ydb_dot_v1_dot_location__service__pb2.ListLocationsResponse.SerializeToString,
            ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
            'yandex.cloud.ydb.v1.LocationService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class LocationService(object):
    """Experimental channel-less invocation API for
    yandex.cloud.ydb.v1.LocationService (grpc.experimental). Generated code.
    """
    @staticmethod
    def Get(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """One-shot unary call to LocationService/Get on *target*."""
        return grpc.experimental.unary_unary(request, target, '/yandex.cloud.ydb.v1.LocationService/Get',
            yandex_dot_cloud_dot_ydb_dot_v1_dot_location__service__pb2.GetLocationRequest.SerializeToString,
            yandex_dot_cloud_dot_ydb_dot_v1_dot_location__pb2.Location.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def List(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """One-shot unary call to LocationService/List on *target*."""
        return grpc.experimental.unary_unary(request, target, '/yandex.cloud.ydb.v1.LocationService/List',
            yandex_dot_cloud_dot_ydb_dot_v1_dot_location__service__pb2.ListLocationsRequest.SerializeToString,
            yandex_dot_cloud_dot_ydb_dot_v1_dot_location__service__pb2.ListLocationsResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
| 45.058252 | 139 | 0.704159 |
import grpc
from yandex.cloud.ydb.v1 import location_pb2 as yandex_dot_cloud_dot_ydb_dot_v1_dot_location__pb2
from yandex.cloud.ydb.v1 import location_service_pb2 as yandex_dot_cloud_dot_ydb_dot_v1_dot_location__service__pb2
class LocationServiceStub(object):
def __init__(self, channel):
self.Get = channel.unary_unary(
'/yandex.cloud.ydb.v1.LocationService/Get',
request_serializer=yandex_dot_cloud_dot_ydb_dot_v1_dot_location__service__pb2.GetLocationRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_ydb_dot_v1_dot_location__pb2.Location.FromString,
)
self.List = channel.unary_unary(
'/yandex.cloud.ydb.v1.LocationService/List',
request_serializer=yandex_dot_cloud_dot_ydb_dot_v1_dot_location__service__pb2.ListLocationsRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_ydb_dot_v1_dot_location__service__pb2.ListLocationsResponse.FromString,
)
class LocationServiceServicer(object):
def Get(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def List(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_LocationServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'Get': grpc.unary_unary_rpc_method_handler(
servicer.Get,
request_deserializer=yandex_dot_cloud_dot_ydb_dot_v1_dot_location__service__pb2.GetLocationRequest.FromString,
response_serializer=yandex_dot_cloud_dot_ydb_dot_v1_dot_location__pb2.Location.SerializeToString,
),
'List': grpc.unary_unary_rpc_method_handler(
servicer.List,
request_deserializer=yandex_dot_cloud_dot_ydb_dot_v1_dot_location__service__pb2.ListLocationsRequest.FromString,
response_serializer=yandex_dot_cloud_dot_ydb_dot_v1_dot_location__service__pb2.ListLocationsResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'yandex.cloud.ydb.v1.LocationService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
class LocationService(object):
@staticmethod
def Get(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/yandex.cloud.ydb.v1.LocationService/Get',
yandex_dot_cloud_dot_ydb_dot_v1_dot_location__service__pb2.GetLocationRequest.SerializeToString,
yandex_dot_cloud_dot_ydb_dot_v1_dot_location__pb2.Location.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def List(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/yandex.cloud.ydb.v1.LocationService/List',
yandex_dot_cloud_dot_ydb_dot_v1_dot_location__service__pb2.ListLocationsRequest.SerializeToString,
yandex_dot_cloud_dot_ydb_dot_v1_dot_location__service__pb2.ListLocationsResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
| true | true |
f72e5e660b6ecc4715fa7d16284b2b112851a050 | 5,059 | py | Python | server/server/settings.py | raminawahda7/servize | 9941401c72a949c8c2ad83012ab5cc4276355b6c | [
"MIT"
] | null | null | null | server/server/settings.py | raminawahda7/servize | 9941401c72a949c8c2ad83012ab5cc4276355b6c | [
"MIT"
] | null | null | null | server/server/settings.py | raminawahda7/servize | 9941401c72a949c8c2ad83012ab5cc4276355b6c | [
"MIT"
] | null | null | null | """
Django settings for server project.
Generated by 'django-admin startproject' using Django 3.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path

# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control — rotate it and load
# it from an environment variable before any production deployment.
SECRET_KEY = '!ju*wb_1y5dcdijc&u&_+mt80mz)jg01-^4_#j-+hm6wd_f7#6'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'corsheaders',  # add this
    'rest_framework',  # add this
    # add name of applications here
    'Category',
    'ServiceProvider',
    'SubCategory',
    'Location',
    'Reviews',
    # 'cal'
    'User',
    'djoser',
    'accounts',
]

MIDDLEWARE = [
    'corsheaders.middleware.CorsMiddleware',  # add this for connection between front and back
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'server.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'server.wsgi.application'

# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# setting database
# NOTE(review): DB credentials are hard-coded; prefer environment variables
# (e.g. os.environ) so they do not live in the repository.
DATABASES = {
    'default': {
        # 'ENGINE': 'django.db.backends.sqlite3',
        # 'NAME': BASE_DIR / 'db.sqlite3',
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'servizeDB',
        'USER': 'postgres',
        'PASSWORD': 'rami871995',
        'HOST': 'localhost',
        'PORT': '5432',
    }
}

#setting email for verfication
# NOTE(review): the SMTP app password is committed here — revoke it and load
# from the environment.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_HOST_USER = 'servize.verfication@gmail.com'
EMAIL_HOST_PASSWORD = 'hlsucmflzyezxywm'
EMAIL_USE_TLS = True

# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# NOTE(review): allowing all origins effectively disables CORS protection and
# makes the whitelist below redundant — tighten for production.
CORS_ALLOW_ALL_ORIGINS=True
CORS_ORIGIN_WHITELIST = (
    'http://localhost:8000',
)

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/

# DRF: open permissions by default; authentication via simple-JWT tokens.
REST_FRAMEWORK = {
    'DEFAULT_PERMISSION_CLASSES': [
        'rest_framework.permissions.AllowAny'
    ],
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework_simplejwt.authentication.JWTAuthentication',
    ),
}

# Clients must send "Authorization: JWT <token>".
SIMPLE_JWT = {
    'AUTH_HEADER_TYPES': ('JWT',),
}

# Djoser auth endpoints: email login, no activation/confirmation e-mails.
DJOSER = {
    'LOGIN_FIELD': 'email',
    'USER_CREATE_PASSWORD_RETYPE': False,
    'USERNAME_CHANGED_EMAIL_CONFIRMATION': False,
    'PASSWORD_CHANGED_EMAIL_CONFIRMATION': False,
    'SEND_CONFIRMATION_EMAIL': False,
    'SET_USERNAME_RETYPE': True,
    'PASSWORD_RESET_CONFIRM_URL': 'password/reset/confirm/{uid}/{token}',
    'SET_PASSWORD_RETYPE': True,
    'USERNAME_RESET_CONFIRM_URL': 'email/reset/confirm/{uid}/{token}',
    'ACTIVATION_URL': 'activate/{uid}/{token}',
    'SEND_ACTIVATION_EMAIL': False,
    'SERIALIZERS': {
        'user_create': 'accounts.serializers.UserCreateSerializer',
        'user': 'accounts.serializers.UserCreateSerializer',
        'user_delete': 'djoser.serializers.UserDeleteSerializer',
    },
}

STATIC_URL = '/static/'

# Custom user model replacing django.contrib.auth's default.
AUTH_USER_MODEL = 'accounts.UserAccount'
| 25.811224 | 96 | 0.686302 |
from pathlib import Path
BASE_DIR = Path(__file__).resolve().parent.parent
SECRET_KEY = '!ju*wb_1y5dcdijc&u&_+mt80mz)jg01-^4_#j-+hm6wd_f7#6'
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'corsheaders', # add this
'rest_framework', # add this
# add name of applications here
'Category',
'ServiceProvider',
'SubCategory',
'Location',
'Reviews',
# 'cal'
'User',
'djoser',
'accounts',
]
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware', # add this for connection between front and back
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'server.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'server.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# setting database
DATABASES = {
'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': BASE_DIR / 'db.sqlite3',
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'servizeDB',
'USER': 'postgres',
'PASSWORD': 'rami871995',
'HOST': 'localhost',
'PORT': '5432',
}
}
#setting email for verfication
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_HOST_USER = 'servize.verfication@gmail.com'
EMAIL_HOST_PASSWORD = 'hlsucmflzyezxywm'
EMAIL_USE_TLS = True
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
CORS_ALLOW_ALL_ORIGINS=True
CORS_ORIGIN_WHITELIST = (
'http://localhost:8000',
)
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.AllowAny'
],
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework_simplejwt.authentication.JWTAuthentication',
),
}
SIMPLE_JWT = {
'AUTH_HEADER_TYPES': ('JWT',),
}
DJOSER = {
'LOGIN_FIELD': 'email',
'USER_CREATE_PASSWORD_RETYPE': False,
'USERNAME_CHANGED_EMAIL_CONFIRMATION': False,
'PASSWORD_CHANGED_EMAIL_CONFIRMATION': False,
'SEND_CONFIRMATION_EMAIL': False,
'SET_USERNAME_RETYPE': True,
'PASSWORD_RESET_CONFIRM_URL': 'password/reset/confirm/{uid}/{token}',
'SET_PASSWORD_RETYPE': True,
'USERNAME_RESET_CONFIRM_URL': 'email/reset/confirm/{uid}/{token}',
'ACTIVATION_URL': 'activate/{uid}/{token}',
'SEND_ACTIVATION_EMAIL': False,
'SERIALIZERS': {
'user_create': 'accounts.serializers.UserCreateSerializer',
'user': 'accounts.serializers.UserCreateSerializer',
'user_delete': 'djoser.serializers.UserDeleteSerializer',
},
}
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'accounts.UserAccount'
| true | true |
f72e5e66d16ddddaa3f4271d15f9f4fb89a6cc34 | 4,347 | py | Python | augustus/custom/trade_log_analysis.py | jialuechen/augustus | d4fbda427e3d9c60896b0e22c06cd593b484ef9d | [
"MIT"
] | 2 | 2019-09-13T18:49:17.000Z | 2022-01-25T05:14:05.000Z | augustus/custom/trade_log_analysis.py | jialuechen/augustus | d4fbda427e3d9c60896b0e22c06cd593b484ef9d | [
"MIT"
] | null | null | null | augustus/custom/trade_log_analysis.py | jialuechen/augustus | d4fbda427e3d9c60896b0e22c06cd593b484ef9d | [
"MIT"
] | 2 | 2019-02-28T21:23:04.000Z | 2020-07-02T01:23:24.000Z | import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_table_experiments as dt
import pandas as pd
import plotly
from dash.dependencies import Input, Output, State
from plotly import graph_objs as go
from augustus.systemabase_env import augustusEnvBase
# Full trade log pulled from the backtest environment (a pandas DataFrame).
TRADE_LOG = augustusEnvBase.full_trade_log

APP = dash.Dash()
APP.scripts.config.serve_locally = True

# Page layout: a filterable/sortable trade-log table plus two scatter plots
# (P&L vs drawdown and P&L vs run-up) wired up via the callbacks below.
APP.layout = html.Div([
    html.H4('augustus Trade Log Analysis'),
    dt.DataTable(
        rows=TRADE_LOG.to_dict('records'),
        row_selectable=True,
        filterable=True,
        sortable=True,
        selected_row_indices=[],
        id='trade_log'
    ),
    dcc.Graph(
        id='drawdown_pnl'
    ),
    dcc.Graph(
        id='run_up_pnl'
    ),
], className="container")
@APP.callback(
    Output('trade_log', 'selected_row_indices'),
    [Input('drawdown_pnl', 'clickData')],
    [State('trade_log', 'selected_row_indices')])
def update_selected_row_indices(clickData, selected_row_indices):
    """Toggle table-row selection when a point on the scatter plot is clicked.

    Each clicked point's index is added to the selection if absent, removed
    if already present; the (mutated) selection list is returned to Dash.
    """
    if clickData:
        for point in clickData['points']:
            if point['pointNumber'] in selected_row_indices:
                selected_row_indices.remove(point['pointNumber'])
            else:
                selected_row_indices.append(point['pointNumber'])
    return selected_row_indices
@APP.callback(
    Output('drawdown_pnl', 'figure'),
    [Input('trade_log', 'rows'),
     Input('trade_log', 'selected_row_indices')])
def update_run_up_figure(rows, selected_row_indices):
    """Build the 'Profit & Loss vs Run-up' scatter from the visible table rows.

    Winning and losing trades are plotted separately (losses flipped to
    positive magnitude), plus a y=x diagonal for reference.

    NOTE(review): this callback writes into the graph with id 'drawdown_pnl'
    while plotting run-up data (and the next callback does the reverse) —
    the two Output ids look swapped; confirm against the intended layout.
    """
    dff = pd.DataFrame(rows)
    # Split trades by sign of realized return.
    profit_diff = dff.loc[dff.returns_diff > 0]
    loss_diff = dff.loc[dff.returns_diff < 0]
    fig = plotly.tools.make_subplots(
        rows=1, cols=1,
        shared_xaxes=True)
    fig['layout'].update(dict(title='Profit & Loss vs Run-up'))
    fig['layout']['xaxis'].update(dict(title='Run-up(%)'))
    fig['layout']['yaxis'].update(dict(title='Profit & Loss(%)'))
    fig.append_trace({
        'x': profit_diff['run_up']*100,
        'y': profit_diff['returns_diff']*100,
        'text': profit_diff.entry_date + ' to ' + profit_diff.exit_date,
        'type': 'scatter',
        'marker': dict(color='black'),
        'mode': 'markers',
        'name': 'win',
        'line': {'width': 1}
    }, 1, 1)
    fig.append_trace({
        'x': loss_diff['run_up']*100,
        # Negate so losses are shown as positive magnitudes.
        'y': -loss_diff['returns_diff']*100,
        'type': 'scatter',
        'text': loss_diff.entry_date + ' to ' + loss_diff.exit_date,
        'marker': dict(color='red'),
        'mode': 'markers',
        'name': 'lose',
        'line': {'width': 1}
    }, 1, 1)
    # Reference diagonal: points above it gave back less than their run-up.
    fig.append_trace({
        'x': [0, 10],
        'y': [0, 10],
        'type': 'scatter',
        'mode': 'lines',
        'name': 'Win diagonal',
        'line': {'width': 1}
    }, 1, 1)
    return fig
@APP.callback(
    Output('run_up_pnl', 'figure'),
    [Input('trade_log', 'rows'),
     Input('trade_log', 'selected_row_indices')])
def update__drawdown_figure(rows, selected_row_indices):
    """Build the 'Profit & Loss vs Drawdown' scatter from the visible table rows.

    Mirrors update_run_up_figure but on per-trade drawdown instead of run-up.

    NOTE(review): bound to graph id 'run_up_pnl' while plotting drawdown data —
    the Output ids of the two figure callbacks look swapped; confirm. The
    double underscore in the function name also looks like a typo (kept to
    avoid touching behavior in a doc-only pass).
    """
    dff = pd.DataFrame(rows)
    # Split trades by sign of realized return.
    profit_diff = dff.loc[dff.returns_diff > 0]
    loss_diff = dff.loc[dff.returns_diff < 0]
    fig = plotly.tools.make_subplots(
        rows=1, cols=1,
        shared_xaxes=True)
    fig['layout'].update(dict(title='Profit & Loss vs Drawdown'))
    fig['layout']['xaxis'].update(dict(title='Drawdown(%)'))
    fig['layout']['yaxis'].update(dict(title='Profit & Loss(%)'))
    fig.append_trace({
        'x': profit_diff['drawdown']*100,
        'y': profit_diff['returns_diff']*100,
        'type': 'scatter',
        'marker': dict(color='black'),
        'text': profit_diff.entry_date + ' to ' + profit_diff.exit_date,
        'mode': 'markers',
        'name': 'win',
        'line': {'width': 1}
    }, 1, 1)
    fig.append_trace({
        'x': loss_diff['drawdown']*100,
        # Negate so losses are shown as positive magnitudes.
        'y': -loss_diff['returns_diff']*100,
        'text': loss_diff.entry_date + ' to ' + loss_diff.exit_date,
        'type': 'scatter',
        'marker': dict(color='red'),
        'mode': 'markers',
        'name': 'lose',
        'line': {'width': 1}
    }, 1, 1)
    # Reference diagonal: points above it lost more than their drawdown.
    fig.append_trace({
        'x': [0, 10],
        'y': [0, 10],
        'type': 'scatter',
        'mode': 'lines',
        'name': 'Loss diagonal',
        'line': {'width': 1}
    }, 1, 1)
    return fig
if __name__ == '__main__':
    # Launch the Dash development server (debug=True enables hot reload).
    APP.run_server(debug=True)
| 27.339623 | 72 | 0.586841 | import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_table_experiments as dt
import pandas as pd
import plotly
from dash.dependencies import Input, Output, State
from plotly import graph_objs as go
from augustus.systemabase_env import augustusEnvBase
TRADE_LOG = augustusEnvBase.full_trade_log
APP = dash.Dash()
APP.scripts.config.serve_locally = True
APP.layout = html.Div([
html.H4('augustus Trade Log Analysis'),
dt.DataTable(
rows=TRADE_LOG.to_dict('records'),
row_selectable=True,
filterable=True,
sortable=True,
selected_row_indices=[],
id='trade_log'
),
dcc.Graph(
id='drawdown_pnl'
),
dcc.Graph(
id='run_up_pnl'
),
], className="container")
@APP.callback(
Output('trade_log', 'selected_row_indices'),
[Input('drawdown_pnl', 'clickData')],
[State('trade_log', 'selected_row_indices')])
def update_selected_row_indices(clickData, selected_row_indices):
if clickData:
for point in clickData['points']:
if point['pointNumber'] in selected_row_indices:
selected_row_indices.remove(point['pointNumber'])
else:
selected_row_indices.append(point['pointNumber'])
return selected_row_indices
@APP.callback(
Output('drawdown_pnl', 'figure'),
[Input('trade_log', 'rows'),
Input('trade_log', 'selected_row_indices')])
def update_run_up_figure(rows, selected_row_indices):
dff = pd.DataFrame(rows)
profit_diff = dff.loc[dff.returns_diff > 0]
loss_diff = dff.loc[dff.returns_diff < 0]
fig = plotly.tools.make_subplots(
rows=1, cols=1,
shared_xaxes=True)
fig['layout'].update(dict(title='Profit & Loss vs Run-up'))
fig['layout']['xaxis'].update(dict(title='Run-up(%)'))
fig['layout']['yaxis'].update(dict(title='Profit & Loss(%)'))
fig.append_trace({
'x': profit_diff['run_up']*100,
'y': profit_diff['returns_diff']*100,
'text': profit_diff.entry_date + ' to ' + profit_diff.exit_date,
'type': 'scatter',
'marker': dict(color='black'),
'mode': 'markers',
'name': 'win',
'line': {'width': 1}
}, 1, 1)
fig.append_trace({
'x': loss_diff['run_up']*100,
'y': -loss_diff['returns_diff']*100,
'type': 'scatter',
'text': loss_diff.entry_date + ' to ' + loss_diff.exit_date,
'marker': dict(color='red'),
'mode': 'markers',
'name': 'lose',
'line': {'width': 1}
}, 1, 1)
fig.append_trace({
'x': [0, 10],
'y': [0, 10],
'type': 'scatter',
'mode': 'lines',
'name': 'Win diagonal',
'line': {'width': 1}
}, 1, 1)
return fig
@APP.callback(
Output('run_up_pnl', 'figure'),
[Input('trade_log', 'rows'),
Input('trade_log', 'selected_row_indices')])
def update__drawdown_figure(rows, selected_row_indices):
dff = pd.DataFrame(rows)
profit_diff = dff.loc[dff.returns_diff > 0]
loss_diff = dff.loc[dff.returns_diff < 0]
fig = plotly.tools.make_subplots(
rows=1, cols=1,
shared_xaxes=True)
fig['layout'].update(dict(title='Profit & Loss vs Drawdown'))
fig['layout']['xaxis'].update(dict(title='Drawdown(%)'))
fig['layout']['yaxis'].update(dict(title='Profit & Loss(%)'))
fig.append_trace({
'x': profit_diff['drawdown']*100,
'y': profit_diff['returns_diff']*100,
'type': 'scatter',
'marker': dict(color='black'),
'text': profit_diff.entry_date + ' to ' + profit_diff.exit_date,
'mode': 'markers',
'name': 'win',
'line': {'width': 1}
}, 1, 1)
fig.append_trace({
'x': loss_diff['drawdown']*100,
'y': -loss_diff['returns_diff']*100,
'text': loss_diff.entry_date + ' to ' + loss_diff.exit_date,
'type': 'scatter',
'marker': dict(color='red'),
'mode': 'markers',
'name': 'lose',
'line': {'width': 1}
}, 1, 1)
fig.append_trace({
'x': [0, 10],
'y': [0, 10],
'type': 'scatter',
'mode': 'lines',
'name': 'Loss diagonal',
'line': {'width': 1}
}, 1, 1)
return fig
if __name__ == '__main__':
APP.run_server(debug=True)
| true | true |
f72e5e82f62e0adf4ac740409e8e5cec97f305f7 | 1,039 | py | Python | tests/test_models.py | armendk/pyconcepticon | 7764d4b0900a37a76a6cb6ff9bdc8348502fa51d | [
"Apache-2.0"
] | 1 | 2020-04-17T21:21:49.000Z | 2020-04-17T21:21:49.000Z | tests/test_models.py | armendk/pyconcepticon | 7764d4b0900a37a76a6cb6ff9bdc8348502fa51d | [
"Apache-2.0"
] | null | null | null | tests/test_models.py | armendk/pyconcepticon | 7764d4b0900a37a76a6cb6ff9bdc8348502fa51d | [
"Apache-2.0"
] | null | null | null | import copy
import pytest
from pyconcepticon.models import *
@pytest.fixture
def sun1991(tmprepos):
    """Path to the Sun-1991-1004 concept list inside the temporary repo fixture."""
    return tmprepos / 'concepticondata' / 'conceptlists' / 'Sun-1991-1004.tsv'
def test_Conceptlist(sun1991, api):
    """Conceptlist works with both a path and an API object, and rejects a bad year.

    Checks three things: construction from a conceptlist path exposes a
    TableGroup (`.tg`), construction with a Concepticon API instance does too,
    and a non-numeric `year` raises ValueError.
    """
    kw = dict(
        api=sun1991,
        id='Abc-1234-12',
        author='Some One',
        year='1234',
        list_suffix='a',
        items='12',
        tags='key1,key2',
        source_language='eng',
        target_language='other',
        url=None,
        refs='a,b',
        pdf='',
        note=None,
        pages=None,
        alias='',
        local=True,
    )
    assert Conceptlist(**kw).tg
    # Same attributes, but with the Concepticon API object as `api`.
    _kw = copy.deepcopy(kw)
    _kw['api'] = api
    # FIX: previously asserted Conceptlist(**kw) again, so the api=api
    # variant was never actually exercised.
    assert Conceptlist(**_kw).tg
    with pytest.raises(ValueError):
        _kw = copy.deepcopy(kw)
        _kw['year'] = 'xy'  # non-numeric year must be rejected
        Conceptlist(**_kw)
@pytest.mark.filterwarnings("ignore:Unspecified column")
def test_compare_conceptlists(api, sun1991):
    """Smoke test: comparison runs for both a path and a bare list id (stem)."""
    for ref in (sun1991, sun1991.stem):
        list(compare_conceptlists(api, ref))
| 21.645833 | 78 | 0.59769 | import copy
import pytest
from pyconcepticon.models import *
@pytest.fixture
def sun1991(tmprepos):
return tmprepos / 'concepticondata' / 'conceptlists' / 'Sun-1991-1004.tsv'
def test_Conceptlist(sun1991, api):
kw = dict(
api=sun1991,
id='Abc-1234-12',
author='Some One',
year='1234',
list_suffix='a',
items='12',
tags='key1,key2',
source_language='eng',
target_language='other',
url=None,
refs='a,b',
pdf='',
note=None,
pages=None,
alias='',
local=True,
)
assert Conceptlist(**kw).tg
_kw = copy.deepcopy(kw)
_kw['api'] = api
assert Conceptlist(**kw).tg
with pytest.raises(ValueError):
_kw = copy.deepcopy(kw)
_kw['year'] = 'xy'
Conceptlist(**_kw)
@pytest.mark.filterwarnings("ignore:Unspecified column")
def test_compare_conceptlists(api, sun1991):
list(compare_conceptlists(api, sun1991))
list(compare_conceptlists(api, sun1991.stem))
| true | true |
f72e5f909b0d7dbec63a8e2929a90bf6108605aa | 22,413 | py | Python | tests/ut/python/parallel/test_reshape.py | tjulitianyi1997/mindspore | c802a8c31fe2b51530d932fdd364824e45264b12 | [
"Apache-2.0"
] | 2 | 2020-04-28T03:49:10.000Z | 2020-04-28T03:49:13.000Z | tests/ut/python/parallel/test_reshape.py | tjulitianyi1997/mindspore | c802a8c31fe2b51530d932fdd364824e45264b12 | [
"Apache-2.0"
] | null | null | null | tests/ut/python/parallel/test_reshape.py | tjulitianyi1997/mindspore | c802a8c31fe2b51530d932fdd364824e45264b12 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mindspore.train import Model, ParallelMode
from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits
from mindspore.nn.optim.momentum import Momentum
from mindspore import Tensor
import mindspore as ms
import numpy as np
from mindspore.ops import operations as P
import mindspore.nn as nn
from mindspore.common.parameter import Parameter
from tests.dataset_mock import MindData
from mindspore import context
from tests.ut.python.ops.test_math_ops import VirtualLoss
from mindspore.common.api import _executor
from mindspore.ops import composite as C
from mindspore.ops.operations.comm_ops import _VirtualDataset
from mindspore.ops import functional as F
from mindspore.common.parameter import ParameterTuple
from mindspore.common import dtype as mstype
from mindspore.parallel import set_algo_parameters
# Run all tests in graph mode and start from a clean auto-parallel state.
context.set_context(mode=context.GRAPH_MODE)
context.reset_auto_parallel_context()
class Dataset(MindData):
    """Minimal in-memory dataset yielding the same (predict, label) pair each step.

    `length` bounds the number of iterations; with input_num != 2 only the
    predict tensor is yielded (as a 1-tuple).
    """

    def __init__(self, predict, label, length=3, input_num=2):
        super(Dataset, self).__init__(size=length)
        self.predict = predict
        self.label = label
        self.index = 0       # current iteration position
        self.length = length
        self.input_num = input_num

    def __iter__(self):
        return self

    def __next__(self):
        # Stop after `length` items so training loops terminate.
        if self.index >= self.length:
            raise StopIteration
        self.index += 1
        if self.input_num == 2:
            return self.predict, self.label
        else:
            return self.predict,

    def reset(self):
        # Rewind so the dataset can be iterated again (e.g. next epoch).
        self.index = 0
class ReshapeNet(nn.Cell):
    """ReLU -> Reshape([32,512,7,7] -> [256,25088]) -> MatMul, with per-op
    parallel sharding strategies injected for the auto-parallel tests."""

    def __init__(self, strategy0, strategy1, strategy2):
        super(ReshapeNet, self).__init__()
        self.relu = P.ReLU().set_strategy(strategy0)
        self.reshape = P.Reshape().set_strategy(strategy1)
        self.matmul = P.MatMul().set_strategy(strategy2)
        self.matmul_weight = Parameter(Tensor(np.ones([25088, 256]), dtype=ms.float32), name="weight")

    def construct(self, x):
        x = self.relu(x)
        # Flatten 512*7*7 = 25088 features per "row" of 256.
        x = self.reshape(x, (256, 25088))
        x = self.matmul(x, self.matmul_weight)
        return x
def reshape_net(strategy0, strategy1, strategy2):
    """Factory: build a ReshapeNet with the given per-op parallel strategies."""
    net = ReshapeNet(strategy0, strategy1, strategy2)
    return net
def reshape_common(parallel_mode, strategy0, strategy1, strategy2, strategy_loss):
    """Train a ReshapeNet for two epochs under the given parallel configuration.

    Args:
        parallel_mode: a ParallelMode value (semi-auto or auto parallel).
        strategy0/1/2: sharding strategies for ReLU / Reshape / MatMul.
        strategy_loss: sharding strategy for the softmax cross-entropy loss.
    """
    batch_size = 32
    learning_rate = 0.1
    momentum = 0.9
    epoch_size = 2
    context.reset_auto_parallel_context()
    context.set_auto_parallel_context(parallel_mode=parallel_mode, device_num=8)
    # FIX: tensor shapes previously hard-coded 32 instead of using batch_size.
    predict = Tensor(np.ones([batch_size, 512, 7, 7]), dtype=ms.float32)
    label = Tensor(np.ones([batch_size]), dtype=ms.int32)
    dataset = Dataset(predict, label, 2)
    net = reshape_net(strategy0, strategy1, strategy2)
    loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
    loss.softmax_cross_entropy.set_strategy(strategy_loss)
    loss.one_hot.set_strategy(((8, 1), (), ()))
    opt = Momentum(net.trainable_params(), learning_rate, momentum)
    model = Model(net, loss, opt)
    model.train(epoch_size, dataset, dataset_sink_mode=False)
def test_reshape1():
    """Semi-auto parallel: batch-sharded ReLU, unset reshape, row-parallel matmul."""
    reshape_common(
        ParallelMode.SEMI_AUTO_PARALLEL,
        ((8, 1, 1, 1), ),
        None,
        ((8, 1), (1, 1)),
        ((8, 1), (8, 1)),
    )
def test_reshape1_strategy_1():
    """Setting an explicit strategy on Reshape is expected to fail (semi-auto).

    The failure is tolerated; the test only checks nothing else blows up.
    """
    strategy0 = ((8, 1, 1, 1), )
    strategy1 = ((8, 1, 1, 1), )
    strategy2 = ((8, 1), (1, 1))
    strategy_loss = ((8, 1), (8, 1))
    try:
        reshape_common(ParallelMode.SEMI_AUTO_PARALLEL, strategy0, strategy1, strategy2, strategy_loss)
    except Exception:  # FIX: was a bare except, which also swallowed SystemExit/KeyboardInterrupt
        pass
def test_reshape1_strategy_2():
    """Setting an explicit strategy on Reshape is expected to fail (auto parallel).

    The failure is tolerated; the test only checks nothing else blows up.
    """
    strategy0 = ((8, 1, 1, 1), )
    strategy1 = ((8, 1, 1, 1), )
    strategy2 = ((8, 1), (1, 1))
    strategy_loss = ((8, 1), (8, 1))
    try:
        reshape_common(ParallelMode.AUTO_PARALLEL, strategy0, strategy1, strategy2, strategy_loss)
    except Exception:  # FIX: was a bare except, which also swallowed SystemExit/KeyboardInterrupt
        pass
def test_reshape2():
    """Semi-auto parallel with batch-sharded ReLU and row-parallel matmul."""
    reshape_common(
        ParallelMode.SEMI_AUTO_PARALLEL,
        ((8, 1, 1, 1), ),
        None,
        ((8, 1), (1, 1)),
        ((8, 1), (8, 1)),
    )
def test_reshape3():
    """Semi-auto parallel with a 2-way batch shard on ReLU."""
    reshape_common(
        ParallelMode.SEMI_AUTO_PARALLEL,
        ((2, 1, 1, 1), ),
        None,
        ((8, 1), (1, 1)),
        ((8, 1), (8, 1)),
    )
def test_reshape4():
    """Semi-auto parallel with an unsharded (replicated) ReLU."""
    reshape_common(
        ParallelMode.SEMI_AUTO_PARALLEL,
        ((1, 1, 1, 1), ),
        None,
        ((8, 1), (1, 1)),
        ((8, 1), (8, 1)),
    )
def test_reshape5():
    """Semi-auto parallel with a reduction-dimension-sharded matmul."""
    reshape_common(
        ParallelMode.SEMI_AUTO_PARALLEL,
        ((2, 1, 1, 1), ),
        None,
        ((1, 8), (8, 1)),
        ((8, 1), (8, 1)),
    )
def test_reshape_auto():
    """Full auto-parallel: all strategies left for the planner to decide."""
    reshape_common(ParallelMode.AUTO_PARALLEL, None, None, None, None)
class NetWithLoss(nn.Cell):
    """Wrap a network with VirtualLoss so it can be compiled/differentiated."""

    def __init__(self, network):
        super(NetWithLoss, self).__init__()
        self.loss = VirtualLoss()
        self.network = network

    def construct(self, x):
        predict = self.network(x)
        return self.loss(predict)
class GradWrap(nn.Cell):
    """Wrap a network to return gradients w.r.t. all of its inputs."""

    def __init__(self, network):
        super(GradWrap, self).__init__()
        self.network = network

    def construct(self, x):
        return C.grad_all(self.network)(x)
class ReshapeNet1(nn.Cell):
    """Reshape -> MatMul -> Reshape-to-1D, fed through a virtual dataset op."""

    def __init__(self, strategy0):
        super(ReshapeNet1, self).__init__()
        self.virtual_dataset = _VirtualDataset()
        self.reshape = P.Reshape()
        self.matmul = P.MatMul().set_strategy(strategy0)
        self.matmul_weight = Parameter(Tensor(np.ones([25088, 256]), dtype=ms.float32), name="weight")
        self.reshape2 = P.Reshape()

    def construct(self, x):
        x = self.virtual_dataset(x)
        x = self.reshape(x, (256, 25088))
        x = self.matmul(x, self.matmul_weight)
        # Flatten the [256, 256] result to a 1-D vector.
        x = self.reshape2(x, (256 * 256,))
        return x
class ReshapeNet2(nn.Cell):
    """Like ReshapeNet1, then ReduceSum(keep_dims=True) and reshape to a scalar."""

    def __init__(self, strategy0):
        super(ReshapeNet2, self).__init__()
        self.virtual_dataset = _VirtualDataset()
        self.reshape = P.Reshape()
        self.matmul = P.MatMul().set_strategy(strategy0)
        self.matmul_weight = Parameter(Tensor(np.ones([25088, 256]), dtype=ms.float32), name="weight")
        self.reshape2 = P.Reshape()
        self.reduce_sum = P.ReduceSum(keep_dims=True)
        self.reshape3 = P.Reshape()

    def construct(self, x):
        x = self.virtual_dataset(x)
        x = self.reshape(x, (256, 25088))
        x = self.matmul(x, self.matmul_weight)
        x = self.reshape2(x, (256 * 256,))
        x = self.reduce_sum(x, -1)
        # Drop the kept dimension: reshape to a 0-D (scalar) tensor.
        x = self.reshape3(x, ())
        return x
class ReshapeNet3(nn.Cell):
    """Like ReshapeNet2 but ReduceSum(keep_dims=False) followed by reshape to [1, 1]."""

    def __init__(self, strategy0):
        super(ReshapeNet3, self).__init__()
        self.virtual_dataset = _VirtualDataset()
        self.reshape = P.Reshape()
        self.matmul = P.MatMul().set_strategy(strategy0)
        self.matmul_weight = Parameter(Tensor(np.ones([25088, 256]), dtype=ms.float32), name="weight")
        self.reshape2 = P.Reshape()
        self.reduce_sum = P.ReduceSum(keep_dims=False)
        self.reshape3 = P.Reshape()

    def construct(self, x):
        x = self.virtual_dataset(x)
        x = self.reshape(x, (256, 25088))
        x = self.matmul(x, self.matmul_weight)
        x = self.reshape2(x, (256 * 256,))
        x = self.reduce_sum(x, -1)
        # Re-expand the scalar sum to a [1, 1] tensor.
        x = self.reshape3(x, (1, 1))
        return x
class ReshapeNet4(nn.Cell):
    """Variant that reshapes the *weight* parameter before the matmul
    (expected to be rejected in some parallel configurations)."""

    def __init__(self, strategy0):
        super(ReshapeNet4, self).__init__()
        self.virtual_dataset = _VirtualDataset()
        self.reshape = P.Reshape()
        self.reshape2 = P.Reshape()
        self.matmul = P.MatMul().set_strategy(strategy0)
        self.matmul_weight = Parameter(Tensor(np.ones([25088, 256]), dtype=ms.float32), name="weight")

    def construct(self, x):
        x = self.virtual_dataset(x)
        x = self.reshape(x, (256, 25088))
        # No-op reshape of the weight: same shape, but inserts a Reshape node
        # on the parameter path.
        w = self.reshape2(self.matmul_weight, (25088, 256))
        x = self.matmul(x, w)
        return x
class ReshapeNet5(nn.Cell):
    """Two chained matmuls where the reshaped input is reused as the second
    operand of the second matmul (tests multiple consumers of a Reshape)."""

    def __init__(self, strategy0):
        super(ReshapeNet5, self).__init__()
        self.virtual_dataset = _VirtualDataset()
        self.reshape = P.Reshape()
        self.matmul1 = P.MatMul().set_strategy(strategy0)
        self.matmul1_weight = Parameter(Tensor(np.ones([25088, 256]), dtype=ms.float32), name="weight")
        self.matmul2 = P.MatMul().set_strategy(strategy0)

    def construct(self, x):
        x = self.virtual_dataset(x)
        x = self.reshape(x, (256, 25088))
        matmul1_o = self.matmul1(x, self.matmul1_weight)
        # Reuse the reshaped input as the right-hand operand.
        matmul2_o = self.matmul2(matmul1_o, x)
        return matmul2_o
class ReshapeNet6(nn.Cell):
    """Two parallel matmul branches on the same reshaped input, summed, then
    multiplied by the reshaped input again (diamond-shaped consumer graph)."""

    def __init__(self, strategy0):
        super(ReshapeNet6, self).__init__()
        self.virtual_dataset = _VirtualDataset()
        self.reshape = P.Reshape()
        self.matmul1_1 = P.MatMul().set_strategy(strategy0)
        self.matmul1_2 = P.MatMul().set_strategy(strategy0)
        self.matmul1_weight = Parameter(Tensor(np.ones([25088, 256]), dtype=ms.float32), name="weight")
        self.matmul2 = P.MatMul().set_strategy(strategy0)
        self.add = P.TensorAdd()

    def construct(self, x):
        x = self.virtual_dataset(x)
        x = self.reshape(x, (256, 25088))
        matmul1_1_o = self.matmul1_1(x, self.matmul1_weight)
        matmul1_2_o = self.matmul1_2(x, self.matmul1_weight)
        matmul1_o = self.add(matmul1_1_o, matmul1_2_o)
        matmul2_o = self.matmul2(matmul1_o, x)
        return matmul2_o
def reshape_net2(backbone):
    """Compile `backbone` (wrapped in loss + grad) under 16-device semi-auto parallel.

    Only compiles via _executor — no training is executed.
    """
    batch_size = 16
    device_num = 16
    context.set_auto_parallel_context(device_num=device_num, global_rank=0)
    # Full global batch: per-device batch * device count.
    input = Tensor(np.ones([batch_size * device_num, 512, 7, 7]).astype(np.float32) * 0.01)
    net = GradWrap(NetWithLoss(backbone))
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
    _executor.compile(net, input)
# Compile-only smoke tests: each pairs a ReshapeNet variant with one of two
# matmul strategies. The net4 cases are expected to raise (weight reshape is
# unsupported), so they swallow the exception.
def test_reshape_net1_1():
    reshape_net2(ReshapeNet1(((1, 8), (8, 1))))
def test_reshape_net1_2():
    reshape_net2(ReshapeNet1(((1, 8), (8, 2))))
def test_reshape_net2_1():
    reshape_net2(ReshapeNet2(((1, 8), (8, 1))))
def test_reshape_net2_2():
    reshape_net2(ReshapeNet2(((1, 8), (8, 2))))
def test_reshape_net3_1():
    reshape_net2(ReshapeNet3(((1, 8), (8, 1))))
def test_reshape_net3_2():
    reshape_net2(ReshapeNet3(((1, 8), (8, 2))))
def test_reshape_net4_1():
    try:
        reshape_net2(ReshapeNet4(((1, 8), (8, 1))))
    except:
        pass
def test_reshape_net4_2():
    try:
        reshape_net2(ReshapeNet4(((1, 8), (8, 2))))
    except:
        pass
def test_reshape_net5_1():
    reshape_net2(ReshapeNet5(((1, 8), (8, 1))))
def test_reshape_net5_2():
    reshape_net2(ReshapeNet5(((1, 8), (8, 2))))
def test_reshape_net6_1():
    reshape_net2(ReshapeNet6(((1, 8), (8, 1))))
def test_reshape_net6_2():
    reshape_net2(ReshapeNet6(((1, 8), (8, 2))))
class TrainOneStepCell(nn.Cell):
    """
    Network training package class.
    Append an optimizer to the training network after that the construct function
    can be called to create the backward graph.
    Args:
        network (Cell): The training network.
        optimizer (Cell): Optimizer for updating the weights.
        sens (Number): The adjust parameter. Default: 1.0.
    Examples:
        >>> net = Net()
        >>> loss_fn = nn.SoftmaxCrossEntropyWithLogits()
        >>> optim = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
        >>> loss_net = WithLossCell(net, loss_fn)
        >>> train_net = TrainOneStepCell(loss_net, optim)
    """
    def __init__(self, network, optimizer, sens=1.0):
        super(TrainOneStepCell, self).__init__(auto_prefix=False)
        self.network = network
        # Defer inlining so the backward graph is built against the whole cell.
        self.network.add_flags(defer_inline=True)
        self.weights = ParameterTuple(network.trainable_params())
        self.optimizer = optimizer
        # Gradients w.r.t. the weight list, with an explicit sensitivity input.
        self.grad = C.GradOperation('grad',
                                    get_by_list=True,
                                    sens_param=True)
        self.sens = sens
    def construct(self, data):
        weights = self.weights
        loss = self.network(data)
        # Sensitivity tensor: same dtype/shape as the loss, filled with self.sens.
        sens = P.Fill()(P.DType()(loss), P.Shape()(loss), self.sens)
        grads = self.grad(self.network, weights)(data, sens)
        # depend() forces the optimizer update to run before loss is returned.
        return F.depend(loss, self.optimizer(grads))
def reshape_common2(parallel_mode, net):
    """Train `net` for 2 epochs on a single-input all-ones dataset under
    the given parallel mode with 16 devices (custom TrainOneStepCell)."""
    batch_size = 16
    learning_rate = 0.1
    momentum = 0.9
    epoch_size = 2
    predict = Tensor(np.ones([batch_size, 512, 7, 7]), dtype=ms.float32)
    label = Tensor(np.ones([batch_size]), dtype=ms.int32)
    # input_num=1: the dataset yields only `predict`; the label is unused
    # because the wrapped nets compute their own (virtual) loss.
    dataset = Dataset(predict, label, 2, input_num=1)
    context.reset_auto_parallel_context()
    context.set_auto_parallel_context(parallel_mode=parallel_mode, device_num=16)
    opt = Momentum(net.trainable_params(), learning_rate, momentum)
    train_net = TrainOneStepCell(net, opt).set_train()
    model = Model(train_net)
    model.train(epoch_size, dataset, dataset_sink_mode=False)
# Training smoke tests: ReshapeNet1-3 under semi-auto parallel with both
# matmul strategies, driven through the custom TrainOneStepCell.
def test_reshape_common2_0():
    reshape_common2(ParallelMode.SEMI_AUTO_PARALLEL, ReshapeNet1(((1, 8), (8, 1))))
def test_reshape_common2_1():
    reshape_common2(ParallelMode.SEMI_AUTO_PARALLEL, ReshapeNet1(((1, 8), (8, 2))))
def test_reshape_common2_2():
    reshape_common2(ParallelMode.SEMI_AUTO_PARALLEL, ReshapeNet2(((1, 8), (8, 1))))
def test_reshape_common2_3():
    reshape_common2(ParallelMode.SEMI_AUTO_PARALLEL, ReshapeNet2(((1, 8), (8, 2))))
def test_reshape_common2_4():
    reshape_common2(ParallelMode.SEMI_AUTO_PARALLEL, ReshapeNet3(((1, 8), (8, 1))))
def test_reshape_common2_5():
    reshape_common2(ParallelMode.SEMI_AUTO_PARALLEL, ReshapeNet3(((1, 8), (8, 2))))
class BatchNormReshapeNet(nn.Cell):
    """BatchNorm1d -> Reshape -> PReLU pipeline for reshape-insertion tests."""
    def __init__(self):
        super(BatchNormReshapeNet, self).__init__()
        self.vd = P._VirtualDataset()
        self.batch_norm = nn.BatchNorm1d(512, affine=False)
        self.reshape = P.Reshape()
        self.prelu = nn.PReLU(channel=256)
    def construct(self, x):
        x = self.vd(x)
        x = self.batch_norm(x)
        # (N, 512) -> (512, 256); assumes N*512 == 512*256, i.e. N == 256.
        x = self.reshape(x, (512, 256))
        x = self.prelu(x)
        return x
def test_batchnorm_reshape_train():
    """Compile BatchNormReshapeNet (loss + grad) under 16-device semi-auto parallel."""
    batch_size = 16
    device_num = 16
    context.set_auto_parallel_context(device_num=device_num, global_rank=0)
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
    # Global batch of 256 rows, matching the (512, 256) reshape inside the net.
    input = Tensor(np.ones([batch_size * device_num, 512]).astype(np.float32) * 0.01)
    net = GradWrap(NetWithLoss(BatchNormReshapeNet()))
    _executor.compile(net, input)
def bn_with_initialize(out_channels):
    """Build a BatchNorm2d forced to run in fp32 (momentum=0.3, eps=1e-5)."""
    bn = nn.BatchNorm2d(out_channels, momentum=0.3, eps=1e-5).add_flags_recursive(fp32=True)
    return bn
def fc_with_initialize(input_channels, out_channels):
    """Build a Dense layer forced to run in fp16 (mixed-precision test path)."""
    return nn.Dense(input_channels, out_channels).add_flags_recursive(fp16=True)
class BNReshapeDenseBNNet(nn.Cell):
    """fp32 BatchNorm2d -> flatten -> fp16 Dense -> BatchNorm1d mixed-precision test net."""
    def __init__(self):
        super(BNReshapeDenseBNNet, self).__init__()
        self.batch_norm = bn_with_initialize(2)
        self.reshape = P.Reshape()
        # NOTE(review): self.cast is never used in construct — looks like dead code; confirm.
        self.cast = P.Cast()
        self.batch_norm2 = nn.BatchNorm1d(512, affine=False)
        self.fc = fc_with_initialize(2 * 32 * 32, 512)
    def construct(self, x):
        x = self.batch_norm(x)
        # Flatten (16, 2, 32, 32) to (16, 2048) for the dense layer.
        x = self.reshape(x, (16, 2*32*32))
        x = self.fc(x)
        x = self.batch_norm2(x)
        return x
def test_bn_reshape_dense_bn_train():
    """Compile BNReshapeDenseBNNet (loss + grad) under 16-device semi-auto parallel."""
    batch_size = 16
    device_num = 16
    context.set_auto_parallel_context(device_num=device_num, global_rank=0)
    # Per-device batch only here (no * device_num), unlike the other compile tests.
    input = Tensor(np.ones([batch_size, 2, 32, 32]).astype(np.float32) * 0.01)
    net = GradWrap(NetWithLoss(BNReshapeDenseBNNet()))
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
    _executor.compile(net, input)
class ParallelReduceMeanNet(nn.Cell):
    """Conv2d -> ReduceMean -> Flatten net with an optional shard strategy
    applied to the ReduceMean, used by the flatten/reshape parallel tests."""
    def __init__(self, conv_in_channel, conv_out_channel,
                 reducemean_keep_dims=False, reducemean_axis=-1, strategy=None):
        super().__init__()
        # 1x1 conv with constant 'ones' init so outputs are deterministic.
        self.conv = nn.Conv2d(in_channels=conv_in_channel, out_channels=conv_out_channel,
                              kernel_size=1, stride=1, pad_mode='valid', has_bias=True,
                              weight_init='ones', bias_init='ones')
        self.reduce_mean = P.ReduceMean(keep_dims=reducemean_keep_dims)
        self.flat = nn.Flatten()
        self.reducemean_axis = reducemean_axis
        # Only the reduction op carries a user-specified strategy.
        if strategy is not None:
            self.reduce_mean.set_strategy(strategy)
    def construct(self, inputs):
        x = self.conv(inputs)
        x = self.reduce_mean(x, self.reducemean_axis)
        x = self.flat(x)
        return x
class CrossEntropyLoss(nn.Cell):
    """Softmax cross-entropy followed by an optional mean over the last axis."""
    def __init__(self, reduction='mean'):
        super(CrossEntropyLoss, self).__init__()
        self.reduce_mean = P.ReduceMean()
        self.cross_entropy = SoftmaxCrossEntropyWithLogits()
        self.reduction = reduction
    def construct(self, logits, label):
        loss = self.cross_entropy(logits, label)
        # Any reduction value other than 'mean' returns the per-sample loss.
        if self.reduction == 'mean':
            loss = self.reduce_mean(loss, (-1,))
        return loss
def test_flatten_reshape(parallel_mode="auto_parallel"):
    """Train ParallelReduceMeanNet (ReduceMean sharded (4,2,1,1)) for 2 epochs
    under auto-parallel on 8 devices."""
    batch_size = 16
    learning_rate = 0.1
    momentum = 0.9
    epoch_size = 2
    context.reset_auto_parallel_context()
    context.set_auto_parallel_context(parallel_mode=parallel_mode, device_num=8)
    net = ParallelReduceMeanNet(conv_in_channel=3, conv_out_channel=64, reducemean_axis=(2, 3), strategy=((4, 2, 1, 1),))
    loss = CrossEntropyLoss()
    predict = Tensor(np.ones([batch_size, 3, 32, 32]), dtype=ms.float32)
    label = Tensor(np.ones([batch_size, 64]), dtype=ms.float32)
    dataset = Dataset(predict, label, 2, input_num=2)
    opt = Momentum(net.trainable_params(), learning_rate, momentum)
    model = Model(net, loss_fn = loss, optimizer=opt)
    model.train(epoch_size, dataset, dataset_sink_mode=False)
def test_flatten_reshape2(parallel_mode="auto_parallel"):
    """Same as test_flatten_reshape but with fully_use_devices=False and a
    strategy ((4,1,1,1)) that deliberately uses only half the devices."""
    batch_size = 16
    learning_rate = 0.1
    momentum = 0.9
    epoch_size = 2
    context.reset_auto_parallel_context()
    context.set_auto_parallel_context(parallel_mode=parallel_mode, device_num=8)
    # Allow strategies that shard across fewer than all 8 devices.
    set_algo_parameters(fully_use_devices=False)
    net = ParallelReduceMeanNet(conv_in_channel=3, conv_out_channel=64, reducemean_axis=(2, 3), strategy=((4, 1, 1, 1),))
    loss = CrossEntropyLoss()
    predict = Tensor(np.ones([batch_size, 3, 32, 32]), dtype=ms.float32)
    label = Tensor(np.ones([batch_size, 64]), dtype=ms.float32)
    dataset = Dataset(predict, label, 2, input_num=2)
    opt = Momentum(net.trainable_params(), learning_rate, momentum)
    model = Model(net, loss_fn = loss, optimizer=opt)
    model.train(epoch_size, dataset, dataset_sink_mode=False)
class ParallelReshapeNet(nn.Cell):
    """Flatten -> Dense -> Reshape net where the Reshape itself carries a
    shard strategy (the case under test in test_flatten_reshape3)."""
    def __init__(self, dense_in_channel, dense_out_channel, shape, strategy=None):
        super().__init__()
        self.flat = nn.Flatten()
        self.dense = nn.Dense(in_channels=dense_in_channel,
                              out_channels=dense_out_channel,
                              weight_init='ones',
                              bias_init='ones',
                              has_bias=True)
        self.reshape = P.Reshape()
        self.shape = shape
        # Unlike the other nets, the strategy is set unconditionally (may be None).
        self.reshape.set_strategy(strategy)
    def construct(self, inputs):
        x = self.flat(inputs)
        x = self.dense(x)
        x = self.reshape(x, self.shape)
        return x
# the shape of input and output of reshape is the same
# reshape is optimized before step_parallel
def test_flatten_reshape3(parallel_mode="auto_parallel"):
    """Train ParallelReshapeNet where the reshape output shape equals its
    input shape, so the reshape is optimized away before step_parallel."""
    batch_size = 16
    learning_rate = 0.1
    momentum = 0.9
    epoch_size = 2
    context.reset_auto_parallel_context()
    context.set_auto_parallel_context(parallel_mode=parallel_mode, device_num=8)
    set_algo_parameters(fully_use_devices=False)
    # Reshape strategy (16,1) exceeds the 8-device limit on purpose; the op
    # is removed before parallel planning, so this must still compile.
    net = ParallelReshapeNet(dense_in_channel=2048, dense_out_channel=1000, shape=(128, 1000), strategy=((16, 1),))
    loss = CrossEntropyLoss()
    predict = Tensor(np.ones([batch_size, 1, 2, 1024]), dtype=ms.float32)
    label = Tensor(np.ones([batch_size, 1000]), dtype=ms.float32)
    dataset = Dataset(predict, label, 2, input_num=2)
    opt = Momentum(net.trainable_params(), learning_rate, momentum)
    model = Model(net, loss_fn = loss, optimizer=opt)
    model.train(epoch_size, dataset, dataset_sink_mode=False)
class CrossEntropyLoss2(nn.Cell):
    """Thin wrapper delegating the reduction to SoftmaxCrossEntropyWithLogits itself."""
    def __init__(self, reduction='mean'):
        super(CrossEntropyLoss2, self).__init__()
        self.cross_entropy = SoftmaxCrossEntropyWithLogits(reduction=reduction)
    def construct(self, logits, label):
        loss = self.cross_entropy(logits, label)
        return loss
def test_flatten_reshape4(parallel_mode="semi_auto_parallel"):
    """Train ParallelReduceMeanNet with keep_dims=True under semi-auto
    parallel, using the loss whose reduction is built into the op."""
    batch_size = 16
    learning_rate = 0.1
    momentum = 0.9
    epoch_size = 2
    context.reset_auto_parallel_context()
    context.set_auto_parallel_context(parallel_mode=parallel_mode, device_num=8)
    set_algo_parameters(fully_use_devices=False)
    # Default reducemean_axis=-1 with keep_dims=True; flatten then yields (N, 2048).
    net = ParallelReduceMeanNet(conv_in_channel=3, conv_out_channel=64, reducemean_keep_dims=True, strategy=((4, 1, 1, 1),))
    loss = CrossEntropyLoss2()
    predict = Tensor(np.ones([batch_size, 3, 32, 32]), dtype=ms.float32)
    label = Tensor(np.ones([batch_size, 2048]), dtype=ms.float32)
    dataset = Dataset(predict, label, 2, input_num=2)
    opt = Momentum(net.trainable_params(), learning_rate, momentum)
    model = Model(net, loss_fn=loss, optimizer=opt)
    model.train(epoch_size, dataset, dataset_sink_mode=False)
| 34.010622 | 124 | 0.668674 |
from mindspore.train import Model, ParallelMode
from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits
from mindspore.nn.optim.momentum import Momentum
from mindspore import Tensor
import mindspore as ms
import numpy as np
from mindspore.ops import operations as P
import mindspore.nn as nn
from mindspore.common.parameter import Parameter
from tests.dataset_mock import MindData
from mindspore import context
from tests.ut.python.ops.test_math_ops import VirtualLoss
from mindspore.common.api import _executor
from mindspore.ops import composite as C
from mindspore.ops.operations.comm_ops import _VirtualDataset
from mindspore.ops import functional as F
from mindspore.common.parameter import ParameterTuple
from mindspore.common import dtype as mstype
from mindspore.parallel import set_algo_parameters
context.set_context(mode=context.GRAPH_MODE)
context.reset_auto_parallel_context()
class Dataset(MindData):
    """Mock iterable dataset that yields the same (predict, label) pair
    `length` times; with input_num != 2 it yields only (predict,)."""
    def __init__(self, predict, label, length=3, input_num=2):
        super(Dataset, self).__init__(size=length)
        self.predict = predict
        self.label = label
        self.index = 0
        self.length = length
        self.input_num = input_num
    def __iter__(self):
        return self
    def __next__(self):
        if self.index >= self.length:
            raise StopIteration
        self.index += 1
        if self.input_num == 2:
            return self.predict, self.label
        else:
            # Single-input mode: trailing comma keeps this a 1-tuple.
            return self.predict,
    def reset(self):
        # Rewind so the dataset can be iterated again for the next epoch.
        self.index = 0
class ReshapeNet(nn.Cell):
    """ReLU -> Reshape -> MatMul net with a per-op shard strategy for each stage."""
    def __init__(self, strategy0, strategy1, strategy2):
        super(ReshapeNet, self).__init__()
        self.relu = P.ReLU().set_strategy(strategy0)
        # strategy1 is usually None in the tests; a non-None reshape strategy
        # is the failure case exercised by test_reshape1_strategy_*.
        self.reshape = P.Reshape().set_strategy(strategy1)
        self.matmul = P.MatMul().set_strategy(strategy2)
        self.matmul_weight = Parameter(Tensor(np.ones([25088, 256]), dtype=ms.float32), name="weight")
    def construct(self, x):
        x = self.relu(x)
        # (N, 512, 7, 7) -> (256, 25088); 512*7*7 == 25088.
        x = self.reshape(x, (256, 25088))
        x = self.matmul(x, self.matmul_weight)
        return x
def reshape_net(strategy0, strategy1, strategy2):
    """Factory for ReshapeNet with the three per-op strategies."""
    return ReshapeNet(strategy0=strategy0, strategy1=strategy1, strategy2=strategy2)
def reshape_common(parallel_mode, strategy0, strategy1, strategy2, strategy_loss):
    """Train a ReshapeNet for 2 epochs on an all-ones dataset with 8 devices,
    applying the given strategies to the net's ops and the loss."""
    batch_size = 32
    learning_rate = 0.1
    momentum = 0.9
    epoch_size = 2
    context.reset_auto_parallel_context()
    context.set_auto_parallel_context(parallel_mode=parallel_mode, device_num=8)
    predict = Tensor(np.ones([32, 512, 7, 7]), dtype=ms.float32)
    label = Tensor(np.ones([32]), dtype=ms.int32)
    dataset = Dataset(predict, label, 2)
    net = reshape_net(strategy0, strategy1, strategy2)
    # Sparse labels (int32 class indices), forward-only loss cell.
    loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
    loss.softmax_cross_entropy.set_strategy(strategy_loss)
    # One-hot sharded along the batch dimension only.
    loss.one_hot.set_strategy(((8,1), (), ()))
    opt = Momentum(net.trainable_params(), learning_rate, momentum)
    model = Model(net, loss, opt)
    model.train(epoch_size, dataset, dataset_sink_mode=False)
# Strategy-combination tests for reshape_common. Cases that set an explicit
# strategy on the Reshape op (strategy1 != None) are expected to fail and
# swallow the exception; test_reshape_auto lets the planner pick everything.
def test_reshape1():
    strategy0 = ((8, 1, 1, 1), )
    strategy1 = None
    strategy2 = ((8, 1), (1, 1))
    strategy_loss = ((8, 1), (8, 1))
    reshape_common(ParallelMode.SEMI_AUTO_PARALLEL, strategy0, strategy1, strategy2, strategy_loss)
def test_reshape1_strategy_1():
    strategy0 = ((8, 1, 1, 1), )
    strategy1 = ((8, 1, 1, 1), )
    strategy2 = ((8, 1), (1, 1))
    strategy_loss = ((8, 1), (8, 1))
    try:
        reshape_common(ParallelMode.SEMI_AUTO_PARALLEL, strategy0, strategy1, strategy2, strategy_loss)
    except:
        pass
def test_reshape1_strategy_2():
    strategy0 = ((8, 1, 1, 1), )
    strategy1 = ((8, 1, 1, 1), )
    strategy2 = ((8, 1), (1, 1))
    strategy_loss = ((8, 1), (8, 1))
    try:
        reshape_common(ParallelMode.AUTO_PARALLEL, strategy0, strategy1, strategy2, strategy_loss)
    except:
        pass
def test_reshape2():
    strategy0 = ((8, 1, 1, 1), )
    strategy1 = None
    strategy2 = ((8, 1), (1, 1))
    strategy_loss = ((8, 1), (8, 1))
    reshape_common(ParallelMode.SEMI_AUTO_PARALLEL, strategy0, strategy1, strategy2, strategy_loss)
def test_reshape3():
    strategy0 = ((2, 1, 1, 1), )
    strategy1 = None
    strategy2 = ((8, 1), (1, 1))
    strategy_loss = ((8, 1), (8, 1))
    reshape_common(ParallelMode.SEMI_AUTO_PARALLEL, strategy0, strategy1, strategy2, strategy_loss)
def test_reshape4():
    strategy0 = ((1, 1, 1, 1), )
    strategy1 = None
    strategy2 = ((8, 1), (1, 1))
    strategy_loss = ((8, 1), (8, 1))
    reshape_common(ParallelMode.SEMI_AUTO_PARALLEL, strategy0, strategy1, strategy2, strategy_loss)
def test_reshape5():
    strategy0 = ((2, 1, 1, 1), )
    strategy1 = None
    strategy2 = ((1, 8), (8, 1))
    strategy_loss = ((8, 1), (8, 1))
    reshape_common(ParallelMode.SEMI_AUTO_PARALLEL, strategy0, strategy1, strategy2, strategy_loss)
def test_reshape_auto():
    strategy0 = None
    strategy1 = None
    strategy2 = None
    strategy_loss = None
    reshape_common(ParallelMode.AUTO_PARALLEL, strategy0, strategy1, strategy2, strategy_loss)
class NetWithLoss(nn.Cell):
    """Wrap a network with VirtualLoss so compile tests have a scalar output."""
    def __init__(self, network):
        super(NetWithLoss, self).__init__()
        self.loss = VirtualLoss()
        self.network = network
    def construct(self, x):
        predict = self.network(x)
        return self.loss(predict)
class GradWrap(nn.Cell):
    """Wrap a network so that construct returns gradients w.r.t. all inputs."""
    def __init__(self, network):
        super(GradWrap, self).__init__()
        self.network = network
    def construct(self, x):
        return C.grad_all(self.network)(x)
class ReshapeNet1(nn.Cell):
    """Reshape -> MatMul -> Reshape-to-vector test net (strategy on the matmul only)."""
    def __init__(self, strategy0):
        super(ReshapeNet1, self).__init__()
        self.virtual_dataset = _VirtualDataset()
        self.reshape = P.Reshape()
        self.matmul = P.MatMul().set_strategy(strategy0)
        self.matmul_weight = Parameter(Tensor(np.ones([25088, 256]), dtype=ms.float32), name="weight")
        self.reshape2 = P.Reshape()
    def construct(self, x):
        x = self.virtual_dataset(x)
        x = self.reshape(x, (256, 25088))
        x = self.matmul(x, self.matmul_weight)
        # Flatten the (256, 256) result into a single 65536-element vector.
        x = self.reshape2(x, (256 * 256,))
        return x
class ReshapeNet2(nn.Cell):
    """Like ReshapeNet1 but adds ReduceSum(keep_dims=True) and a final reshape
    to a 0-d scalar shape."""
    def __init__(self, strategy0):
        super(ReshapeNet2, self).__init__()
        self.virtual_dataset = _VirtualDataset()
        self.reshape = P.Reshape()
        self.matmul = P.MatMul().set_strategy(strategy0)
        self.matmul_weight = Parameter(Tensor(np.ones([25088, 256]), dtype=ms.float32), name="weight")
        self.reshape2 = P.Reshape()
        self.reduce_sum = P.ReduceSum(keep_dims=True)
        self.reshape3 = P.Reshape()
    def construct(self, x):
        x = self.virtual_dataset(x)
        x = self.reshape(x, (256, 25088))
        x = self.matmul(x, self.matmul_weight)
        x = self.reshape2(x, (256 * 256,))
        # keep_dims=True leaves a length-1 axis, reshaped away to scalar shape ().
        x = self.reduce_sum(x, -1)
        x = self.reshape3(x, ())
        return x
class ReshapeNet3(nn.Cell):
def __init__(self, strategy0):
super(ReshapeNet3, self).__init__()
self.virtual_dataset = _VirtualDataset()
self.reshape = P.Reshape()
self.matmul = P.MatMul().set_strategy(strategy0)
self.matmul_weight = Parameter(Tensor(np.ones([25088, 256]), dtype=ms.float32), name="weight")
self.reshape2 = P.Reshape()
self.reduce_sum = P.ReduceSum(keep_dims=False)
self.reshape3 = P.Reshape()
def construct(self, x):
x = self.virtual_dataset(x)
x = self.reshape(x, (256, 25088))
x = self.matmul(x, self.matmul_weight)
x = self.reshape2(x, (256 * 256,))
x = self.reduce_sum(x, -1)
x = self.reshape3(x, (1, 1))
return x
class ReshapeNet4(nn.Cell):
def __init__(self, strategy0):
super(ReshapeNet4, self).__init__()
self.virtual_dataset = _VirtualDataset()
self.reshape = P.Reshape()
self.reshape2 = P.Reshape()
self.matmul = P.MatMul().set_strategy(strategy0)
self.matmul_weight = Parameter(Tensor(np.ones([25088, 256]), dtype=ms.float32), name="weight")
def construct(self, x):
x = self.virtual_dataset(x)
x = self.reshape(x, (256, 25088))
w = self.reshape2(self.matmul_weight, (25088, 256))
x = self.matmul(x, w)
return x
class ReshapeNet5(nn.Cell):
def __init__(self, strategy0):
super(ReshapeNet5, self).__init__()
self.virtual_dataset = _VirtualDataset()
self.reshape = P.Reshape()
self.matmul1 = P.MatMul().set_strategy(strategy0)
self.matmul1_weight = Parameter(Tensor(np.ones([25088, 256]), dtype=ms.float32), name="weight")
self.matmul2 = P.MatMul().set_strategy(strategy0)
def construct(self, x):
x = self.virtual_dataset(x)
x = self.reshape(x, (256, 25088))
matmul1_o = self.matmul1(x, self.matmul1_weight)
matmul2_o = self.matmul2(matmul1_o, x)
return matmul2_o
class ReshapeNet6(nn.Cell):
def __init__(self, strategy0):
super(ReshapeNet6, self).__init__()
self.virtual_dataset = _VirtualDataset()
self.reshape = P.Reshape()
self.matmul1_1 = P.MatMul().set_strategy(strategy0)
self.matmul1_2 = P.MatMul().set_strategy(strategy0)
self.matmul1_weight = Parameter(Tensor(np.ones([25088, 256]), dtype=ms.float32), name="weight")
self.matmul2 = P.MatMul().set_strategy(strategy0)
self.add = P.TensorAdd()
def construct(self, x):
x = self.virtual_dataset(x)
x = self.reshape(x, (256, 25088))
matmul1_1_o = self.matmul1_1(x, self.matmul1_weight)
matmul1_2_o = self.matmul1_2(x, self.matmul1_weight)
matmul1_o = self.add(matmul1_1_o, matmul1_2_o)
matmul2_o = self.matmul2(matmul1_o, x)
return matmul2_o
def reshape_net2(backbone):
batch_size = 16
device_num = 16
context.set_auto_parallel_context(device_num=device_num, global_rank=0)
input = Tensor(np.ones([batch_size * device_num, 512, 7, 7]).astype(np.float32) * 0.01)
net = GradWrap(NetWithLoss(backbone))
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
_executor.compile(net, input)
def test_reshape_net1_1():
reshape_net2(ReshapeNet1(((1, 8), (8, 1))))
def test_reshape_net1_2():
reshape_net2(ReshapeNet1(((1, 8), (8, 2))))
def test_reshape_net2_1():
reshape_net2(ReshapeNet2(((1, 8), (8, 1))))
def test_reshape_net2_2():
reshape_net2(ReshapeNet2(((1, 8), (8, 2))))
def test_reshape_net3_1():
reshape_net2(ReshapeNet3(((1, 8), (8, 1))))
def test_reshape_net3_2():
reshape_net2(ReshapeNet3(((1, 8), (8, 2))))
def test_reshape_net4_1():
try:
reshape_net2(ReshapeNet4(((1, 8), (8, 1))))
except:
pass
def test_reshape_net4_2():
try:
reshape_net2(ReshapeNet4(((1, 8), (8, 2))))
except:
pass
def test_reshape_net5_1():
reshape_net2(ReshapeNet5(((1, 8), (8, 1))))
def test_reshape_net5_2():
reshape_net2(ReshapeNet5(((1, 8), (8, 2))))
def test_reshape_net6_1():
reshape_net2(ReshapeNet6(((1, 8), (8, 1))))
def test_reshape_net6_2():
reshape_net2(ReshapeNet6(((1, 8), (8, 2))))
class TrainOneStepCell(nn.Cell):
def __init__(self, network, optimizer, sens=1.0):
super(TrainOneStepCell, self).__init__(auto_prefix=False)
self.network = network
self.network.add_flags(defer_inline=True)
self.weights = ParameterTuple(network.trainable_params())
self.optimizer = optimizer
self.grad = C.GradOperation('grad',
get_by_list=True,
sens_param=True)
self.sens = sens
def construct(self, data):
weights = self.weights
loss = self.network(data)
sens = P.Fill()(P.DType()(loss), P.Shape()(loss), self.sens)
grads = self.grad(self.network, weights)(data, sens)
return F.depend(loss, self.optimizer(grads))
def reshape_common2(parallel_mode, net):
batch_size = 16
learning_rate = 0.1
momentum = 0.9
epoch_size = 2
predict = Tensor(np.ones([batch_size, 512, 7, 7]), dtype=ms.float32)
label = Tensor(np.ones([batch_size]), dtype=ms.int32)
dataset = Dataset(predict, label, 2, input_num=1)
context.reset_auto_parallel_context()
context.set_auto_parallel_context(parallel_mode=parallel_mode, device_num=16)
opt = Momentum(net.trainable_params(), learning_rate, momentum)
train_net = TrainOneStepCell(net, opt).set_train()
model = Model(train_net)
model.train(epoch_size, dataset, dataset_sink_mode=False)
def test_reshape_common2_0():
reshape_common2(ParallelMode.SEMI_AUTO_PARALLEL, ReshapeNet1(((1, 8), (8, 1))))
def test_reshape_common2_1():
reshape_common2(ParallelMode.SEMI_AUTO_PARALLEL, ReshapeNet1(((1, 8), (8, 2))))
def test_reshape_common2_2():
reshape_common2(ParallelMode.SEMI_AUTO_PARALLEL, ReshapeNet2(((1, 8), (8, 1))))
def test_reshape_common2_3():
reshape_common2(ParallelMode.SEMI_AUTO_PARALLEL, ReshapeNet2(((1, 8), (8, 2))))
def test_reshape_common2_4():
reshape_common2(ParallelMode.SEMI_AUTO_PARALLEL, ReshapeNet3(((1, 8), (8, 1))))
def test_reshape_common2_5():
reshape_common2(ParallelMode.SEMI_AUTO_PARALLEL, ReshapeNet3(((1, 8), (8, 2))))
class BatchNormReshapeNet(nn.Cell):
def __init__(self):
super(BatchNormReshapeNet, self).__init__()
self.vd = P._VirtualDataset()
self.batch_norm = nn.BatchNorm1d(512, affine=False)
self.reshape = P.Reshape()
self.prelu = nn.PReLU(channel=256)
def construct(self, x):
x = self.vd(x)
x = self.batch_norm(x)
x = self.reshape(x, (512, 256))
x = self.prelu(x)
return x
def test_batchnorm_reshape_train():
batch_size = 16
device_num = 16
context.set_auto_parallel_context(device_num=device_num, global_rank=0)
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
input = Tensor(np.ones([batch_size * device_num, 512]).astype(np.float32) * 0.01)
net = GradWrap(NetWithLoss(BatchNormReshapeNet()))
_executor.compile(net, input)
def bn_with_initialize(out_channels):
bn = nn.BatchNorm2d(out_channels, momentum=0.3, eps=1e-5).add_flags_recursive(fp32=True)
return bn
def fc_with_initialize(input_channels, out_channels):
return nn.Dense(input_channels, out_channels).add_flags_recursive(fp16=True)
class BNReshapeDenseBNNet(nn.Cell):
def __init__(self):
super(BNReshapeDenseBNNet, self).__init__()
self.batch_norm = bn_with_initialize(2)
self.reshape = P.Reshape()
self.cast = P.Cast()
self.batch_norm2 = nn.BatchNorm1d(512, affine=False)
self.fc = fc_with_initialize(2 * 32 * 32, 512)
def construct(self, x):
x = self.batch_norm(x)
x = self.reshape(x, (16, 2*32*32))
x = self.fc(x)
x = self.batch_norm2(x)
return x
def test_bn_reshape_dense_bn_train():
batch_size = 16
device_num = 16
context.set_auto_parallel_context(device_num=device_num, global_rank=0)
input = Tensor(np.ones([batch_size, 2, 32, 32]).astype(np.float32) * 0.01)
net = GradWrap(NetWithLoss(BNReshapeDenseBNNet()))
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
_executor.compile(net, input)
class ParallelReduceMeanNet(nn.Cell):
def __init__(self, conv_in_channel, conv_out_channel,
reducemean_keep_dims=False, reducemean_axis=-1, strategy=None):
super().__init__()
self.conv = nn.Conv2d(in_channels=conv_in_channel, out_channels=conv_out_channel,
kernel_size=1, stride=1, pad_mode='valid', has_bias=True,
weight_init='ones', bias_init='ones')
self.reduce_mean = P.ReduceMean(keep_dims=reducemean_keep_dims)
self.flat = nn.Flatten()
self.reducemean_axis = reducemean_axis
if strategy is not None:
self.reduce_mean.set_strategy(strategy)
def construct(self, inputs):
x = self.conv(inputs)
x = self.reduce_mean(x, self.reducemean_axis)
x = self.flat(x)
return x
class CrossEntropyLoss(nn.Cell):
def __init__(self, reduction='mean'):
super(CrossEntropyLoss, self).__init__()
self.reduce_mean = P.ReduceMean()
self.cross_entropy = SoftmaxCrossEntropyWithLogits()
self.reduction = reduction
def construct(self, logits, label):
loss = self.cross_entropy(logits, label)
if self.reduction == 'mean':
loss = self.reduce_mean(loss, (-1,))
return loss
def test_flatten_reshape(parallel_mode="auto_parallel"):
batch_size = 16
learning_rate = 0.1
momentum = 0.9
epoch_size = 2
context.reset_auto_parallel_context()
context.set_auto_parallel_context(parallel_mode=parallel_mode, device_num=8)
net = ParallelReduceMeanNet(conv_in_channel=3, conv_out_channel=64, reducemean_axis=(2, 3), strategy=((4, 2, 1, 1),))
loss = CrossEntropyLoss()
predict = Tensor(np.ones([batch_size, 3, 32, 32]), dtype=ms.float32)
label = Tensor(np.ones([batch_size, 64]), dtype=ms.float32)
dataset = Dataset(predict, label, 2, input_num=2)
opt = Momentum(net.trainable_params(), learning_rate, momentum)
model = Model(net, loss_fn = loss, optimizer=opt)
model.train(epoch_size, dataset, dataset_sink_mode=False)
def test_flatten_reshape2(parallel_mode="auto_parallel"):
batch_size = 16
learning_rate = 0.1
momentum = 0.9
epoch_size = 2
context.reset_auto_parallel_context()
context.set_auto_parallel_context(parallel_mode=parallel_mode, device_num=8)
set_algo_parameters(fully_use_devices=False)
net = ParallelReduceMeanNet(conv_in_channel=3, conv_out_channel=64, reducemean_axis=(2, 3), strategy=((4, 1, 1, 1),))
loss = CrossEntropyLoss()
predict = Tensor(np.ones([batch_size, 3, 32, 32]), dtype=ms.float32)
label = Tensor(np.ones([batch_size, 64]), dtype=ms.float32)
dataset = Dataset(predict, label, 2, input_num=2)
opt = Momentum(net.trainable_params(), learning_rate, momentum)
model = Model(net, loss_fn = loss, optimizer=opt)
model.train(epoch_size, dataset, dataset_sink_mode=False)
class ParallelReshapeNet(nn.Cell):
def __init__(self, dense_in_channel, dense_out_channel, shape, strategy=None):
super().__init__()
self.flat = nn.Flatten()
self.dense = nn.Dense(in_channels=dense_in_channel,
out_channels=dense_out_channel,
weight_init='ones',
bias_init='ones',
has_bias=True)
self.reshape = P.Reshape()
self.shape = shape
self.reshape.set_strategy(strategy)
def construct(self, inputs):
x = self.flat(inputs)
x = self.dense(x)
x = self.reshape(x, self.shape)
return x
def test_flatten_reshape3(parallel_mode="auto_parallel"):
batch_size = 16
learning_rate = 0.1
momentum = 0.9
epoch_size = 2
context.reset_auto_parallel_context()
context.set_auto_parallel_context(parallel_mode=parallel_mode, device_num=8)
set_algo_parameters(fully_use_devices=False)
net = ParallelReshapeNet(dense_in_channel=2048, dense_out_channel=1000, shape=(128, 1000), strategy=((16, 1),))
loss = CrossEntropyLoss()
predict = Tensor(np.ones([batch_size, 1, 2, 1024]), dtype=ms.float32)
label = Tensor(np.ones([batch_size, 1000]), dtype=ms.float32)
dataset = Dataset(predict, label, 2, input_num=2)
opt = Momentum(net.trainable_params(), learning_rate, momentum)
model = Model(net, loss_fn = loss, optimizer=opt)
model.train(epoch_size, dataset, dataset_sink_mode=False)
class CrossEntropyLoss2(nn.Cell):
def __init__(self, reduction='mean'):
super(CrossEntropyLoss2, self).__init__()
self.cross_entropy = SoftmaxCrossEntropyWithLogits(reduction=reduction)
def construct(self, logits, label):
loss = self.cross_entropy(logits, label)
return loss
def test_flatten_reshape4(parallel_mode="semi_auto_parallel"):
batch_size = 16
learning_rate = 0.1
momentum = 0.9
epoch_size = 2
context.reset_auto_parallel_context()
context.set_auto_parallel_context(parallel_mode=parallel_mode, device_num=8)
set_algo_parameters(fully_use_devices=False)
net = ParallelReduceMeanNet(conv_in_channel=3, conv_out_channel=64, reducemean_keep_dims=True, strategy=((4, 1, 1, 1),))
loss = CrossEntropyLoss2()
predict = Tensor(np.ones([batch_size, 3, 32, 32]), dtype=ms.float32)
label = Tensor(np.ones([batch_size, 2048]), dtype=ms.float32)
dataset = Dataset(predict, label, 2, input_num=2)
opt = Momentum(net.trainable_params(), learning_rate, momentum)
model = Model(net, loss_fn=loss, optimizer=opt)
model.train(epoch_size, dataset, dataset_sink_mode=False)
| true | true |
f72e61805f48ef4806ac3d02c1f59eb7f642baa8 | 4,674 | py | Python | models/clnet.py | angseung/torch_cifar10 | 3160f749f3bffd941d6c0fb98ddaad63d4e5641d | [
"MIT"
] | null | null | null | models/clnet.py | angseung/torch_cifar10 | 3160f749f3bffd941d6c0fb98ddaad63d4e5641d | [
"MIT"
] | null | null | null | models/clnet.py | angseung/torch_cifar10 | 3160f749f3bffd941d6c0fb98ddaad63d4e5641d | [
"MIT"
] | null | null | null | '''
CrossLink Network
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
def swish(x):
    """Swish activation: x * sigmoid(x)."""
    gate = x.sigmoid()
    return x * gate
def mish(x):
    """Mish activation: x * tanh(softplus(x))."""
    soft = F.softplus(x)
    return x * soft.tanh()
class CrossLinkBlock(nn.Module):
    '''Cross-Link Block: two parallel convs whose activations are cross-gated,
    summed after per-branch BatchNorm, then projected by a 3x3 conv
    (optionally followed by 2x2 max-pooling).'''
    def __init__(self, in_channels, out_channels, kernel_size, pool_enable):
        super(CrossLinkBlock, self).__init__()
        self.pool_enable = pool_enable
        self.ReLU = nn.ReLU()
        # Two parallel convs with different kernel sizes (kernel_size is a pair).
        self.dconv1_1 = nn.Conv2d(in_channels,
                                  in_channels,
                                  kernel_size=kernel_size[0],
                                  stride=1,
                                  padding='same',
                                  groups=1,
                                  bias=False)
        self.dconv1_2 = nn.Conv2d(in_channels,
                                  in_channels,
                                  kernel_size=kernel_size[1],
                                  stride=1,
                                  padding='same',
                                  groups=1,
                                  bias=False)
        self.bn1 = nn.BatchNorm2d(in_channels)
        self.bn2 = nn.BatchNorm2d(in_channels)
        # Projection conv to out_channels ('same' padding keeps spatial size).
        self.pconv = nn.Conv2d(in_channels,
                               out_channels,
                               kernel_size=3,
                               stride=1,
                               padding='same',
                               groups=1,
                               bias=False)
        self.bn3 = nn.BatchNorm2d(out_channels)
        self.maxpool = nn.MaxPool2d(2, 2)
    def forward(self, x):
        '''Cross-gated two-branch forward pass.'''
        out1 = self.dconv1_1(x)
        out2 = self.dconv1_2(x)
        # Branch 1 self-gates: out1 * ReLU(out1).
        out1 = torch.mul(out1, self.ReLU(out1))
        # Branch 2 multiplies the *gated branch-1* output by ReLU(out2) — this
        # is presumably the intended "cross-link"; if branch-2 self-gating was
        # meant instead, the first operand should be out2 (confirm with author).
        out2 = torch.mul(out1, self.ReLU(out2))
        out = self.bn1(out1) + self.bn2(out2)
        out = self.bn3(self.pconv(out))
        if self.pool_enable:
            out = self.maxpool(out)
        return out
class CLNET(nn.Module):
    """CrossLink network: stem convs, a stack of CrossLinkBlocks built from
    `cfg`, global average pooling, optional dropout, and a linear classifier.

    cfg keys: 'out_channels', 'kernel_size', 'pool_enable' (parallel lists,
    one entry per block) and 'dropout_rate'.
    """
    def __init__(self, cfg, num_classes=10):
        super(CLNET, self).__init__()
        self.cfg = cfg
        # Stem: 3->32 conv, then 2x2 pool and BN (note: pool before BN here).
        self.conv1 = nn.Conv2d(3,
                               32,
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(32)
        self.pool1 = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(32,
                               32,
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               groups=1,
                               bias=False)
        self.bn2 = nn.BatchNorm2d(32)
        # 1x1 reduction to 16 channels before the cross-link stack.
        self.conv3 = nn.Conv2d(32,
                               16,
                               kernel_size=1,
                               stride=1,
                               padding=0,
                               bias=False)
        self.layers = self._make_layers(in_channels=16)
        # Classifier width matches the last block's output channels.
        self.linear = nn.Linear(cfg['out_channels'][-1], num_classes)
    def _make_layers(self, in_channels):
        """Build the CrossLinkBlock stack from the parallel cfg lists."""
        layers = []
        cfg = [self.cfg[k] for k in ['out_channels', 'kernel_size', 'pool_enable']]
        for out_channels, kernel_size, pool_enable in zip(*cfg):
            layers.append(
                CrossLinkBlock(in_channels,
                               out_channels,
                               kernel_size,
                               pool_enable))
            in_channels = out_channels
        return nn.Sequential(*layers)
    def forward(self, x):
        out = mish(self.bn1(self.pool1(self.conv1(x))))  # stem conv block (mish)
        out = self.conv3(swish(self.bn2(self.conv2(out))))  # 1x1 reduction (swish)
        out = self.layers(out)
        # Global average pool to (N, C, 1, 1), then flatten to (N, C).
        out = F.adaptive_avg_pool2d(out, 1)
        out = out.view(out.size(0), -1)
        dropout_rate = self.cfg['dropout_rate']
        # Dropout only during training (F.dropout here is unconditional by
        # default, so the explicit self.training guard is required).
        if self.training and dropout_rate > 0:
            out = F.dropout(out, p=dropout_rate)
        out = self.linear(out)
        return out
def CLNet_V0(num_classes):
    """Build the baseline CLNet: five cross-link stages, 0.2 dropout.

    Parameters
    ----------
    num_classes : int
        Size of the final classification layer.

    Returns
    -------
    CLNET
        The configured model.
    """
    config = dict(
        out_channels=[24, 40, 80, 112, 160],
        kernel_size=[(5, 3), (3, 5), (3, 3), (5, 5), (3, 3)],
        pool_enable=[True, True, True, True, False],
        dropout_rate=0.2,
    )
    return CLNET(config, num_classes=num_classes)
import torchinfo
def test():
    """Smoke-test CLNet_V0: print a layer summary and run one forward pass."""
    net = CLNet_V0(10)
    torchinfo.summary(net, (1, 3, 32, 32))
    # Bug fix: the input used to be created with device='cuda' while the
    # model parameters stayed on the CPU, so net(x) raised a device-mismatch
    # error (and crashed outright on machines without a GPU). Put both the
    # model and the input on the same, available device.
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    net = net.to(device)
    x = torch.randn(3, 3, 32, 32, device=device)
    y = net(x)
    print(y.shape)  # expected: torch.Size([3, 10])
if __name__ == '__main__':
test()
| 29.031056 | 83 | 0.456354 |
import torch
import torch.nn as nn
import torch.nn.functional as F
def swish(x):
    """Swish activation: element-wise x * sigmoid(x)."""
    return torch.sigmoid(x) * x
def mish(x):
    """Mish activation: element-wise x * tanh(softplus(x))."""
    return x * F.softplus(x).tanh()
class CrossLinkBlock(nn.Module):
    """Two parallel same-channel convolutions (one kernel size each) whose
    activations are cross-gated, batch-normalised, summed, fused by a 3x3
    conv, and optionally downsampled by 2x2 max-pooling."""
    def __init__(self, in_channels, out_channels, kernel_size, pool_enable):
        """kernel_size is a pair: one kernel size per parallel branch.
        'same' padding preserves spatial size (requires torch >= 1.9)."""
        super(CrossLinkBlock, self).__init__()
        self.pool_enable = pool_enable
        self.ReLU = nn.ReLU()
        # Branch 1 and branch 2 differ only in kernel size.
        self.dconv1_1 = nn.Conv2d(in_channels,
                                  in_channels,
                                  kernel_size=kernel_size[0],
                                  stride=1,
                                  padding='same',
                                  groups=1,
                                  bias=False)
        self.dconv1_2 = nn.Conv2d(in_channels,
                                  in_channels,
                                  kernel_size=kernel_size[1],
                                  stride=1,
                                  padding='same',
                                  groups=1,
                                  bias=False)
        self.bn1 = nn.BatchNorm2d(in_channels)
        self.bn2 = nn.BatchNorm2d(in_channels)
        # Fuses the summed branches and maps to out_channels.
        self.pconv = nn.Conv2d(in_channels,
                               out_channels,
                               kernel_size=3,
                               stride=1,
                               padding='same',
                               groups=1,
                               bias=False)
        self.bn3 = nn.BatchNorm2d(out_channels)
        self.maxpool = nn.MaxPool2d(2, 2)
    def forward(self, x):
        """Return the fused (and optionally pooled) feature map for x."""
        out1 = self.dconv1_1(x)
        out2 = self.dconv1_2(x)
        out1 = torch.mul(out1, self.ReLU(out1))  # branch 1: x * relu(x) self-gate
        # NOTE(review): branch 2 is gated with the already-gated out1, not
        # out2 — presumably the intended "cross-link", but confirm it is not
        # a slip for torch.mul(out2, ...).
        out2 = torch.mul(out1, self.ReLU(out2))
        out = self.bn1(out1) + self.bn2(out2)  # normalise per branch, then sum
        out = self.bn3(self.pconv(out))
        if self.pool_enable:
            out = self.maxpool(out)
        return out
class CLNET(nn.Module):
    """Image classifier: small conv stem, configurable CrossLinkBlock stack,
    global average pooling, dropout, and a linear classification head.

    `cfg` must provide equal-length lists 'out_channels', 'kernel_size',
    'pool_enable' (one entry per stage) plus a scalar 'dropout_rate'.
    """
    def __init__(self, cfg, num_classes=10):
        super(CLNET, self).__init__()
        self.cfg = cfg
        # Stem: 3 -> 32 channels; the 2x2 pool halves the resolution.
        self.conv1 = nn.Conv2d(3,
                               32,
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(32)
        self.pool1 = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(32,
                               32,
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               groups=1,
                               bias=False)
        self.bn2 = nn.BatchNorm2d(32)
        # 1x1 conv squeezes 32 -> 16 channels before the block stack.
        self.conv3 = nn.Conv2d(32,
                               16,
                               kernel_size=1,
                               stride=1,
                               padding=0,
                               bias=False)
        self.layers = self._make_layers(in_channels=16)
        # Classifier input width = channel count of the last stage.
        self.linear = nn.Linear(cfg['out_channels'][-1], num_classes)
    def _make_layers(self, in_channels):
        """Build the CrossLinkBlock stack described by self.cfg."""
        layers = []
        cfg = [self.cfg[k] for k in ['out_channels', 'kernel_size', 'pool_enable']]
        for out_channels, kernel_size, pool_enable in zip(*cfg):
            layers.append(
                CrossLinkBlock(in_channels,
                               out_channels,
                               kernel_size,
                               pool_enable))
            in_channels = out_channels  # next stage consumes this stage's output
        return nn.Sequential(*layers)
    def forward(self, x):
        """Return class logits of shape (N, num_classes) for input images x."""
        out = mish(self.bn1(self.pool1(self.conv1(x))))  # conv stem
        out = self.conv3(swish(self.bn2(self.conv2(out))))  # channel squeeze
        out = self.layers(out)
        out = F.adaptive_avg_pool2d(out, 1)  # global average pool to 1x1
        out = out.view(out.size(0), -1)
        dropout_rate = self.cfg['dropout_rate']
        # Dropout only while training (F.dropout would otherwise always apply).
        if self.training and dropout_rate > 0:
            out = F.dropout(out, p=dropout_rate)
        out = self.linear(out)
        return out
def CLNet_V0(num_classes):
    """Return the baseline CLNet (five cross-link stages, dropout 0.2)."""
    stage_channels = [24, 40, 80, 112, 160]
    stage_kernels = [(5, 3), (3, 5), (3, 3), (5, 5), (3, 3)]
    stage_pools = [True, True, True, True, False]
    config = {
        'out_channels': stage_channels,
        'kernel_size': stage_kernels,
        'pool_enable': stage_pools,
        'dropout_rate': 0.2,
    }
    return CLNET(config, num_classes=num_classes)
import torchinfo
def test():
    """Smoke-test CLNet_V0: print a layer summary and run one forward pass."""
    net = CLNet_V0(10)
    torchinfo.summary(net, (1, 3, 32, 32))
    # Bug fix: the input used to be created with device='cuda' while the
    # model parameters stayed on the CPU, so net(x) raised a device-mismatch
    # error (and crashed outright without a GPU). Place the model and the
    # input on the same, available device.
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    net = net.to(device)
    x = torch.randn(3, 3, 32, 32, device=device)
    y = net(x)
    print(y.shape)  # expected: torch.Size([3, 10])
if __name__ == '__main__':
test()
| true | true |
f72e62b8e653411c678624094bfff9469f0cfb03 | 1,672 | py | Python | var/spack/repos/builtin/packages/re2c/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/re2c/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 8 | 2021-11-09T20:28:40.000Z | 2022-03-15T03:26:33.000Z | var/spack/repos/builtin/packages/re2c/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2019-02-08T20:37:20.000Z | 2019-03-31T15:19:26.000Z | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class Re2c(AutotoolsPackage):
    """re2c: a free and open-source lexer generator for C and C++"""
    homepage = "https://re2c.org/index.html"
    url = "https://github.com/skvadrik/re2c/releases/download/1.2.1/re2c-1.2.1.tar.xz"
    # Known releases, newest first, with upstream tarball checksums.
    version('2.2', sha256='0fc45e4130a8a555d68e230d1795de0216dfe99096b61b28e67c86dfd7d86bda')
    version('2.1.1', sha256='036ee264fafd5423141ebd628890775aa9447a4c4068a6307385d7366fe711f8')
    version('2.1', sha256='8cba0d95c246c670de8f97f57def83a9c0f2113eaa6f7e4867a941f48f633540')
    version('2.0.3', sha256='b2bc1eb8aaaa21ff2fcd26507b7e6e72c5e3d887e58aa515c2155fb17d744278')
    version('2.0.2', sha256='6cddbb558dbfd697a729cb4fd3f095524480283b89911ca5221835d8a67ae5e0')
    version('2.0.1', sha256='aef8b50bb75905b2d55a7236380c0efdc756fa077fe16d808aaacbb10fb53531')
    version('2.0', sha256='89a9d7ee14be10e3779ea7b2c8ea4a964afce6e76b8dbcd5479940681db46d20')
    version('1.3', sha256='f37f25ff760e90088e7d03d1232002c2c2672646d5844fdf8e0d51a5cd75a503')
    version('1.2.1', sha256='1a4cd706b5b966aeffd78e3cf8b24239470ded30551e813610f9cd1a4e01b817')
    def configure_args(self):
        """Autotools flags: build only the re2c tool, no optional extras."""
        return [
            '--disable-benchmarks',
            '--disable-debug',
            '--disable-dependency-tracking',
            '--disable-docs',
            '--disable-lexers',  # requires existing system re2c
            '--disable-libs',  # experimental
            '--enable-golang',
        ]
| 47.771429 | 95 | 0.727273 |
from spack.package import *
class Re2c(AutotoolsPackage):
    """re2c: a free and open-source lexer generator for C and C++."""
    homepage = "https://re2c.org/index.html"
    url = "https://github.com/skvadrik/re2c/releases/download/1.2.1/re2c-1.2.1.tar.xz"
    # Known releases, newest first, with upstream tarball checksums.
    version('2.2', sha256='0fc45e4130a8a555d68e230d1795de0216dfe99096b61b28e67c86dfd7d86bda')
    version('2.1.1', sha256='036ee264fafd5423141ebd628890775aa9447a4c4068a6307385d7366fe711f8')
    version('2.1', sha256='8cba0d95c246c670de8f97f57def83a9c0f2113eaa6f7e4867a941f48f633540')
    version('2.0.3', sha256='b2bc1eb8aaaa21ff2fcd26507b7e6e72c5e3d887e58aa515c2155fb17d744278')
    version('2.0.2', sha256='6cddbb558dbfd697a729cb4fd3f095524480283b89911ca5221835d8a67ae5e0')
    version('2.0.1', sha256='aef8b50bb75905b2d55a7236380c0efdc756fa077fe16d808aaacbb10fb53531')
    version('2.0', sha256='89a9d7ee14be10e3779ea7b2c8ea4a964afce6e76b8dbcd5479940681db46d20')
    version('1.3', sha256='f37f25ff760e90088e7d03d1232002c2c2672646d5844fdf8e0d51a5cd75a503')
    version('1.2.1', sha256='1a4cd706b5b966aeffd78e3cf8b24239470ded30551e813610f9cd1a4e01b817')
    def configure_args(self):
        """Autotools flags: build only the re2c tool, no optional extras."""
        return [
            '--disable-benchmarks',
            '--disable-debug',
            '--disable-dependency-tracking',
            '--disable-docs',
            '--disable-lexers',  # bootstrapping the lexers needs a system re2c
            '--disable-libs',  # libre2c is experimental upstream
            '--enable-golang',
        ]
| true | true |
f72e63331fdecca17f380ab15c4073db30e469ad | 3,208 | py | Python | psycholab/examples/prisoners_dilemma.py | jrmendeshurb/google-research | f9fa8cdd2fb77975b524371fd29df008b9dc6cf4 | [
"Apache-2.0"
] | 1 | 2020-08-14T08:11:30.000Z | 2020-08-14T08:11:30.000Z | psycholab/examples/prisoners_dilemma.py | jrmendeshurb/google-research | f9fa8cdd2fb77975b524371fd29df008b9dc6cf4 | [
"Apache-2.0"
] | 12 | 2020-09-25T22:43:27.000Z | 2022-02-10T02:21:35.000Z | psycholab/examples/prisoners_dilemma.py | ZachT1711/google-research | 662e6837a3efa0c40b11cb4122447c4b028d2115 | [
"Apache-2.0"
] | 1 | 2020-03-16T14:21:31.000Z | 2020-03-16T14:21:31.000Z | # coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""prisoners dilemma grid game.
this example comes from the games introduced in paper
A Polynomial-time Nash Equilibrium Algorithm for Repeated Stochastic Games
by Enrique Munoz de Cote and Michael L. Littman
"""
import numpy as np
from psycholab import game
from psycholab import visualizer
def create_game():
  """Create the prisoners dilemma game.

  Returns
  -------
  visualizer.Visualizer
    The Psycholab game wrapped in a visualizer, ready for reset()/step().
  """
  # ASCII map: '#' walls, 'a'/'b' private goals, 'd' shared goal in the wall.
  art = ['####d####',
         'a A B b',
         '#########'
         ]
  item_a = game.Item(color=(0, 254, 254))
  item_b = game.Item(color=(254, 254, 0))
  item_d = game.Item(color=(0, 254, 254))
  items = {'a': item_a, 'b': item_b, 'd': item_d}
  player_a = game.Player(color=(0, 100, 254))
  player_b = game.Player(color=(254, 100, 0))
  players = {'A': player_a, 'B': player_b}
  env = game.Game(art, items, players, tabular=True)
  env.display()
  # Step cost of -1 per move; +100 for reaching any goal item.
  env.add_reward('A_moves', {'A': -1})
  env.add_reward('B_moves', {'B': -1})
  env.add_reward('A_collects_a', {'A': 100})
  env.add_reward('B_collects_b', {'B': 100})
  env.add_reward('A_collects_d', {'A': 100})
  env.add_reward('B_collects_d', {'B': 100})
  # Any collected goal ends the episode.
  env.add_terminaison('A_collects_d')
  env.add_terminaison('B_collects_d')
  env.add_terminaison('A_collects_a')
  env.add_terminaison('B_collects_b')
  # for frame-by-frame visualization:
  env = visualizer.Visualizer(env, fps=2, by_episode=False)
  # for fast visualization:
  # env = visualizer.Visualizer(env, fps=1000, by_episode=True)
  return env
def run_game(env, max_step):
  """Runs `max_step` iterations of the game `env` and print players returns.

  Actions are sampled uniformly at random for every agent; per-episode
  summed rewards are printed whenever an episode terminates.
  """
  obs = env.reset()
  # discrete_state converts observations into states
  # 'obs' contains all agent x, y positions.
  # 'state' is an integer representing the combination of
  # all agents x, y positions.
  state = env.discrete_state(obs)
  # NOTE(review): `transitions` grows for the whole run but is never read —
  # presumably kept for debugging/inspection; drop it if memory matters.
  transitions = []
  returns = 0
  episode = 0
  for _ in range(max_step):
    # Pick a random action for all agents:
    actions = np.random.choice(range(env.num_actions), env.num_players)
    # Environment step:
    obs, rewards, done, info = env.step(actions)
    new_state = env.discrete_state(obs)
    transitions.append((state, new_state, rewards, actions, done, info))
    state = new_state
    # Sum rewards (element-wise, one entry per player):
    returns += rewards
    if done:
      # The last episode is finished:
      episode += 1
      print('episode', episode, 'returns', returns)
      # Reset env for new episode
      obs = env.reset()
      # state = env.discrete_state(obs)
      returns = 0
  # Close visualizer:
  env.finish()
if __name__ == '__main__':
game_env = create_game()
run_game(game_env, max_step=200000)
| 29.431193 | 79 | 0.683603 |
import numpy as np
from psycholab import game
from psycholab import visualizer
def create_game():
  """Create the prisoners dilemma grid game, wrapped in a visualizer."""
  # ASCII map: '#' walls, 'a'/'b' private goals, 'd' shared goal in the wall.
  art = ['####d####',
         'a A B b',
         '#########'
         ]
  item_a = game.Item(color=(0, 254, 254))
  item_b = game.Item(color=(254, 254, 0))
  item_d = game.Item(color=(0, 254, 254))
  items = {'a': item_a, 'b': item_b, 'd': item_d}
  player_a = game.Player(color=(0, 100, 254))
  player_b = game.Player(color=(254, 100, 0))
  players = {'A': player_a, 'B': player_b}
  env = game.Game(art, items, players, tabular=True)
  env.display()
  # Step cost of -1 per move; +100 for reaching any goal item.
  env.add_reward('A_moves', {'A': -1})
  env.add_reward('B_moves', {'B': -1})
  env.add_reward('A_collects_a', {'A': 100})
  env.add_reward('B_collects_b', {'B': 100})
  env.add_reward('A_collects_d', {'A': 100})
  env.add_reward('B_collects_d', {'B': 100})
  # Any collected goal ends the episode.
  env.add_terminaison('A_collects_d')
  env.add_terminaison('B_collects_d')
  env.add_terminaison('A_collects_a')
  env.add_terminaison('B_collects_b')
  # fps=2, by_episode=False gives frame-by-frame visualization.
  env = visualizer.Visualizer(env, fps=2, by_episode=False)
  return env
def run_game(env, max_step):
  """Run `max_step` random-action steps of `env`, printing episode returns."""
  obs = env.reset()
  # 'state' is one integer encoding all agents' (x, y) positions.
  state = env.discrete_state(obs)
  # NOTE(review): `transitions` grows for the whole run but is never read —
  # presumably kept for debugging/inspection; drop it if memory matters.
  transitions = []
  returns = 0
  episode = 0
  for _ in range(max_step):
    # Uniform random action for every agent.
    actions = np.random.choice(range(env.num_actions), env.num_players)
    obs, rewards, done, info = env.step(actions)
    new_state = env.discrete_state(obs)
    transitions.append((state, new_state, rewards, actions, done, info))
    state = new_state
    returns += rewards  # element-wise, one entry per player
    if done:
      episode += 1
      print('episode', episode, 'returns', returns)
      obs = env.reset()
      returns = 0
  env.finish()  # close the visualizer
if __name__ == '__main__':
game_env = create_game()
run_game(game_env, max_step=200000)
| true | true |
f72e64fb1484ce3cce892ced018d30ae8e0d7a72 | 2,728 | py | Python | compiler/modules/precharge_array.py | panicmarvin/OpenRAM | abf47bab50adb48337c59b72ccd6023c1999f3fc | [
"BSD-3-Clause"
] | null | null | null | compiler/modules/precharge_array.py | panicmarvin/OpenRAM | abf47bab50adb48337c59b72ccd6023c1999f3fc | [
"BSD-3-Clause"
] | null | null | null | compiler/modules/precharge_array.py | panicmarvin/OpenRAM | abf47bab50adb48337c59b72ccd6023c1999f3fc | [
"BSD-3-Clause"
] | 1 | 2020-01-23T07:12:52.000Z | 2020-01-23T07:12:52.000Z | import design
import debug
from tech import drc
from vector import vector
from precharge import precharge
class precharge_array(design.design):
    """
    Dynamically generated precharge array of all bitlines. Cols is number
    of bit line columns, height is the height of the bit-cell array.
    """
    def __init__(self, columns, size=1):
        design.design.__init__(self, "precharge_array")
        debug.info(1, "Creating {0}".format(self.name))
        self.columns = columns
        # One shared precharge cell, tiled horizontally `columns` times.
        self.pc_cell = precharge(name="precharge", size=size)
        self.add_mod(self.pc_cell)
        self.width = self.columns * self.pc_cell.width
        self.height = self.pc_cell.height
        self.add_pins()
        self.create_layout()
        self.DRC_LVS()
    def add_pins(self):
        """Adds pins for spice file"""
        for i in range(self.columns):
            self.add_pin("bl[{0}]".format(i))
            self.add_pin("br[{0}]".format(i))
        self.add_pin("en")
        self.add_pin("vdd")
    def create_layout(self):
        """Place the cell instances and add full-width vdd/en rails."""
        self.add_insts()
        self.add_layout_pin(text="vdd",
                            layer="metal1",
                            offset=self.pc_cell.get_pin("vdd").ll(),
                            width=self.width,
                            height=drc["minwidth_metal1"])
        self.add_layout_pin(text="en",
                            layer="metal1",
                            offset=self.pc_cell.get_pin("en").ll(),
                            width=self.width,
                            height=drc["minwidth_metal1"])
    def add_insts(self):
        """Creates a precharge array by horizontally tiling the precharge cell"""
        for i in range(self.columns):
            name = "pre_column_{0}".format(i)
            offset = vector(self.pc_cell.width * i, 0)
            inst = self.add_inst(name=name,
                                 mod=self.pc_cell,
                                 offset=offset)
            bl_pin = inst.get_pin("bl")
            self.add_layout_pin(text="bl[{0}]".format(i),
                                layer="metal2",
                                offset=bl_pin.ll(),
                                width=drc["minwidth_metal2"],
                                height=bl_pin.height())
            br_pin = inst.get_pin("br")
            self.add_layout_pin(text="br[{0}]".format(i),
                                layer="metal2",
                                offset=br_pin.ll(),
                                width=drc["minwidth_metal2"],
                                # Bug fix: this previously reused
                                # bl_pin.height() (copy-paste); the br layout
                                # pin must use the br pin's own height.
                                height=br_pin.height())
            self.connect_inst(["bl[{0}]".format(i), "br[{0}]".format(i),
                               "en", "vdd"])
| 35.428571 | 81 | 0.495235 | import design
import debug
from tech import drc
from vector import vector
from precharge import precharge
class precharge_array(design.design):
    """Dynamically generated precharge array spanning all bitline columns."""
    def __init__(self, columns, size=1):
        design.design.__init__(self, "precharge_array")
        debug.info(1, "Creating {0}".format(self.name))
        self.columns = columns
        # One shared precharge cell, tiled horizontally `columns` times.
        self.pc_cell = precharge(name="precharge", size=size)
        self.add_mod(self.pc_cell)
        self.width = self.columns * self.pc_cell.width
        self.height = self.pc_cell.height
        self.add_pins()
        self.create_layout()
        self.DRC_LVS()
    def add_pins(self):
        """Adds the bl/br pair per column plus shared en/vdd pins (spice)."""
        for i in range(self.columns):
            self.add_pin("bl[{0}]".format(i))
            self.add_pin("br[{0}]".format(i))
        self.add_pin("en")
        self.add_pin("vdd")
    def create_layout(self):
        """Place the cell instances and add full-width vdd/en rails."""
        self.add_insts()
        self.add_layout_pin(text="vdd",
                            layer="metal1",
                            offset=self.pc_cell.get_pin("vdd").ll(),
                            width=self.width,
                            height=drc["minwidth_metal1"])
        self.add_layout_pin(text="en",
                            layer="metal1",
                            offset=self.pc_cell.get_pin("en").ll(),
                            width=self.width,
                            height=drc["minwidth_metal1"])
    def add_insts(self):
        """Tile the precharge cell horizontally, exporting bl/br layout pins."""
        for i in range(self.columns):
            name = "pre_column_{0}".format(i)
            offset = vector(self.pc_cell.width * i, 0)
            inst=self.add_inst(name=name,
                               mod=self.pc_cell,
                               offset=offset)
            bl_pin = inst.get_pin("bl")
            self.add_layout_pin(text="bl[{0}]".format(i),
                                layer="metal2",
                                offset=bl_pin.ll(),
                                width=drc["minwidth_metal2"],
                                height=bl_pin.height())
            br_pin = inst.get_pin("br")
            self.add_layout_pin(text="br[{0}]".format(i),
                                layer="metal2",
                                offset=br_pin.ll(),
                                width=drc["minwidth_metal2"],
                                # NOTE(review): reuses bl_pin.height() for the
                                # br pin — looks like a copy-paste; harmless
                                # only if both pins share a height. Confirm.
                                height=bl_pin.height())
            self.connect_inst(["bl[{0}]".format(i), "br[{0}]".format(i),
                               "en", "vdd"])
| true | true |
f72e6521b83c2e7be6a5b020d3b956027f505035 | 14,176 | py | Python | warehouse-loader/warehouse/components/services.py | uk-gov-mirror/NHSX.covid-chest-imaging-database | 77799a97193d09e9267182d18fbb79d604bbb038 | [
"MIT"
] | 56 | 2020-04-08T12:40:28.000Z | 2021-10-02T22:57:16.000Z | warehouse-loader/warehouse/components/services.py | uk-gov-mirror/NHSX.covid-chest-imaging-database | 77799a97193d09e9267182d18fbb79d604bbb038 | [
"MIT"
] | 111 | 2020-04-02T13:23:06.000Z | 2022-03-30T13:23:28.000Z | warehouse-loader/warehouse/components/services.py | uk-gov-mirror/NHSX.covid-chest-imaging-database | 77799a97193d09e9267182d18fbb79d604bbb038 | [
"MIT"
] | 10 | 2020-05-05T14:07:11.000Z | 2022-01-11T15:47:27.000Z | import csv
import gzip
import logging
import re
import sys
import tempfile
import boto3
import mondrian
from botocore.exceptions import ClientError
from warehouse.components.constants import TRAINING_PERCENTAGE
mondrian.setup(excepthook=True)
logger = logging.getLogger()
class PipelineConfig:
    """Configuration settings for the whole pipeline."""

    def __init__(self):
        # Defaults used until set_config() is called with real settings.
        self.config = {
            "raw_prefixes": [],
            "training_percentage": TRAINING_PERCENTAGE,
            "sites": {"split": [], "training": [], "validation": []},
        }
        self.sites = {}

    def set_config(self, input_config):
        """Setting pipeline configuration from supplied data.

        Parameters
        ----------
        input_config : dict
            The configuration to ingest and set internally.
        """
        self.config = input_config
        # Flatten the per-group site lists into a centre -> group lookup.
        for group_name, centres in self.config["sites"].items():
            for centre in centres:
                self.sites[centre] = group_name
        logger.debug(f"Training percentage: {self.get_training_percentage()}%")

    def get_raw_prefixes(self):
        """Return the set of "raw-..." prefixes configured for processing.

        Returns
        -------
        set
            Configured raw prefixes (empty set when none configured).
        """
        return set(self.config.get("raw_prefixes", []))

    def get_training_percentage(self):
        """Return the configured (or default) training percentage.

        Returns
        -------
        int
            Proportion of random assignment to the training set,
            clamped to the range 0-100.
        """
        percentage = self.config.get(
            "training_percentage", TRAINING_PERCENTAGE
        )
        return max(0, min(100, percentage))

    def get_site_group(self, submitting_centre):
        """Get the group a submitting centre is assigned to.

        Parameters
        ----------
        submitting_centre : str
            The submitting centre's name to look up.

        Returns
        -------
        str or None
            "training", "validation", or "split"; None when unknown.
        """
        return self.sites.get(submitting_centre)
class S3Client:
    """Thin wrapper around a boto3 S3 client bound to a single work bucket."""

    def __init__(self, bucket):
        self._bucket = bucket
        self._client = boto3.client("s3")

    @property
    def bucket(self):
        # Name of the bucket every operation acts on.
        return self._bucket

    @property
    def client(self):
        # The underlying boto3 S3 client, exposed for advanced callers.
        return self._client

    def object_exists(self, key):
        """Checking whether a given object exists in our work bucket

        Parameters
        ----------
        key : str
            The object key in question.

        Returns
        -------
        boolean
            True if object exists in the work bucket.

        Raises
        ------
        botocore.exceptions.ClientError
            If there's any client side transfer error other than a 404.
        """
        try:
            self._client.head_object(Bucket=self._bucket, Key=key)
        except ClientError as e:
            if e.response["Error"]["Code"] == "404":
                return False
            # Bug fix: this previously did `raise ClientError`, which
            # instantiates ClientError with no arguments (a TypeError at
            # raise time) and discards the original error. Re-raise the
            # caught exception instead.
            raise
        else:
            return True

    def get_object(self, key):
        """Return the raw boto3 get_object response for `key`."""
        try:
            args = {"Bucket": self._bucket, "Key": key}
            return self._client.get_object(**args)
        except ClientError:
            raise

    def object_content(self, key, content_range=None):
        """Return the object's payload bytes; `content_range` is an optional
        HTTP byte-range string passed through as the `Range` argument."""
        try:
            args = {"Bucket": self._bucket, "Key": key}
            if content_range is not None:
                args["Range"] = content_range
            file_content = self._client.get_object(**args)["Body"].read()
        except ClientError:
            raise
        return file_content

    def put_object(self, key, content):
        """Store `content` under `key` in the work bucket."""
        try:
            args = {"Bucket": self._bucket, "Key": key, "Body": content}
            self._client.put_object(**args)
        except ClientError:
            raise

    def copy_object(self, old_key, new_key):
        """Server-side copy of `old_key` to `new_key` within the bucket."""
        try:
            args = {
                "Bucket": self._bucket,
                "CopySource": {"Bucket": self._bucket, "Key": old_key},
                "Key": new_key,
            }
            self._client.copy_object(**args)
        except ClientError:
            raise

    def upload_file(self, key, file_name):
        """Upload the local file `file_name` to `key` in the work bucket.

        Raises
        ------
        botocore.exceptions.ClientError
            If there's any client side transfer error.
        FileNotFoundError
            If the file to be uploaded doesn't exist.
        """
        try:
            self._client.upload_file(file_name, self._bucket, key)
        except (ClientError, FileNotFoundError):
            raise
class InventoryDownloader:
    """Downloads and iterates the S3 inventory listing of the work bucket.

    The inventory lives in a companion bucket named `<main bucket>-inventory`.
    Any failure to read it is treated as fatal (logged, then sys.exit(1)).
    """
    def __init__(self, main_bucket):
        # Bucket whose inventory we read; its inventory is stored in
        # "<main_bucket>-inventory".
        self.main_bucket = main_bucket
        self.inventory_bucket = self.main_bucket + "-inventory"
        self._get_inventory_list()
    def _get_inventory_list(self):
        """Resolve the latest daily inventory symlink into a list of
        gzipped inventory fragment keys (stored on self.inventory_list)."""
        try:
            inventory_bucket = self.main_bucket + "-inventory"
            s3_client = boto3.client("s3")
            # Get the latest list of inventory files
            objs = s3_client.list_objects_v2(
                Bucket=inventory_bucket,
                Prefix=f"{self.main_bucket}/daily-full-inventory/hive",
            )["Contents"]
            # Keys are date-stamped, so lexicographic max is the newest.
            latest_symlink = sorted([obj["Key"] for obj in objs])[-1]
            response = s3_client.get_object(
                Bucket=inventory_bucket, Key=latest_symlink
            )
            # The symlink file holds one "s3://bucket/key" URL per line;
            # strip the bucket part to keep bare keys.
            self.inventory_list = [
                line.replace(f"s3://{inventory_bucket}/", "")
                for line in response["Body"].read().decode("utf-8").split("\n")
            ]
        except Exception as e:  # noqa: E722
            logger.error(f"Can't use inventory due to run time error: {e}")
            sys.exit(1)
    def get_inventory(self, excludeline=set()):
        """Iterate through all the inventory files, and passing back a reader
        to use the data from them.

        Parameters
        ----------
        excludeline : set
            Indices of inventory fragments to skip.  (Mutable default is
            safe here: it is only read, never mutated.)

        Yields
        ------
        tuple[int, _csv.reader]
            Index of the given inventory fragment and a CSV reader initialized
        """
        try:
            s3_client = boto3.client("s3")
            for index, inventory_file in enumerate(self.inventory_list):
                if index in excludeline:
                    logger.debug(
                        f"Skipping inventory file as requested: {inventory_file}"
                    )
                    continue
                logger.debug(f"Downloading inventory file: {inventory_file}")
                # Fragments are gzip-compressed CSV; spool to a temp file
                # and hand back a streaming reader.
                with tempfile.TemporaryFile(mode="w+b") as f:
                    s3_client.download_fileobj(
                        self.inventory_bucket, inventory_file, f
                    )
                    f.seek(0)
                    with gzip.open(f, mode="rt") as cf:
                        reader = csv.reader(cf)
                        yield index, reader
        except Exception as e:  # noqa: E722
            logger.error(f"Can't use inventory due to run time error: {e}")
            sys.exit(1)
    def get_bucket(self):
        """The S3 bucket that this downloader is configured to use.

        Returns
        -------
        str
            S3 bucket name
        """
        return self.main_bucket
class CacheContradiction(Exception):
    """Raised when a patient ID is seen with conflicting group assignments."""
class PatientCache:
    """A cache mapping patient IDs to their training/validation assignment."""

    def __init__(self, downloader):
        """Build the cache by scanning the warehouse inventory.

        Parameters
        ----------
        downloader : InventoryDownloader
            An initialized downloader instance.
        """
        self.downloader = downloader
        self.store = dict()
        self._load_cache()

    def _load_cache(self):
        """Seed the cache from every processed-data key in the inventory."""
        key_re = re.compile(
            r"^(?P<group>training|validation)/data/(?P<pseudonym>[^/]*)/[^/]*$"
        )
        for _, fragment_reader in self.downloader.get_inventory():
            for record in fragment_reader:
                match = key_re.match(record[1])  # column 1 is the object key
                if match:
                    self.add(match.group("pseudonym"), match.group("group"))

    def add(self, patient_id, group):
        """Record a patient's group assignment.

        Parameters
        ----------
        patient_id : str
            The patient ID or pseudonym to store.
        group : str
            "training" or "validation"; only "is training" is stored.

        Raises
        ------
        CacheContradiction
            If the patient was previously recorded in the other group.
        """
        is_training = group == "training"
        if patient_id not in self.store:
            self.store[patient_id] = is_training
        elif self.store[patient_id] != is_training:
            raise CacheContradiction(
                f"Found patient with ambiguous groups: {patient_id}"
            )

    def get_group(self, patient_id):
        """Look up a patient's group.

        Parameters
        ----------
        patient_id : str
            The patient ID / pseudonym in question.

        Returns
        -------
        str or None
            "training" or "validation" when known, None when not cached.
        """
        if patient_id not in self.store:
            return None
        return "training" if self.store[patient_id] else "validation"
class FileList:
    """Derives lists of raw/processed warehouse files from the S3 inventory."""
    def __init__(self, downloader):
        # Shared inventory downloader; `bucket` kept for callers' reference.
        self.downloader = downloader
        self.bucket = downloader.get_bucket()
    def get_raw_data_list(self, raw_prefixes=set()):
        """Get the list of raw data files from the inventory

        Parameters
        ----------
        raw_prefixes : set, default=set()
            The raw prefixes to consider for processing in the warehouse.
            (Mutable default is safe here: it is only read, never mutated.)

        Yields
        ------
        str
            The keys for the raw data files found
        """
        pattern = re.compile(
            r"^(?P<raw_prefix>raw-.*)/(\d{4}-\d{2}-\d{2})/data/(?P<filename>[^/]*)$"
        )
        for r, fragment_reader in self.downloader.get_inventory():
            for row in fragment_reader:
                key = row[1]  # column 1 of an inventory row is the object key
                key_match = pattern.match(key)
                if key_match and key_match.group("raw_prefix") in raw_prefixes:
                    yield key
    def get_pending_raw_images_list(self, raw_prefixes=set()):
        """Get the list of raw image files that appear not yet processed.

        For every inventory fragment, collect its raw image keys, then scan
        the full inventory again for matching processed filenames (a .dcm is
        also considered processed if its .json metadata twin was seen);
        whatever remains unmatched is yielded as pending.

        Parameters
        ----------
        raw_prefixes : set, default=set()
            The raw prefixes to consider for processing in the warehouse.

        Yields
        ------
        str
            The keys for raw image files that seem not yet to be processed.
        """
        raw_pattern = re.compile(
            r"^(?P<raw_prefix>raw-.*)/\d{4}-\d{2}-\d{2}/images/(?P<filename>[^/]*)$"
        )
        processed_pattern = re.compile(
            r"^(training|validation)/(xray|ct|mri).*/(?P<filename>[^/]*)$"
        )
        # Fragments proven to contain no processed files are skipped on
        # later rescans.
        fragment_excludelist = set()
        for _, fragment_reader in self.downloader.get_inventory():
            raw_list = dict()
            for row in fragment_reader:
                key = row[1]
                key_match = raw_pattern.match(key)
                if key_match and key_match.group("raw_prefix") in raw_prefixes:
                    raw_list[key_match.group("filename")] = key
            unprocessed = set(raw_list.keys())
            # Track the .json metadata twins of the .dcm images as well.
            unprocessed_json = {
                key.replace(".dcm", ".json") for key in unprocessed
            }
            if len(unprocessed) == 0:
                continue
            # Second pass over the whole inventory: cross off every raw
            # filename that already appears among the processed keys.
            for f, fragment_reader2 in self.downloader.get_inventory(
                fragment_excludelist
            ):
                filenames = set()
                for row in fragment_reader2:
                    # Processed file cache
                    item = processed_pattern.match(row[1])
                    if item:
                        filenames.add(item.group("filename"))
                if len(filenames) == 0:
                    fragment_excludelist.add(f)
                unprocessed = unprocessed - filenames
                unprocessed_json = unprocessed_json - filenames
                if len(unprocessed) == 0 and len(unprocessed_json) == 0:
                    break
            # A surviving .json twin re-adds its .dcm as pending.
            unprocessed |= {
                key.replace(".json", ".dcm") for key in unprocessed_json
            }
            for unproc in unprocessed:
                yield raw_list[unproc]
    def get_processed_data_list(self):
        """Getting the list of processed data files from the warehouse

        Yields
        ------
        str
            The keys to the processed data files to look at.
        """
        pattern = re.compile(
            r"^(training|validation)/data/.*/(?P<filename>[^/]*)$"
        )
        for _, fragment_reader in self.downloader.get_inventory():
            for row in fragment_reader:
                key = row[1]
                key_match = pattern.match(key)
                if key_match:
                    yield key
    def get_processed_images_list(self):
        """Getting the list of processed non-data files (ie. images and
        metadata) from the warehouse.

        Yields
        ------
        str
            The keys to the processed data files to look at.
        """
        # Negative lookahead excludes the "data" subtree.
        pattern = re.compile(
            r"^(training|validation)/(?!data)[^/]*/.*/(?P<filename>[^/]*)$"
        )
        for _, fragment_reader in self.downloader.get_inventory():
            for row in fragment_reader:
                key = row[1]
                key_match = pattern.match(key)
                if key_match:
                    yield key
| 31.85618 | 123 | 0.541267 | import csv
import gzip
import logging
import re
import sys
import tempfile
import boto3
import mondrian
from botocore.exceptions import ClientError
from warehouse.components.constants import TRAINING_PERCENTAGE
mondrian.setup(excepthook=True)
logger = logging.getLogger()
class PipelineConfig:
    """Configuration settings for the whole pipeline."""
    def __init__(self):
        # Defaults used until set_config() is called with real settings.
        self.config = dict(
            {
                "raw_prefixes": [],
                "training_percentage": TRAINING_PERCENTAGE,
                "sites": {"split": [], "training": [], "validation": []},
            }
        )
        self.sites = dict()
    def set_config(self, input_config):
        """Replace the configuration and rebuild the site -> group lookup."""
        self.config = input_config
        # Flatten the per-group site lists into a centre -> group lookup.
        for group in self.config["sites"].keys():
            for site in self.config["sites"][group]:
                self.sites[site] = group
        logger.debug(f"Training percentage: {self.get_training_percentage()}%")
    def get_raw_prefixes(self):
        """Return the set of configured "raw-..." prefixes to process."""
        return set(self.config.get("raw_prefixes", []))
    def get_training_percentage(self):
        """Return the training percentage (default or configured), 0-100."""
        training_percent = self.config.get(
            "training_percentage", TRAINING_PERCENTAGE
        )
        # Clamp out-of-range values rather than rejecting them.
        if training_percent > 100:
            training_percent = 100
        if training_percent < 0:
            training_percent = 0
        return training_percent
    def get_site_group(self, submitting_centre):
        """Return the group ("training"/"validation"/"split") a submitting
        centre is assigned to, or None when the centre is unknown."""
        return self.sites.get(submitting_centre)
class S3Client:
    """Thin wrapper around a boto3 S3 client bound to a single work bucket."""

    def __init__(self, bucket):
        self._bucket = bucket
        self._client = boto3.client("s3")

    @property
    def bucket(self):
        # Name of the bucket every operation acts on.
        return self._bucket

    @property
    def client(self):
        # The underlying boto3 S3 client, exposed for advanced callers.
        return self._client

    def object_exists(self, key):
        """Return True if `key` exists in the work bucket, False on a 404.

        Raises
        ------
        botocore.exceptions.ClientError
            For any client-side error other than a 404.
        """
        try:
            self._client.head_object(Bucket=self._bucket, Key=key)
        except ClientError as e:
            if e.response["Error"]["Code"] == "404":
                return False
            # Bug fix: this previously did `raise ClientError`, which
            # instantiates ClientError with no arguments (a TypeError at
            # raise time) and discards the original error. Re-raise the
            # caught exception instead.
            raise
        else:
            return True

    def get_object(self, key):
        """Return the raw boto3 get_object response for `key`."""
        try:
            args = {"Bucket": self._bucket, "Key": key}
            return self._client.get_object(**args)
        except ClientError:
            raise

    def object_content(self, key, content_range=None):
        """Return the object's payload bytes; `content_range` is an optional
        HTTP byte-range string passed through as the `Range` argument."""
        try:
            args = {"Bucket": self._bucket, "Key": key}
            if content_range is not None:
                args["Range"] = content_range
            file_content = self._client.get_object(**args)["Body"].read()
        except ClientError:
            raise
        return file_content

    def put_object(self, key, content):
        """Store `content` under `key` in the work bucket."""
        try:
            args = {"Bucket": self._bucket, "Key": key, "Body": content}
            self._client.put_object(**args)
        except ClientError:
            raise

    def copy_object(self, old_key, new_key):
        """Server-side copy of `old_key` to `new_key` within the bucket."""
        try:
            args = {
                "Bucket": self._bucket,
                "CopySource": {"Bucket": self._bucket, "Key": old_key},
                "Key": new_key,
            }
            self._client.copy_object(**args)
        except ClientError:
            raise

    def upload_file(self, key, file_name):
        """Upload the local file `file_name` to `key` in the work bucket.

        Raises
        ------
        botocore.exceptions.ClientError
            If there's any client side transfer error.
        FileNotFoundError
            If the file to be uploaded doesn't exist.
        """
        try:
            self._client.upload_file(file_name, self._bucket, key)
        except (ClientError, FileNotFoundError):
            raise
class InventoryDownloader:
    """Downloads and iterates the S3 inventory listing of the work bucket.

    The inventory lives in a companion bucket named `<main bucket>-inventory`.
    Any failure to read it is treated as fatal (logged, then sys.exit(1)).
    """
    def __init__(self, main_bucket):
        self.main_bucket = main_bucket
        self.inventory_bucket = self.main_bucket + "-inventory"
        self._get_inventory_list()
    def _get_inventory_list(self):
        """Resolve the latest daily inventory symlink into a list of
        gzipped inventory fragment keys (stored on self.inventory_list)."""
        try:
            inventory_bucket = self.main_bucket + "-inventory"
            s3_client = boto3.client("s3")
            objs = s3_client.list_objects_v2(
                Bucket=inventory_bucket,
                Prefix=f"{self.main_bucket}/daily-full-inventory/hive",
            )["Contents"]
            # Keys are date-stamped, so lexicographic max is the newest.
            latest_symlink = sorted([obj["Key"] for obj in objs])[-1]
            response = s3_client.get_object(
                Bucket=inventory_bucket, Key=latest_symlink
            )
            # One "s3://bucket/key" URL per line; keep the bare keys.
            self.inventory_list = [
                line.replace(f"s3://{inventory_bucket}/", "")
                for line in response["Body"].read().decode("utf-8").split("\n")
            ]
        except Exception as e:
            logger.error(f"Can't use inventory due to run time error: {e}")
            sys.exit(1)
    def get_inventory(self, excludeline=set()):
        """Yield (fragment index, csv.reader) pairs over every inventory
        fragment, skipping indices listed in `excludeline`.  (The mutable
        default is safe here: the set is only read, never mutated.)"""
        try:
            s3_client = boto3.client("s3")
            for index, inventory_file in enumerate(self.inventory_list):
                if index in excludeline:
                    logger.debug(
                        f"Skipping inventory file as requested: {inventory_file}"
                    )
                    continue
                logger.debug(f"Downloading inventory file: {inventory_file}")
                # Fragments are gzip-compressed CSV; spool to a temp file
                # and hand back a streaming reader.
                with tempfile.TemporaryFile(mode="w+b") as f:
                    s3_client.download_fileobj(
                        self.inventory_bucket, inventory_file, f
                    )
                    f.seek(0)
                    with gzip.open(f, mode="rt") as cf:
                        reader = csv.reader(cf)
                        yield index, reader
        except Exception as e:  # noqa: E722
            logger.error(f"Can't use inventory due to run time error: {e}")
            sys.exit(1)
    def get_bucket(self):
        """Return the name of the main S3 bucket this downloader uses."""
        return self.main_bucket
class CacheContradiction(Exception):
    """Raised when a patient ID is seen with conflicting group assignments."""
    pass
class PatientCache:
    """A cache mapping patient IDs to their training/validation assignment."""
    def __init__(self, downloader):
        # downloader: an initialized InventoryDownloader instance.
        self.downloader = downloader
        self.store = dict()  # patient_id -> bool ("is in training group")
        self._load_cache()
    def _load_cache(self):
        """Seed the cache from every processed-data key in the inventory."""
        pattern = re.compile(
            r"^(?P<group>training|validation)/data/(?P<pseudonym>[^/]*)/[^/]*$"
        )
        for f, fragment_reader in self.downloader.get_inventory():
            for row in fragment_reader:
                key = row[1]  # column 1 of an inventory row is the object key
                key_match = pattern.match(key)
                if key_match:
                    self.add(
                        key_match.group("pseudonym"),
                        key_match.group("group"),
                    )
    def add(self, patient_id, group):
        """Record `patient_id` in `group` ("training"/"validation"); raises
        CacheContradiction when it conflicts with a previous assignment."""
        if patient_id not in self.store:
            self.store[patient_id] = group == "training"
        elif self.store[patient_id] != (group == "training"):
            raise CacheContradiction(
                f"Found patient with ambiguous groups: {patient_id}"
            )
    def get_group(self, patient_id):
        """Return "training"/"validation" for a known patient, else None."""
        group = None
        try:
            group = "training" if self.store[patient_id] else "validation"
        except KeyError:
            # Not cached — fall through and return None.
            pass
        return group
class FileList:
    """Generator views over the S3 inventory.

    Produces object keys for raw uploads, raw images still awaiting
    processing, and already-processed data/image files.
    """
    def __init__(self, downloader):
        self.downloader = downloader
        self.bucket = downloader.get_bucket()
    def get_raw_data_list(self, raw_prefixes=frozenset()):
        """Yield keys of raw data files under any prefix in *raw_prefixes*.

        Keys look like ``raw-<site>/<YYYY-MM-DD>/data/<filename>``.

        The default is an immutable ``frozenset()`` rather than the
        mutable ``set()`` previously used (mutable-default-argument
        pitfall); only membership tests are performed, so behaviour is
        unchanged for all callers.
        """
        pattern = re.compile(
            r"^(?P<raw_prefix>raw-.*)/(\d{4}-\d{2}-\d{2})/data/(?P<filename>[^/]*)$"
        )
        for r, fragment_reader in self.downloader.get_inventory():
            for row in fragment_reader:
                key = row[1]
                key_match = pattern.match(key)
                if key_match and key_match.group("raw_prefix") in raw_prefixes:
                    yield key
    def get_pending_raw_images_list(self, raw_prefixes=frozenset()):
        """Yield keys of raw image files with no processed output yet.

        For each inventory fragment, collect raw image filenames under
        *raw_prefixes*, then scan all fragments for processed outputs
        (``training|validation`` / ``xray|ct|mri``) and subtract the
        filenames found.  Fragments containing no processed filenames at
        all are remembered in ``fragment_excludelist`` so later passes
        skip them.  A raw file counts as processed only when both its
        ``.dcm`` and ``.json`` counterparts have been seen (raw filenames
        are assumed to end in ``.dcm``).
        """
        raw_pattern = re.compile(
            r"^(?P<raw_prefix>raw-.*)/\d{4}-\d{2}-\d{2}/images/(?P<filename>[^/]*)$"
        )
        processed_pattern = re.compile(
            r"^(training|validation)/(xray|ct|mri).*/(?P<filename>[^/]*)$"
        )
        fragment_excludelist = set()
        for _, fragment_reader in self.downloader.get_inventory():
            raw_list = dict()
            for row in fragment_reader:
                key = row[1]
                key_match = raw_pattern.match(key)
                if key_match and key_match.group("raw_prefix") in raw_prefixes:
                    raw_list[key_match.group("filename")] = key
            unprocessed = set(raw_list.keys())
            unprocessed_json = {
                key.replace(".dcm", ".json") for key in unprocessed
            }
            if len(unprocessed) == 0:
                continue
            for f, fragment_reader2 in self.downloader.get_inventory(
                fragment_excludelist
            ):
                filenames = set()
                for row in fragment_reader2:
                    item = processed_pattern.match(row[1])
                    if item:
                        filenames.add(item.group("filename"))
                # Fragments with no processed files can be skipped on
                # subsequent passes.
                if len(filenames) == 0:
                    fragment_excludelist.add(f)
                unprocessed = unprocessed - filenames
                unprocessed_json = unprocessed_json - filenames
                if len(unprocessed) == 0 and len(unprocessed_json) == 0:
                    break
            # A missing .json counterpart marks the .dcm as still pending.
            unprocessed |= {
                key.replace(".json", ".dcm") for key in unprocessed_json
            }
            for unproc in unprocessed:
                yield raw_list[unproc]
    def get_processed_data_list(self):
        """Yield keys under ``(training|validation)/data/``."""
        pattern = re.compile(
            r"^(training|validation)/data/.*/(?P<filename>[^/]*)$"
        )
        for _, fragment_reader in self.downloader.get_inventory():
            for row in fragment_reader:
                key = row[1]
                key_match = pattern.match(key)
                if key_match:
                    yield key
    def get_processed_images_list(self):
        """Yield processed image keys (any non-``data`` subfolder)."""
        pattern = re.compile(
            r"^(training|validation)/(?!data)[^/]*/.*/(?P<filename>[^/]*)$"
        )
        for _, fragment_reader in self.downloader.get_inventory():
            for row in fragment_reader:
                key = row[1]
                key_match = pattern.match(key)
                if key_match:
                    yield key
| true | true |
f72e65bb1baacfec1ef3804586f97ab8315edec7 | 2,585 | py | Python | cootbx/__init__.py | jbeilstenedmands/cctbx_project | c228fb15ab10377f664c39553d866281358195aa | [
"BSD-3-Clause-LBNL"
] | null | null | null | cootbx/__init__.py | jbeilstenedmands/cctbx_project | c228fb15ab10377f664c39553d866281358195aa | [
"BSD-3-Clause-LBNL"
] | null | null | null | cootbx/__init__.py | jbeilstenedmands/cctbx_project | c228fb15ab10377f664c39553d866281358195aa | [
"BSD-3-Clause-LBNL"
] | null | null | null |
from __future__ import division
import os.path
def write_disable_nomenclature_errors (f) :
  """Emit a snippet that silences nomenclature errors in Coot, guarded
  for older Coot versions where the setting does not exist."""
  for snippet_line in ("try :",
                       "  set_nomenclature_errors_on_read(\"ignore\")",
                       "except Exception :",
                       "  pass") :
    f.write(snippet_line + "\n")
def create_refinement_view_script (
    mtz_file_name,
    pdb_file_name,
    coot_script_name="view_in_coot.py",
    work_dir=None,
    show_symmetry=True,
    peaks_file_name=None,
    bad_ligand_list=None,
    placed_ligand_list=None) :
  """Write a self-contained Coot script that loads the refined model and
  map coefficients, plus optional zoom lists for suspicious residues and
  placed ligands.

  :param mtz_file_name: MTZ file with map coefficients
  :param pdb_file_name: refined model
  :param coot_script_name: output script path
  :param work_dir: if given, file paths are reduced to basenames and
    resolved relative to this directory inside the generated script
  :param peaks_file_name: optional peaks file to display
  :param bad_ligand_list: residues to list under "suspicious density"
  :param placed_ligand_list: ligands to list under "Placed ligands"
  """
  from iotbx.file_reader import any_file
  from libtbx.utils import concatenate_python_script
  import libtbx.load_env
  # Scan the MTZ labels for anomalous map coefficients.  FIX: the
  # ANOMDIFF branch previously assigned to an unused variable
  # (have_anom_residual_map), so have_residual_map -- the flag actually
  # written into the generated script below -- was always False.
  have_anom_map = have_residual_map = False
  mtz_in = any_file(mtz_file_name).assert_file_type("hkl")
  for array in mtz_in.file_server.miller_arrays :
    labels = array.info().labels
    if ("ANOM" in labels) :
      have_anom_map = True
    elif ("ANOMDIFF" in labels) :
      have_residual_map = True
  f = open(coot_script_name, "w")
  print >> f, "import coot"
  print >> f, "import os"
  write_disable_nomenclature_errors(f)
  # Inline the generic viewer helpers so the script has no cctbx
  # dependency at run time.
  load_script = libtbx.env.find_in_repositories(
    relative_path="cctbx_project/cootbx/view_refinement.py",
    test=os.path.isfile)
  assert (load_script is not None)
  concatenate_python_script(out=f, file_name=load_script)
  zoom_ligand_script = libtbx.env.find_in_repositories(
    relative_path="cctbx_project/cootbx/simple_zoom_list.py",
    test=os.path.isfile)
  concatenate_python_script(out=f, file_name=zoom_ligand_script)
  if (work_dir is not None) :
    # The generated script resolves basenames relative to work_dir.
    pdb_file_name = os.path.basename(pdb_file_name)
    mtz_file_name = os.path.basename(mtz_file_name)
  f.write("""load_refinement(\n""")
  f.write("""pdb_file="%s",\n""" % pdb_file_name)
  f.write("""map_file="%s",\n""" % mtz_file_name)
  f.write("""show_symmetry=%s,\n""" % show_symmetry)
  f.write("""have_anom_map=%s,\n""" % have_anom_map)
  f.write("""have_residual_map=%s,\n""" % have_residual_map)
  if (work_dir is not None) :
    f.write("""work_dir="%s",\n""" % work_dir)
  if (peaks_file_name is not None) :
    f.write("""peaks_file="%s",\n""" % peaks_file_name)
  f.write(")\n")
  if (bad_ligand_list is not None) and (len(bad_ligand_list) > 0) :
    print >> f, """draw_simple_zoom_list("""
    print >> f, """ title="Residues in suspicious density","""
    print >> f, """ items=%s)""" % str(bad_ligand_list)
  if (placed_ligand_list is not None) :
    print >> f, """draw_simple_zoom_list("""
    print >> f, """ title="Placed ligands","""
    print >> f, """ items=%s)""" % str(placed_ligand_list)
  f.close()
| 37.463768 | 67 | 0.696325 |
from __future__ import division
import os.path
def write_disable_nomenclature_errors (f) :
  # Emit a guarded snippet so the generated Coot script ignores
  # nomenclature errors where the setting exists.
  f.write("try :\n")
  f.write("  set_nomenclature_errors_on_read(\"ignore\")\n")
  f.write("except Exception :\n")
  f.write("  pass\n")
def create_refinement_view_script (
    mtz_file_name,
    pdb_file_name,
    coot_script_name="view_in_coot.py",
    work_dir=None,
    show_symmetry=True,
    peaks_file_name=None,
    bad_ligand_list=None,
    placed_ligand_list=None) :
  """Write a self-contained Coot script that loads the refined model and
  map coefficients, plus optional zoom lists for suspicious residues and
  placed ligands."""
  from iotbx.file_reader import any_file
  from libtbx.utils import concatenate_python_script
  import libtbx.load_env
  have_anom_map = False
  have_anom_residual_map = False
  mtz_in = any_file(mtz_file_name).assert_file_type("hkl")
  have_anom_map = have_residual_map = False
  for array in mtz_in.file_server.miller_arrays :
    labels = array.info().labels
    if ("ANOM" in labels) :
      have_anom_map = True
    elif ("ANOMDIFF" in labels) :
      # NOTE(review): this assigns have_anom_residual_map, but the flag
      # written into the script below is have_residual_map (always
      # False) -- looks like a bug; confirm intended behaviour.
      have_anom_residual_map = True
  f = open(coot_script_name, "w")
  print >> f, "import coot"
  print >> f, "import os"
  write_disable_nomenclature_errors(f)
  # Inline the generic viewer helpers so the generated script has no
  # cctbx dependency at run time.
  load_script = libtbx.env.find_in_repositories(
    relative_path="cctbx_project/cootbx/view_refinement.py",
    test=os.path.isfile)
  assert (load_script is not None)
  concatenate_python_script(out=f, file_name=load_script)
  zoom_ligand_script = libtbx.env.find_in_repositories(
    relative_path="cctbx_project/cootbx/simple_zoom_list.py",
    test=os.path.isfile)
  concatenate_python_script(out=f, file_name=zoom_ligand_script)
  if (work_dir is not None) :
    # Paths are reduced to basenames and resolved inside the script.
    pdb_file_name = os.path.basename(pdb_file_name)
    mtz_file_name = os.path.basename(mtz_file_name)
  f.write("""load_refinement(\n""")
  f.write("""pdb_file="%s",\n""" % pdb_file_name)
  f.write("""map_file="%s",\n""" % mtz_file_name)
  f.write("""show_symmetry=%s,\n""" % show_symmetry)
  f.write("""have_anom_map=%s,\n""" % have_anom_map)
  f.write("""have_residual_map=%s,\n""" % have_residual_map)
  if (work_dir is not None) :
    f.write("""work_dir="%s",\n""" % work_dir)
  if (peaks_file_name is not None) :
    f.write("""peaks_file="%s",\n""" % peaks_file_name)
  f.write(")\n")
  if (bad_ligand_list is not None) and (len(bad_ligand_list) > 0) :
    print >> f, """draw_simple_zoom_list("""
    print >> f, """ title="Residues in suspicious density","""
    print >> f, """ items=%s)""" % str(bad_ligand_list)
  if (placed_ligand_list is not None) :
    print >> f, """draw_simple_zoom_list("""
    print >> f, """ title="Placed ligands","""
    print >> f, """ items=%s)""" % str(placed_ligand_list)
  f.close()
| true | true |
f72e681168a596571b63014df7cf94e45a96aa0d | 746 | py | Python | JinaAI/utils/get_data.py | TheVikJ/SUAVE | eff37d167a4318ba8ba77dff873422c89db489b2 | [
"MIT"
] | 6 | 2021-07-24T05:28:51.000Z | 2021-11-08T12:55:56.000Z | JinaAI/utils/get_data.py | TheVikJ/SUAVE | eff37d167a4318ba8ba77dff873422c89db489b2 | [
"MIT"
] | null | null | null | JinaAI/utils/get_data.py | TheVikJ/SUAVE | eff37d167a4318ba8ba77dff873422c89db489b2 | [
"MIT"
] | 2 | 2021-07-24T16:22:33.000Z | 2021-08-01T12:55:05.000Z | import json
import requests
import pandas as pd
import os
baseurl = "http://exploreapiswith.tech/api/"
# Fetch the list of all API categories once, at import time.
# NOTE(review): no timeout or error handling -- a network failure raises
# here and aborts the script; confirm this is acceptable.
categories = json.loads(requests.get(
    baseurl + "category").text)
def get_category_api(category_name=None):
    """Return the list of APIs for *category_name* from the public API.

    :param category_name: category slug to fetch (required in practice).
    :raises ValueError: if category_name is omitted.
    :raises requests.RequestException: on network failure or timeout.
    """
    # The old default of None produced a TypeError while building the
    # URL; fail fast with a clear message instead (signature unchanged).
    if category_name is None:
        raise ValueError("category_name is required")
    # timeout prevents the script from hanging forever on a dead server;
    # Response.json() replaces the json.loads(resp.text) round-trip.
    response = requests.get(baseurl + "category/" + category_name, timeout=30)
    return response.json()
# Flatten the per-category listings into a single list of API records.
api_list = [entry for category in categories
            for entry in get_category_api(category)]
# Remove stale outputs so the exclusive-create open below cannot fail.
for stale in ("data/apis.json", "data/apis.csv"):
    if os.path.exists(stale):
        os.remove(stale)
with open(r"data/apis.json", "x") as f:
    json.dump(api_list, f)
# Round-trip through pandas to produce the CSV flavour of the dataset.
json_file = pd.read_json(r"data/apis.json")
json_file.to_csv(r"data/apis.csv", index=False)
| 19.631579 | 52 | 0.698391 | import json
import requests
import pandas as pd
import os
baseurl = "http://exploreapiswith.tech/api/"
# Fetch the list of all API categories once, at import time.
# NOTE(review): no timeout or error handling on the HTTP call.
categories = json.loads(requests.get(
    baseurl + "category").text)
def get_category_api(category_name=None):
    # Return the list of APIs for one category from the public API.
    # NOTE(review): a None category_name raises TypeError when the URL
    # is built -- the default is effectively unusable.
    category_apis = json.loads(requests.get(
        baseurl + "category/" + category_name).text)
    return category_apis
# Collect every category's APIs into a single flat list.
api_list = []
for category in categories:
    api = get_category_api(category)
    api_list += api
# Remove stale outputs so the exclusive-create open below cannot fail.
if os.path.exists("data/apis.json"):
    os.remove("data/apis.json")
if os.path.exists("data/apis.csv"):
    os.remove("data/apis.csv")
with open(r"data/apis.json", "x") as f:
    json.dump(api_list, f)
# Round-trip through pandas to produce the CSV flavour of the dataset.
json_file = pd.read_json(r"data/apis.json")
json_file.to_csv(r"data/apis.csv", index=False)
| true | true |
f72e6826e6b680e35cd4d77fd37db27ecc6e072d | 24,999 | py | Python | libs/canvas.py | yangjjie94/labelImg | 8c14af70bfc0e6f3e22e3a6cbe21e586b0492ba0 | [
"MIT"
] | null | null | null | libs/canvas.py | yangjjie94/labelImg | 8c14af70bfc0e6f3e22e3a6cbe21e586b0492ba0 | [
"MIT"
] | null | null | null | libs/canvas.py | yangjjie94/labelImg | 8c14af70bfc0e6f3e22e3a6cbe21e586b0492ba0 | [
"MIT"
] | null | null | null |
try:
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
except ImportError:
from PyQt4.QtGui import *
from PyQt4.QtCore import *
#from PyQt4.QtOpenGL import *
from libs.shape import Shape
from libs.lib import distance
# Cursor shapes used to reflect the canvas interaction state.
CURSOR_DEFAULT = Qt.ArrowCursor
CURSOR_POINT = Qt.PointingHandCursor
CURSOR_DRAW = Qt.CrossCursor
CURSOR_MOVE = Qt.ClosedHandCursor
CURSOR_GRAB = Qt.OpenHandCursor
# class Canvas(QGLWidget):
class Canvas(QWidget):
    """Interactive drawing surface for image annotation.

    Renders the loaded pixmap plus the current list of ``Shape`` objects
    and implements two mutually exclusive modes:

    * ``CREATE`` -- the user drags out a new rectangular shape;
    * ``EDIT``   -- the user selects, moves, copies or deletes existing
      shapes and their vertices.

    Zooming and scrolling are delegated to the parent window via the
    ``zoomRequest`` / ``scrollRequest`` signals.
    """
    # Signals used to notify the main window of user actions.
    zoomRequest = pyqtSignal(int)
    scrollRequest = pyqtSignal(int, int)
    newShape = pyqtSignal()
    selectionChanged = pyqtSignal(bool)
    shapeMoved = pyqtSignal()
    drawingPolygon = pyqtSignal(bool)
    CREATE, EDIT = list(range(2))
    # Pixel distance within which the cursor "snaps" to a vertex.
    epsilon = 11.0
    def __init__(self, *args, **kwargs):
        """Initialise empty annotation state and widget options."""
        super(Canvas, self).__init__(*args, **kwargs)
        # Initialise local state.
        self.mode = self.EDIT
        self.shapes = []
        self.current = None
        self.selectedShape = None  # save the selected shape here
        self.selectedShapeCopy = None
        self.drawingLineColor = QColor(0, 0, 255)
        self.drawingRectColor = QColor(0, 0, 255)
        self.line = Shape(line_color=self.drawingLineColor)
        self.prevPoint = QPointF()
        self.offsets = QPointF(), QPointF()
        self.scale = 1.0
        self.pixmap = QPixmap()
        self.visible = {}
        self._hideBackround = False
        self.hideBackround = False
        self.hShape = None
        self.hVertex = None
        self._painter = QPainter()
        self._cursor = CURSOR_DEFAULT
        # Menus: (without a shadow copy, with a shadow copy)
        self.menus = (QMenu(), QMenu())
        # Set widget options.
        self.setMouseTracking(True)  # allow mouseMoveEvent()
        self.setFocusPolicy(Qt.WheelFocus)
        self.verified = False
    def setDrawingColor(self, qColor):
        """Use *qColor* for both the rubber-band line and the draw rect."""
        self.drawingLineColor = qColor
        self.drawingRectColor = qColor
    def enterEvent(self, ev):
        """Restore the mode-specific cursor when the mouse enters."""
        self.overrideCursor(self._cursor)
    def leaveEvent(self, ev):
        """Restore the application cursor when the mouse leaves."""
        self.restoreCursor()
    def focusOutEvent(self, ev):
        """Restore the application cursor when focus is lost."""
        self.restoreCursor()
    def isVisible(self, shape):
        """Return whether *shape* should be painted (default True)."""
        return self.visible.get(shape, True)  # if not found, return True
    def drawing(self):
        """True while in shape-creation mode."""
        return self.mode == self.CREATE
    def editing(self):
        """True while in shape-editing mode."""
        return self.mode == self.EDIT
    def setEditing(self, value=True):
        """Switch between EDIT (True) and CREATE (False) modes."""
        self.mode = self.EDIT if value else self.CREATE
        if not value:  # Create
            self.unHighlight()
            self.deSelectShape()
        self.prevPoint = QPointF()
        self.repaint()
    def unHighlight(self):
        """Clear any vertex/shape hover highlight."""
        if self.hShape:
            self.hShape.highlightClear()
        self.hVertex = self.hShape = None
    def selectedVertex(self):
        """True when a vertex is currently highlighted for dragging."""
        return self.hVertex is not None
    def mouseMoveEvent(self, ev):
        """Update line with last point and current coordinates."""
        pos = self.transformPos(ev.pos())
        # Update coordinates in status bar if image is opened
        window = self.parent().window()
        if window.filePath is not None:
            self.parent().window().labelCoordinates.setText(
                'X: %d; Y: %d' % (pos.x(), pos.y()))
        # Polygon drawing.
        if self.drawing():
            self.overrideCursor(CURSOR_DRAW)
            if self.current:
                color = self.drawingLineColor
                if self.outOfPixmap(pos):
                    # Don't allow the user to draw outside the pixmap.
                    # Project the point to the pixmap's edges.
                    pos = self.intersectionPoint(self.current[-1], pos)
                elif len(self.current) > 1 and self.closeEnough(pos, self.current[0]):
                    # Attract line to starting point and colorise to alert the
                    # user:
                    pos = self.current[0]
                    color = self.current.line_color
                    self.overrideCursor(CURSOR_POINT)
                    self.current.highlightVertex(0, Shape.NEAR_VERTEX)
                self.line[1] = pos
                self.line.line_color = color
                self.prevPoint = QPointF()
                self.current.highlightClear()
            else:
                self.prevPoint = pos
            self.repaint()
            return
        # Polygon copy moving.
        if Qt.RightButton & ev.buttons():
            if self.selectedShapeCopy and self.prevPoint:
                self.overrideCursor(CURSOR_MOVE)
                self.boundedMoveShape(self.selectedShapeCopy, pos)
                self.repaint()
            elif self.selectedShape:
                self.selectedShapeCopy = self.selectedShape.copy()
                self.repaint()
            return
        # Polygon/Vertex moving.
        if Qt.LeftButton & ev.buttons():
            if self.selectedVertex():
                self.boundedMoveVertex(pos)
                self.shapeMoved.emit()
                self.repaint()
            elif self.selectedShape and self.prevPoint:
                self.overrideCursor(CURSOR_MOVE)
                self.boundedMoveShape(self.selectedShape, pos)
                self.shapeMoved.emit()
                self.repaint()
            return
        # Just hovering over the canvas, 2 posibilities:
        # - Highlight shapes
        # - Highlight vertex
        # Update shape/vertex fill and tooltip value accordingly.
        self.setToolTip("Image")
        for shape in reversed([s for s in self.shapes if self.isVisible(s)]):
            # Look for a nearby vertex to highlight. If that fails,
            # check if we happen to be inside a shape.
            index = shape.nearestVertex(pos, self.epsilon)
            if index is not None:
                if self.selectedVertex():
                    self.hShape.highlightClear()
                self.hVertex, self.hShape = index, shape
                shape.highlightVertex(index, shape.MOVE_VERTEX)
                self.overrideCursor(CURSOR_POINT)
                self.setToolTip("Click & drag to move point")
                self.setStatusTip(self.toolTip())
                self.update()
                break
            elif shape.containsPoint(pos):
                if self.selectedVertex():
                    self.hShape.highlightClear()
                self.hVertex, self.hShape = None, shape
                self.setToolTip(
                    "Click & drag to move shape '%s'" % shape.label)
                self.setStatusTip(self.toolTip())
                self.overrideCursor(CURSOR_GRAB)
                self.update()
                break
        else:  # Nothing found, clear highlights, reset state.
            if self.hShape:
                self.hShape.highlightClear()
                self.update()
            self.hVertex, self.hShape = None, None
            self.overrideCursor(CURSOR_DEFAULT)
    def mousePressEvent(self, ev):
        """Start drawing (CREATE) or select a shape/vertex (EDIT)."""
        pos = self.transformPos(ev.pos())
        if ev.button() == Qt.LeftButton:
            if self.drawing():
                self.handleDrawing(pos)
            else:
                self.selectShapePoint(pos)
                self.prevPoint = pos
                self.repaint()
        elif ev.button() == Qt.RightButton and self.editing():
            self.selectShapePoint(pos)
            self.prevPoint = pos
            self.repaint()
    def mouseReleaseEvent(self, ev):
        """Show the context menu (right button) or finish a drag/draw."""
        if ev.button() == Qt.RightButton:
            menu = self.menus[bool(self.selectedShapeCopy)]
            self.restoreCursor()
            if not menu.exec_(self.mapToGlobal(ev.pos()))\
               and self.selectedShapeCopy:
                # Cancel the move by deleting the shadow copy.
                self.selectedShapeCopy = None
                self.repaint()
        elif ev.button() == Qt.LeftButton and self.selectedShape:
            if self.selectedVertex():
                self.overrideCursor(CURSOR_POINT)
            else:
                self.overrideCursor(CURSOR_GRAB)
        elif ev.button() == Qt.LeftButton:
            pos = self.transformPos(ev.pos())
            if self.drawing():
                self.handleDrawing(pos)
    def endMove(self, copy=False):
        """Finish a shadow-copy drag: duplicate or move the selected shape."""
        # if copy, shift the selected, and keep both Shape
        # if just move, shift the selected, and remove the former
        assert self.selectedShape and self.selectedShapeCopy
        shape = self.selectedShapeCopy
        #del shape.fill_color
        #del shape.line_color
        if copy:
            self.shapes.append(shape)
            self.selectedShape.selected = False
            self.selectedShape = shape
            self.repaint()
        else:  # just move
            self.selectedShape.points = [p for p in shape.points]
        self.selectedShapeCopy = None
    def hideBackroundShapes(self, value):
        """Toggle hiding of all non-selected shapes."""
        self.hideBackround = value
        if self.selectedShape:
            # Only hide other shapes if there is a current selection.
            # Otherwise the user will not be able to select a shape.
            self.setHiding(True)
            self.repaint()
    def handleDrawing(self, pos):
        """First click anchors the rectangle; second click completes it."""
        if self.current and (self.current.reachMaxPoints() is False):
            # Second click: derive the remaining two corners from the
            # anchor and the current rubber-band end point.
            initPos = self.current[0]
            minX = initPos.x()
            minY = initPos.y()
            targetPos = self.line[1]
            maxX = targetPos.x()
            maxY = targetPos.y()
            self.current.addPoint(QPointF(maxX, minY))
            self.current.addPoint(targetPos)
            self.current.addPoint(QPointF(minX, maxY))
            self.finalise()
        elif not self.outOfPixmap(pos):
            # First click: anchor a new shape at pos.
            self.current = Shape()
            self.current.addPoint(pos)
            self.line.points = [pos, pos]
            self.setHiding()
            self.drawingPolygon.emit(True)
            self.update()
    def setHiding(self, enable=True):
        """Apply (or lift) the hide-background preference."""
        self._hideBackround = self.hideBackround if enable else False
    def canCloseShape(self):
        """True when the in-progress polygon has enough points to close."""
        return self.drawing() and self.current and len(self.current) > 2
    def mouseDoubleClickEvent(self, ev):
        """Close the in-progress polygon on double click."""
        # We need at least 4 points here, since the mousePress handler
        # adds an extra one before this handler is called.
        if self.canCloseShape() and len(self.current) > 3:
            self.current.popPoint()
            self.finalise()
    def selectShape(self, shape):
        """Make *shape* the current selection and notify listeners."""
        self.deSelectShape()
        shape.selected = True
        self.selectedShape = shape
        self.setHiding()
        self.selectionChanged.emit(True)
        self.update()
    def selectShapePoint(self, point):
        """Select the first shape created which contains this point."""
        self.deSelectShape()
        if self.selectedVertex():  # A vertex is marked for selection.
            index, shape = self.hVertex, self.hShape
            shape.highlightVertex(index, shape.MOVE_VERTEX)
            self.selectShape(shape)
            return
        for shape in reversed(self.shapes):
            if self.isVisible(shape) and shape.containsPoint(point):
                self.selectShape(shape)
                self.calculateOffsets(shape, point)
                return
    def calculateOffsets(self, shape, point):
        """Store offsets from *point* to the shape's bounding-box corners."""
        rect = shape.boundingRect()
        x1 = rect.x() - point.x()
        y1 = rect.y() - point.y()
        x2 = (rect.x() + rect.width()) - point.x()
        y2 = (rect.y() + rect.height()) - point.y()
        self.offsets = QPointF(x1, y1), QPointF(x2, y2)
    def boundedMoveVertex(self, pos):
        """Move the grabbed vertex, dragging its neighbours so the shape
        stays rectangular, and clamp to the pixmap."""
        index, shape = self.hVertex, self.hShape
        point = shape[index]
        if self.outOfPixmap(pos):
            pos = self.intersectionPoint(point, pos)
        shiftPos = pos - point
        shape.moveVertexBy(index, shiftPos)
        # Neighbouring vertices each absorb one axis of the shift.
        lindex = (index + 1) % 4
        rindex = (index + 3) % 4
        lshift = None
        rshift = None
        if index % 2 == 0:
            rshift = QPointF(shiftPos.x(), 0)
            lshift = QPointF(0, shiftPos.y())
        else:
            lshift = QPointF(shiftPos.x(), 0)
            rshift = QPointF(0, shiftPos.y())
        shape.moveVertexBy(rindex, rshift)
        shape.moveVertexBy(lindex, lshift)
    def boundedMoveShape(self, shape, pos):
        """Translate *shape* towards *pos*, clamped inside the pixmap.

        Returns True when the shape actually moved."""
        if self.outOfPixmap(pos):
            return False  # No need to move
        o1 = pos + self.offsets[0]
        if self.outOfPixmap(o1):
            pos -= QPointF(min(0, o1.x()), min(0, o1.y()))
        o2 = pos + self.offsets[1]
        if self.outOfPixmap(o2):
            pos += QPointF(min(0, self.pixmap.width() - o2.x()),
                           min(0, self.pixmap.height() - o2.y()))
        # The next line tracks the new position of the cursor
        # relative to the shape, but also results in making it
        # a bit "shaky" when nearing the border and allows it to
        # go outside of the shape's area for some reason. XXX
        #self.calculateOffsets(self.selectedShape, pos)
        dp = pos - self.prevPoint
        if dp:
            shape.moveBy(dp)
            self.prevPoint = pos
            return True
        return False
    def deSelectShape(self):
        """Clear the current selection and notify listeners."""
        if self.selectedShape:
            self.selectedShape.selected = False
            self.selectedShape = None
            self.setHiding(False)
            self.selectionChanged.emit(False)
            self.update()
    def deleteSelected(self):
        """Remove and return the selected shape, or None if nothing selected."""
        if self.selectedShape:
            shape = self.selectedShape
            self.shapes.remove(self.selectedShape)
            self.selectedShape = None
            self.update()
            return shape
    def copySelectedShape(self):
        """Duplicate the selected shape slightly offset; return the copy."""
        if self.selectedShape:
            shape = self.selectedShape.copy()
            self.deSelectShape()
            self.shapes.append(shape)
            shape.selected = True
            self.selectedShape = shape
            self.boundedShiftShape(shape)
            return shape
    def boundedShiftShape(self, shape):
        """Nudge *shape* by a small offset, staying inside the pixmap."""
        # Try to move in one direction, and if it fails in another.
        # Give up if both fail.
        point = shape[0]
        offset = QPointF(2.0, 2.0)
        self.calculateOffsets(shape, point)
        self.prevPoint = point
        if not self.boundedMoveShape(shape, point - offset):
            self.boundedMoveShape(shape, point + offset)
    def paintEvent(self, event):
        """Render the pixmap, all shapes, the in-progress rectangle and
        the drawing crosshair."""
        if not self.pixmap:
            return super(Canvas, self).paintEvent(event)
        p = self._painter
        p.begin(self)
        p.setRenderHint(QPainter.Antialiasing)
        p.setRenderHint(QPainter.HighQualityAntialiasing)
        p.setRenderHint(QPainter.SmoothPixmapTransform)
        p.scale(self.scale, self.scale)
        p.translate(self.offsetToCenter())
        p.drawPixmap(0, 0, self.pixmap)
        Shape.scale = self.scale
        for shape in self.shapes:
            if (shape.selected or not self._hideBackround) and self.isVisible(shape):
                shape.fill = shape.selected or shape == self.hShape
                shape.paint(p)
        if self.current:
            self.current.paint(p)
            self.line.paint(p)
        if self.selectedShapeCopy:
            self.selectedShapeCopy.paint(p)
        # Paint rect
        if self.current is not None and len(self.line) == 2:
            leftTop = self.line[0]
            rightBottom = self.line[1]
            rectWidth = rightBottom.x() - leftTop.x()
            rectHeight = rightBottom.y() - leftTop.y()
            p.setPen(self.drawingRectColor)
            brush = QBrush(Qt.BDiagPattern)
            p.setBrush(brush)
            p.drawRect(leftTop.x(), leftTop.y(), rectWidth, rectHeight)
        # Crosshair that follows the cursor while drawing.
        if self.drawing() and not self.prevPoint.isNull() and not self.outOfPixmap(self.prevPoint):
            p.setPen(QColor(0, 0, 0))
            p.drawLine(self.prevPoint.x(), 0, self.prevPoint.x(), self.pixmap.height())
            p.drawLine(0, self.prevPoint.y(), self.pixmap.width(), self.prevPoint.y())
        self.setAutoFillBackground(True)
        # Green background for verified images, grey otherwise.
        if self.verified:
            pal = self.palette()
            pal.setColor(self.backgroundRole(), QColor(184, 239, 38, 128))
            self.setPalette(pal)
        else:
            pal = self.palette()
            pal.setColor(self.backgroundRole(), QColor(232, 232, 232, 255))
            self.setPalette(pal)
        p.end()
    def transformPos(self, point):
        """Convert from widget-logical coordinates to painter-logical coordinates."""
        return point / self.scale - self.offsetToCenter()
    def offsetToCenter(self):
        """Return the translation that centres the pixmap in the widget."""
        s = self.scale
        area = super(Canvas, self).size()
        w, h = self.pixmap.width() * s, self.pixmap.height() * s
        aw, ah = area.width(), area.height()
        x = (aw - w) / (2 * s) if aw > w else 0
        y = (ah - h) / (2 * s) if ah > h else 0
        return QPointF(x, y)
    def outOfPixmap(self, p):
        """True when point *p* lies outside the pixmap bounds."""
        w, h = self.pixmap.width(), self.pixmap.height()
        return not (0 <= p.x() <= w and 0 <= p.y() <= h)
    def finalise(self):
        """Commit the in-progress shape to self.shapes and emit newShape."""
        assert self.current
        if self.current.points[0] == self.current.points[-1]:
            # Degenerate shape (closed onto its start) -- discard it.
            self.current = None
            self.drawingPolygon.emit(False)
            self.update()
            return
        self.current.close()
        self.shapes.append(self.current)
        self.current = None
        self.setHiding(False)
        self.newShape.emit()
        self.update()
    def closeEnough(self, p1, p2):
        """True when p1 and p2 are within self.epsilon of each other."""
        #d = distance(p1 - p2)
        #m = (p1-p2).manhattanLength()
        # print "d %.2f, m %d, %.2f" % (d, m, d - m)
        return distance(p1 - p2) < self.epsilon
    def intersectionPoint(self, p1, p2):
        """Project the segment p1-p2 onto the nearest pixmap edge."""
        # Cycle through each image edge in clockwise fashion,
        # and find the one intersecting the current line segment.
        # http://paulbourke.net/geometry/lineline2d/
        size = self.pixmap.size()
        points = [(0, 0),
                  (size.width(), 0),
                  (size.width(), size.height()),
                  (0, size.height())]
        x1, y1 = p1.x(), p1.y()
        x2, y2 = p2.x(), p2.y()
        d, i, (x, y) = min(self.intersectingEdges((x1, y1), (x2, y2), points))
        x3, y3 = points[i]
        x4, y4 = points[(i + 1) % 4]
        if (x, y) == (x1, y1):
            # Handle cases where previous point is on one of the edges.
            if x3 == x4:
                return QPointF(x3, min(max(0, y2), max(y3, y4)))
            else:  # y3 == y4
                return QPointF(min(max(0, x2), max(x3, x4)), y3)
        return QPointF(x, y)
    def intersectingEdges(self, x1y1, x2y2, points):
        """For each edge formed by `points', yield the intersection
        with the line segment `(x1,y1) - (x2,y2)`, if it exists.
        Also return the distance of `(x2,y2)' to the middle of the
        edge along with its index, so that the one closest can be chosen."""
        x1, y1 = x1y1
        x2, y2 = x2y2
        for i in range(4):
            x3, y3 = points[i]
            x4, y4 = points[(i + 1) % 4]
            denom = (y4 - y3) * (x2 - x1) - (x4 - x3) * (y2 - y1)
            nua = (x4 - x3) * (y1 - y3) - (y4 - y3) * (x1 - x3)
            nub = (x2 - x1) * (y1 - y3) - (y2 - y1) * (x1 - x3)
            if denom == 0:
                # This covers two cases:
                #   nua == nub == 0: Coincident
                #   otherwise: Parallel
                continue
            ua, ub = nua / denom, nub / denom
            if 0 <= ua <= 1 and 0 <= ub <= 1:
                x = x1 + ua * (x2 - x1)
                y = y1 + ua * (y2 - y1)
                m = QPointF((x3 + x4) / 2, (y3 + y4) / 2)
                d = distance(m - QPointF(x2, y2))
                yield d, i, (x, y)
    # These two, along with a call to adjustSize are required for the
    # scroll area.
    def sizeHint(self):
        return self.minimumSizeHint()
    def minimumSizeHint(self):
        if self.pixmap:
            return self.scale * self.pixmap.size()
        return super(Canvas, self).minimumSizeHint()
    def wheelEvent(self, ev):
        """Zoom with Ctrl+wheel, otherwise scroll.

        Supports both the Qt4 (delta/orientation) and Qt5 (angleDelta)
        wheel-event APIs."""
        qt_version = 4 if hasattr(ev, "delta") else 5
        if qt_version == 4:
            if ev.orientation() == Qt.Vertical:
                v_delta = ev.delta()
                h_delta = 0
            else:
                h_delta = ev.delta()
                v_delta = 0
        else:
            delta = ev.angleDelta()
            h_delta = delta.x()
            v_delta = delta.y()
        mods = ev.modifiers()
        if Qt.ControlModifier == int(mods) and v_delta:
            self.zoomRequest.emit(v_delta)
        else:
            v_delta and self.scrollRequest.emit(v_delta, Qt.Vertical)
            h_delta and self.scrollRequest.emit(h_delta, Qt.Horizontal)
        ev.accept()
    def keyPressEvent(self, ev):
        """Escape cancels drawing; Return closes the polygon; arrow keys
        nudge the selected shape by one pixel."""
        key = ev.key()
        if key == Qt.Key_Escape and self.current:
            print('ESC press')
            self.current = None
            self.drawingPolygon.emit(False)
            self.update()
        elif key == Qt.Key_Return and self.canCloseShape():
            self.finalise()
        elif key == Qt.Key_Left and self.selectedShape:
            self.moveOnePixel('Left')
        elif key == Qt.Key_Right and self.selectedShape:
            self.moveOnePixel('Right')
        elif key == Qt.Key_Up and self.selectedShape:
            self.moveOnePixel('Up')
        elif key == Qt.Key_Down and self.selectedShape:
            self.moveOnePixel('Down')
    def moveOnePixel(self, direction):
        """Shift all four vertices of the selected shape by one pixel in
        *direction*, unless that would leave the pixmap."""
        # print(self.selectedShape.points)
        if direction == 'Left' and not self.moveOutOfBound(QPointF(-1.0, 0)):
            # print("move Left one pixel")
            self.selectedShape.points[0] += QPointF(-1.0, 0)
            self.selectedShape.points[1] += QPointF(-1.0, 0)
            self.selectedShape.points[2] += QPointF(-1.0, 0)
            self.selectedShape.points[3] += QPointF(-1.0, 0)
        elif direction == 'Right' and not self.moveOutOfBound(QPointF(1.0, 0)):
            # print("move Right one pixel")
            self.selectedShape.points[0] += QPointF(1.0, 0)
            self.selectedShape.points[1] += QPointF(1.0, 0)
            self.selectedShape.points[2] += QPointF(1.0, 0)
            self.selectedShape.points[3] += QPointF(1.0, 0)
        elif direction == 'Up' and not self.moveOutOfBound(QPointF(0, -1.0)):
            # print("move Up one pixel")
            self.selectedShape.points[0] += QPointF(0, -1.0)
            self.selectedShape.points[1] += QPointF(0, -1.0)
            self.selectedShape.points[2] += QPointF(0, -1.0)
            self.selectedShape.points[3] += QPointF(0, -1.0)
        elif direction == 'Down' and not self.moveOutOfBound(QPointF(0, 1.0)):
            # print("move Down one pixel")
            self.selectedShape.points[0] += QPointF(0, 1.0)
            self.selectedShape.points[1] += QPointF(0, 1.0)
            self.selectedShape.points[2] += QPointF(0, 1.0)
            self.selectedShape.points[3] += QPointF(0, 1.0)
        self.shapeMoved.emit()
        self.repaint()
    def moveOutOfBound(self, step):
        """True if shifting the selected shape by *step* leaves the pixmap."""
        points = [p1+p2 for p1, p2 in zip(self.selectedShape.points, [step]*4)]
        return True in map(self.outOfPixmap, points)
    def setLastLabel(self, text, line_color = None, fill_color = None):
        """Assign label (and optional colors) to the most recent shape."""
        assert text
        self.shapes[-1].label = text
        if line_color:
            self.shapes[-1].line_color = line_color
        if fill_color:
            self.shapes[-1].fill_color = fill_color
        return self.shapes[-1]
    def undoLastLine(self):
        """Re-open the last committed shape for further editing."""
        assert self.shapes
        self.current = self.shapes.pop()
        self.current.setOpen()
        self.line.points = [self.current[-1], self.current[0]]
        self.drawingPolygon.emit(True)
    def resetAllLines(self):
        """Discard the last committed shape and cancel drawing."""
        assert self.shapes
        self.current = self.shapes.pop()
        self.current.setOpen()
        self.line.points = [self.current[-1], self.current[0]]
        self.drawingPolygon.emit(True)
        self.current = None
        self.drawingPolygon.emit(False)
        self.update()
    def loadPixmap(self, pixmap):
        """Display *pixmap* and clear all shapes."""
        self.pixmap = pixmap
        self.shapes = []
        self.repaint()
    def loadShapes(self, shapes):
        """Replace the shape list and abort any in-progress drawing."""
        self.shapes = list(shapes)
        self.current = None
        self.repaint()
    def setShapeVisible(self, shape, value):
        """Show or hide an individual shape."""
        self.visible[shape] = value
        self.repaint()
    def currentCursor(self):
        """Return the application override cursor's shape, or None."""
        cursor = QApplication.overrideCursor()
        if cursor is not None:
            cursor = cursor.shape()
        return cursor
    def overrideCursor(self, cursor):
        """Remember *cursor* and apply it as the application override."""
        self._cursor = cursor
        if self.currentCursor() is None:
            QApplication.setOverrideCursor(cursor)
        else:
            QApplication.changeOverrideCursor(cursor)
    def restoreCursor(self):
        """Drop the application override cursor."""
        QApplication.restoreOverrideCursor()
    def resetState(self):
        """Clear the pixmap and restore the cursor (e.g. on file close)."""
        self.restoreCursor()
        self.pixmap = None
        self.update()
| 36.601757 | 99 | 0.563903 |
try:
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
except ImportError:
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from libs.shape import Shape
from libs.lib import distance
# Cursor shapes used to reflect the canvas interaction state.
CURSOR_DEFAULT = Qt.ArrowCursor
CURSOR_POINT = Qt.PointingHandCursor
CURSOR_DRAW = Qt.CrossCursor
CURSOR_MOVE = Qt.ClosedHandCursor
CURSOR_GRAB = Qt.OpenHandCursor
class Canvas(QWidget):
    """Annotation drawing surface.

    Renders a pixmap plus the ``Shape`` rectangles drawn on top of it, and
    drives two interaction modes: CREATE (draw a new rectangle via the
    rubber-band ``self.line``) and EDIT (select / move shapes and vertices).
    Shape coordinates are kept in pixmap space; ``transformPos`` converts
    widget coordinates using ``self.scale`` and ``offsetToCenter``.
    """

    # Signals consumed by the owning window.
    zoomRequest = pyqtSignal(int)
    scrollRequest = pyqtSignal(int, int)
    newShape = pyqtSignal()
    selectionChanged = pyqtSignal(bool)
    shapeMoved = pyqtSignal()
    drawingPolygon = pyqtSignal(bool)

    # The two interaction modes.
    CREATE, EDIT = list(range(2))
    # Hit radius (pixmap units) for vertex grabbing and first-point snapping.
    epsilon = 11.0

    def __init__(self, *args, **kwargs):
        super(Canvas, self).__init__(*args, **kwargs)
        self.mode = self.EDIT
        self.shapes = []                # all finished shapes
        self.current = None             # shape currently being drawn
        self.selectedShape = None       # shape selected in EDIT mode
        self.selectedShapeCopy = None   # floating copy dragged with RMB
        self.drawingLineColor = QColor(0, 0, 255)
        self.drawingRectColor = QColor(0, 0, 255)
        self.line = Shape(line_color=self.drawingLineColor)  # rubber band
        self.prevPoint = QPointF()
        self.offsets = QPointF(), QPointF()  # filled by calculateOffsets()
        self.scale = 1.0
        self.pixmap = QPixmap()
        self.visible = {}               # per-shape visibility flags
        # NOTE(review): "Backround" (sic) is a historical typo kept for
        # compatibility with external users of these attributes.
        self._hideBackround = False
        self.hideBackround = False
        self.hShape = None              # shape currently hovered
        self.hVertex = None             # index of vertex currently hovered
        self._painter = QPainter()
        self._cursor = CURSOR_DEFAULT
        # Context menus: [0] normal selection, [1] while dragging a copy
        # (indexed with bool(self.selectedShapeCopy) in mouseReleaseEvent).
        self.menus = (QMenu(), QMenu())
        self.setMouseTracking(True)
        self.setFocusPolicy(Qt.WheelFocus)
        self.verified = False

    def setDrawingColor(self, qColor):
        """Use *qColor* for both the rubber-band line and the preview rect."""
        self.drawingLineColor = qColor
        self.drawingRectColor = qColor

    def enterEvent(self, ev):
        # Re-apply our cursor when the mouse re-enters the widget.
        self.overrideCursor(self._cursor)

    def leaveEvent(self, ev):
        self.restoreCursor()

    def focusOutEvent(self, ev):
        self.restoreCursor()

    def isVisible(self, shape):
        """Return the visibility flag for *shape* (defaults to True)."""
        return self.visible.get(shape, True)

    def drawing(self):
        """True while in CREATE mode."""
        return self.mode == self.CREATE

    def editing(self):
        """True while in EDIT mode."""
        return self.mode == self.EDIT

    def setEditing(self, value=True):
        """Switch to EDIT mode when *value* is truthy, else CREATE mode."""
        self.mode = self.EDIT if value else self.CREATE
        if not value:
            # Entering CREATE mode: drop any hover highlight and selection.
            self.unHighlight()
            self.deSelectShape()
        self.prevPoint = QPointF()
        self.repaint()

    def unHighlight(self):
        """Clear the hover highlight, if any."""
        if self.hShape:
            self.hShape.highlightClear()
        self.hVertex = self.hShape = None

    def selectedVertex(self):
        """True when the cursor is currently over a shape vertex."""
        return self.hVertex is not None

    def mouseMoveEvent(self, ev):
        """Update line while drawing, drag shapes/vertices, or hover-highlight."""
        pos = self.transformPos(ev.pos())

        window = self.parent().window()
        if window.filePath is not None:
            # Live coordinate read-out in the main window's status area.
            self.parent().window().labelCoordinates.setText(
                'X: %d; Y: %d' % (pos.x(), pos.y()))

        # CREATE mode: stretch the rubber-band line toward the cursor.
        if self.drawing():
            self.overrideCursor(CURSOR_DRAW)
            if self.current:
                color = self.drawingLineColor
                if self.outOfPixmap(pos):
                    # Project the point to the pixmap's edges.
                    pos = self.intersectionPoint(self.current[-1], pos)
                elif len(self.current) > 1 and self.closeEnough(pos, self.current[0]):
                    # Snap to the start point so the polygon can be closed.
                    pos = self.current[0]
                    color = self.current.line_color
                    self.overrideCursor(CURSOR_POINT)
                    self.current.highlightVertex(0, Shape.NEAR_VERTEX)
                self.line[1] = pos
                self.line.line_color = color
                self.prevPoint = QPointF()
                self.current.highlightClear()
            else:
                self.prevPoint = pos
            self.repaint()
            return

        # Right-drag: move a *copy* of the selected shape.
        if Qt.RightButton & ev.buttons():
            if self.selectedShapeCopy and self.prevPoint:
                self.overrideCursor(CURSOR_MOVE)
                self.boundedMoveShape(self.selectedShapeCopy, pos)
                self.repaint()
            elif self.selectedShape:
                self.selectedShapeCopy = self.selectedShape.copy()
                self.repaint()
            return

        # Left-drag: move the grabbed vertex or the whole selected shape.
        if Qt.LeftButton & ev.buttons():
            if self.selectedVertex():
                self.boundedMoveVertex(pos)
                self.shapeMoved.emit()
                self.repaint()
            elif self.selectedShape and self.prevPoint:
                self.overrideCursor(CURSOR_MOVE)
                self.boundedMoveShape(self.selectedShape, pos)
                self.shapeMoved.emit()
                self.repaint()
            return

        # No buttons held: hover feedback.  Topmost (last-drawn) shape wins.
        self.setToolTip("Image")
        for shape in reversed([s for s in self.shapes if self.isVisible(s)]):
            index = shape.nearestVertex(pos, self.epsilon)
            if index is not None:
                if self.selectedVertex():
                    self.hShape.highlightClear()
                self.hVertex, self.hShape = index, shape
                shape.highlightVertex(index, shape.MOVE_VERTEX)
                self.overrideCursor(CURSOR_POINT)
                self.setToolTip("Click & drag to move point")
                self.setStatusTip(self.toolTip())
                self.update()
                break
            elif shape.containsPoint(pos):
                if self.selectedVertex():
                    self.hShape.highlightClear()
                self.hVertex, self.hShape = None, shape
                self.setToolTip(
                    "Click & drag to move shape '%s'" % shape.label)
                self.setStatusTip(self.toolTip())
                self.overrideCursor(CURSOR_GRAB)
                self.update()
                break
        else:
            # for-else: nothing hovered -> clear any previous highlight.
            if self.hShape:
                self.hShape.highlightClear()
                self.update()
            self.hVertex, self.hShape = None, None
            self.overrideCursor(CURSOR_DEFAULT)

    def mousePressEvent(self, ev):
        """Start drawing (CREATE) or select a shape/vertex (EDIT)."""
        pos = self.transformPos(ev.pos())

        if ev.button() == Qt.LeftButton:
            if self.drawing():
                self.handleDrawing(pos)
            else:
                self.selectShapePoint(pos)
                self.prevPoint = pos
                self.repaint()
        elif ev.button() == Qt.RightButton and self.editing():
            self.selectShapePoint(pos)
            self.prevPoint = pos
            self.repaint()

    def mouseReleaseEvent(self, ev):
        """Pop the context menu (RMB) or finish a drag / second draw click."""
        if ev.button() == Qt.RightButton:
            # menus[1] while a dragged copy exists, menus[0] otherwise.
            menu = self.menus[bool(self.selectedShapeCopy)]
            self.restoreCursor()
            if not menu.exec_(self.mapToGlobal(ev.pos()))\
                    and self.selectedShapeCopy:
                # Cancel the move by deleting the shadow copy.
                self.selectedShapeCopy = None
                self.repaint()
        elif ev.button() == Qt.LeftButton and self.selectedShape:
            if self.selectedVertex():
                self.overrideCursor(CURSOR_POINT)
            else:
                self.overrideCursor(CURSOR_GRAB)
        elif ev.button() == Qt.LeftButton:
            pos = self.transformPos(ev.pos())
            if self.drawing():
                self.handleDrawing(pos)

    def endMove(self, copy=False):
        """Commit the dragged copy: duplicate the shape or move the original."""
        assert self.selectedShape and self.selectedShapeCopy
        shape = self.selectedShapeCopy
        if copy:
            self.shapes.append(shape)
            self.selectedShape.selected = False
            self.selectedShape = shape
            self.repaint()
        else:
            self.selectedShape.points = [p for p in shape.points]
        self.selectedShapeCopy = None

    def hideBackroundShapes(self, value):
        """Toggle hiding of non-selected shapes while something is selected."""
        self.hideBackround = value
        if self.selectedShape:
            # Only hide other shapes if there is a current selection.
            # Otherwise the user will not be able to select a shape.
            self.setHiding(True)
            self.repaint()

    def handleDrawing(self, pos):
        """First click anchors the rectangle; second click completes it."""
        if self.current and (self.current.reachMaxPoints() is False):
            # Complete the axis-aligned rectangle from the anchor point and
            # the current rubber-band end point.
            initPos = self.current[0]
            minX = initPos.x()
            minY = initPos.y()
            targetPos = self.line[1]
            maxX = targetPos.x()
            maxY = targetPos.y()
            self.current.addPoint(QPointF(maxX, minY))
            self.current.addPoint(targetPos)
            self.current.addPoint(QPointF(minX, maxY))
            self.finalise()
        elif not self.outOfPixmap(pos):
            # Anchor a new rectangle.
            self.current = Shape()
            self.current.addPoint(pos)
            self.line.points = [pos, pos]
            self.setHiding()
            self.drawingPolygon.emit(True)
            self.update()

    def setHiding(self, enable=True):
        self._hideBackround = self.hideBackround if enable else False

    def canCloseShape(self):
        """True when the in-progress polygon has enough points to close."""
        return self.drawing() and self.current and len(self.current) > 2

    def mouseDoubleClickEvent(self, ev):
        # We need at least 4 points here, since the mousePress handler of the
        # double-click already added one extra point before this fires.
        if self.canCloseShape() and len(self.current) > 3:
            self.current.popPoint()
            self.finalise()

    def selectShape(self, shape):
        """Make *shape* the selection and notify listeners."""
        self.deSelectShape()
        shape.selected = True
        self.selectedShape = shape
        self.setHiding()
        self.selectionChanged.emit(True)
        self.update()

    def selectShapePoint(self, point):
        """Select the first shape created which contains this point."""
        self.deSelectShape()
        if self.selectedVertex():  # A vertex is marked for selection.
            index, shape = self.hVertex, self.hShape
            shape.highlightVertex(index, shape.MOVE_VERTEX)
            self.selectShape(shape)
            return
        for shape in reversed(self.shapes):
            if self.isVisible(shape) and shape.containsPoint(point):
                self.selectShape(shape)
                self.calculateOffsets(shape, point)
                return

    def calculateOffsets(self, shape, point):
        """Record the vectors from *point* to the shape's bounding-box corners.

        Used by boundedMoveShape to keep the whole shape inside the pixmap.
        """
        rect = shape.boundingRect()
        x1 = rect.x() - point.x()
        y1 = rect.y() - point.y()
        x2 = (rect.x() + rect.width()) - point.x()
        y2 = (rect.y() + rect.height()) - point.y()
        self.offsets = QPointF(x1, y1), QPointF(x2, y2)

    def boundedMoveVertex(self, pos):
        """Move the hovered vertex, dragging its two rectangle neighbours.

        Assumes 4-point rectangular shapes: the adjacent vertices (index +-1
        mod 4) each follow one axis of the drag so the shape stays a rectangle.
        """
        index, shape = self.hVertex, self.hShape
        point = shape[index]
        if self.outOfPixmap(pos):
            pos = self.intersectionPoint(point, pos)

        shiftPos = pos - point
        shape.moveVertexBy(index, shiftPos)

        lindex = (index + 1) % 4
        rindex = (index + 3) % 4
        lshift = None
        rshift = None
        if index % 2 == 0:
            rshift = QPointF(shiftPos.x(), 0)
            lshift = QPointF(0, shiftPos.y())
        else:
            lshift = QPointF(shiftPos.x(), 0)
            rshift = QPointF(0, shiftPos.y())
        shape.moveVertexBy(rindex, rshift)
        shape.moveVertexBy(lindex, lshift)

    def boundedMoveShape(self, shape, pos):
        """Translate *shape* toward *pos*, clamped so it stays on the pixmap.

        Returns True when the shape actually moved.
        """
        if self.outOfPixmap(pos):
            return False  # No need to move
        o1 = pos + self.offsets[0]
        if self.outOfPixmap(o1):
            pos -= QPointF(min(0, o1.x()), min(0, o1.y()))
        o2 = pos + self.offsets[1]
        if self.outOfPixmap(o2):
            pos += QPointF(min(0, self.pixmap.width() - o2.x()),
                           min(0, self.pixmap.height() - o2.y()))
        # The next line tracks the new position of the cursor relative to the
        # shape, but also results in making it move with the mouse.
        #self.calculateOffsets(self.selectedShape, pos)
        dp = pos - self.prevPoint
        if dp:
            shape.moveBy(dp)
            self.prevPoint = pos
            return True
        return False

    def deSelectShape(self):
        """Drop the current selection and notify listeners."""
        if self.selectedShape:
            self.selectedShape.selected = False
            self.selectedShape = None
            self.setHiding(False)
            self.selectionChanged.emit(False)
            self.update()

    def deleteSelected(self):
        """Remove the selected shape and return it (or None if nothing)."""
        if self.selectedShape:
            shape = self.selectedShape
            self.shapes.remove(self.selectedShape)
            self.selectedShape = None
            self.update()
            return shape

    def copySelectedShape(self):
        """Duplicate the selected shape slightly offset; return the copy."""
        if self.selectedShape:
            shape = self.selectedShape.copy()
            self.deSelectShape()
            self.shapes.append(shape)
            shape.selected = True
            self.selectedShape = shape
            self.boundedShiftShape(shape)
            return shape

    def boundedShiftShape(self, shape):
        # Try to move in one direction, and if it fails in another.
        # Give up if both fail.
        point = shape[0]
        offset = QPointF(2.0, 2.0)
        self.calculateOffsets(shape, point)
        self.prevPoint = point
        if not self.boundedMoveShape(shape, point - offset):
            self.boundedMoveShape(shape, point + offset)

    def paintEvent(self, event):
        """Draw the pixmap, all shapes, the rubber band and the crosshair."""
        if not self.pixmap:
            return super(Canvas, self).paintEvent(event)

        p = self._painter
        p.begin(self)
        p.setRenderHint(QPainter.Antialiasing)
        p.setRenderHint(QPainter.HighQualityAntialiasing)
        p.setRenderHint(QPainter.SmoothPixmapTransform)

        # Paint in pixmap coordinates: scale then translate to center.
        p.scale(self.scale, self.scale)
        p.translate(self.offsetToCenter())

        p.drawPixmap(0, 0, self.pixmap)
        Shape.scale = self.scale
        for shape in self.shapes:
            if (shape.selected or not self._hideBackround) and self.isVisible(shape):
                shape.fill = shape.selected or shape == self.hShape
                shape.paint(p)
        if self.current:
            self.current.paint(p)
            self.line.paint(p)
        if self.selectedShapeCopy:
            self.selectedShapeCopy.paint(p)

        # Paint rect
        if self.current is not None and len(self.line) == 2:
            leftTop = self.line[0]
            rightBottom = self.line[1]
            rectWidth = rightBottom.x() - leftTop.x()
            rectHeight = rightBottom.y() - leftTop.y()
            p.setPen(self.drawingRectColor)
            brush = QBrush(Qt.BDiagPattern)
            p.setBrush(brush)
            p.drawRect(leftTop.x(), leftTop.y(), rectWidth, rectHeight)

        # Crosshair that follows the cursor while drawing.
        if self.drawing() and not self.prevPoint.isNull() and not self.outOfPixmap(self.prevPoint):
            p.setPen(QColor(0, 0, 0))
            p.drawLine(self.prevPoint.x(), 0, self.prevPoint.x(), self.pixmap.height())
            p.drawLine(0, self.prevPoint.y(), self.pixmap.width(), self.prevPoint.y())

        # Background tint: green-ish when the image is marked verified.
        self.setAutoFillBackground(True)
        if self.verified:
            pal = self.palette()
            pal.setColor(self.backgroundRole(), QColor(184, 239, 38, 128))
            self.setPalette(pal)
        else:
            pal = self.palette()
            pal.setColor(self.backgroundRole(), QColor(232, 232, 232, 255))
            self.setPalette(pal)

        p.end()

    def transformPos(self, point):
        """Convert from widget-logical coordinates to painter coordinates."""
        return point / self.scale - self.offsetToCenter()

    def offsetToCenter(self):
        """Translation (in scaled units) that centers the pixmap in the widget."""
        s = self.scale
        area = super(Canvas, self).size()
        w, h = self.pixmap.width() * s, self.pixmap.height() * s
        aw, ah = area.width(), area.height()
        x = (aw - w) / (2 * s) if aw > w else 0
        y = (ah - h) / (2 * s) if ah > h else 0
        return QPointF(x, y)

    def outOfPixmap(self, p):
        """True when point *p* lies outside the pixmap rectangle."""
        w, h = self.pixmap.width(), self.pixmap.height()
        return not (0 <= p.x() <= w and 0 <= p.y() <= h)

    def finalise(self):
        """Store the in-progress shape and announce it via newShape."""
        assert self.current
        if self.current.points[0] == self.current.points[-1]:
            # Degenerate shape (first point == last): discard it.
            self.current = None
            self.drawingPolygon.emit(False)
            self.update()
            return

        self.current.close()
        self.shapes.append(self.current)
        self.current = None
        self.setHiding(False)
        self.newShape.emit()
        self.update()

    def closeEnough(self, p1, p2):
        """True when p1 and p2 are within ``epsilon`` (Euclidean distance)."""
        #d = distance(p1 - p2)
        #m = (p1-p2).manhattanLength()
        # print "d %.2f, m %d, %.2f" % (d, m, d - m)
        return distance(p1 - p2) < self.epsilon

    def intersectionPoint(self, p1, p2):
        """Clamp the segment p1->p2 to the pixmap: return the edge crossing."""
        # Cycle through each image edge in clockwise fashion,
        # and find the one intersecting the current line segment.
        # http://paulbourke.net/geometry/lineline2d/
        size = self.pixmap.size()
        points = [(0, 0),
                  (size.width(), 0),
                  (size.width(), size.height()),
                  (0, size.height())]
        x1, y1 = p1.x(), p1.y()
        x2, y2 = p2.x(), p2.y()
        d, i, (x, y) = min(self.intersectingEdges((x1, y1), (x2, y2), points))
        x3, y3 = points[i]
        x4, y4 = points[(i + 1) % 4]
        if (x, y) == (x1, y1):
            # Handle cases where previous point is on one of the edges.
            if x3 == x4:
                return QPointF(x3, min(max(0, y2), max(y3, y4)))
            else:  # y3 == y4
                return QPointF(min(max(0, x2), max(x3, x4)), y3)
        return QPointF(x, y)

    def intersectingEdges(self, x1y1, x2y2, points):
        """Yield (distance, edge-index, (x, y)) for edges crossing the segment.

        *points* lists the rectangle corners clockwise; distance is measured
        from the edge midpoint to (x2, y2), so min() prefers the closest edge.
        """
        x1, y1 = x1y1
        x2, y2 = x2y2
        for i in range(4):
            x3, y3 = points[i]
            x4, y4 = points[(i + 1) % 4]
            denom = (y4 - y3) * (x2 - x1) - (x4 - x3) * (y2 - y1)
            nua = (x4 - x3) * (y1 - y3) - (y4 - y3) * (x1 - x3)
            nub = (x2 - x1) * (y1 - y3) - (y2 - y1) * (x1 - x3)
            if denom == 0:
                # This covers two cases:
                #   nua == nub == 0: Coincident
                #   otherwise: Parallel
                continue
            ua, ub = nua / denom, nub / denom
            if 0 <= ua <= 1 and 0 <= ub <= 1:
                x = x1 + ua * (x2 - x1)
                y = y1 + ua * (y2 - y1)
                m = QPointF((x3 + x4) / 2, (y3 + y4) / 2)
                d = distance(m - QPointF(x2, y2))
                yield d, i, (x, y)

    # These two, along with a call to adjustSize are required for the
    # scroll area.
    def sizeHint(self):
        return self.minimumSizeHint()

    def minimumSizeHint(self):
        if self.pixmap:
            return self.scale * self.pixmap.size()
        return super(Canvas, self).minimumSizeHint()

    def wheelEvent(self, ev):
        """Translate wheel motion into zoomRequest (Ctrl) or scrollRequest."""
        # Qt4 exposes delta()/orientation(); Qt5 exposes angleDelta().
        qt_version = 4 if hasattr(ev, "delta") else 5
        if qt_version == 4:
            if ev.orientation() == Qt.Vertical:
                v_delta = ev.delta()
                h_delta = 0
            else:
                h_delta = ev.delta()
                v_delta = 0
        else:
            delta = ev.angleDelta()
            h_delta = delta.x()
            v_delta = delta.y()

        mods = ev.modifiers()
        if Qt.ControlModifier == int(mods) and v_delta:
            self.zoomRequest.emit(v_delta)
        else:
            v_delta and self.scrollRequest.emit(v_delta, Qt.Vertical)
            h_delta and self.scrollRequest.emit(h_delta, Qt.Horizontal)
        ev.accept()

    def keyPressEvent(self, ev):
        """Esc cancels drawing, Return closes the shape, arrows nudge 1px."""
        key = ev.key()
        if key == Qt.Key_Escape and self.current:
            print('ESC press')
            self.current = None
            self.drawingPolygon.emit(False)
            self.update()
        elif key == Qt.Key_Return and self.canCloseShape():
            self.finalise()
        elif key == Qt.Key_Left and self.selectedShape:
            self.moveOnePixel('Left')
        elif key == Qt.Key_Right and self.selectedShape:
            self.moveOnePixel('Right')
        elif key == Qt.Key_Up and self.selectedShape:
            self.moveOnePixel('Up')
        elif key == Qt.Key_Down and self.selectedShape:
            self.moveOnePixel('Down')

    def moveOnePixel(self, direction):
        """Nudge the selected (4-point) shape one pixel unless it would leave
        the pixmap; *direction* is one of 'Left'/'Right'/'Up'/'Down'."""
        # print(self.selectedShape.points)
        if direction == 'Left' and not self.moveOutOfBound(QPointF(-1.0, 0)):
            # print("move Left one pixel")
            self.selectedShape.points[0] += QPointF(-1.0, 0)
            self.selectedShape.points[1] += QPointF(-1.0, 0)
            self.selectedShape.points[2] += QPointF(-1.0, 0)
            self.selectedShape.points[3] += QPointF(-1.0, 0)
        elif direction == 'Right' and not self.moveOutOfBound(QPointF(1.0, 0)):
            # print("move Right one pixel")
            self.selectedShape.points[0] += QPointF(1.0, 0)
            self.selectedShape.points[1] += QPointF(1.0, 0)
            self.selectedShape.points[2] += QPointF(1.0, 0)
            self.selectedShape.points[3] += QPointF(1.0, 0)
        elif direction == 'Up' and not self.moveOutOfBound(QPointF(0, -1.0)):
            # print("move Up one pixel")
            self.selectedShape.points[0] += QPointF(0, -1.0)
            self.selectedShape.points[1] += QPointF(0, -1.0)
            self.selectedShape.points[2] += QPointF(0, -1.0)
            self.selectedShape.points[3] += QPointF(0, -1.0)
        elif direction == 'Down' and not self.moveOutOfBound(QPointF(0, 1.0)):
            # print("move Down one pixel")
            self.selectedShape.points[0] += QPointF(0, 1.0)
            self.selectedShape.points[1] += QPointF(0, 1.0)
            self.selectedShape.points[2] += QPointF(0, 1.0)
            self.selectedShape.points[3] += QPointF(0, 1.0)
        self.shapeMoved.emit()
        self.repaint()

    def moveOutOfBound(self, step):
        """True if shifting every point of the selection by *step* would put
        any of them outside the pixmap."""
        points = [p1+p2 for p1, p2 in zip(self.selectedShape.points, [step]*4)]
        return True in map(self.outOfPixmap, points)

    def setLastLabel(self, text, line_color = None, fill_color = None):
        """Label (and optionally recolor) the most recently finalised shape."""
        assert text
        self.shapes[-1].label = text
        if line_color:
            self.shapes[-1].line_color = line_color
        if fill_color:
            self.shapes[-1].fill_color = fill_color
        return self.shapes[-1]

    def undoLastLine(self):
        """Reopen the last finished shape for continued drawing."""
        assert self.shapes
        self.current = self.shapes.pop()
        self.current.setOpen()
        self.line.points = [self.current[-1], self.current[0]]
        self.drawingPolygon.emit(True)

    def resetAllLines(self):
        """Abandon the last shape entirely and leave drawing state clean."""
        assert self.shapes
        self.current = self.shapes.pop()
        self.current.setOpen()
        self.line.points = [self.current[-1], self.current[0]]
        self.drawingPolygon.emit(True)
        self.current = None
        self.drawingPolygon.emit(False)
        self.update()

    def loadPixmap(self, pixmap):
        """Show a new image; any existing shapes are discarded."""
        self.pixmap = pixmap
        self.shapes = []
        self.repaint()

    def loadShapes(self, shapes):
        """Replace the shape list (a copy of *shapes* is kept)."""
        self.shapes = list(shapes)
        self.current = None
        self.repaint()

    def setShapeVisible(self, shape, value):
        self.visible[shape] = value
        self.repaint()

    def currentCursor(self):
        """Return the shape of the application override cursor, or None."""
        cursor = QApplication.overrideCursor()
        if cursor is not None:
            cursor = cursor.shape()
        return cursor

    def overrideCursor(self, cursor):
        """Set (or update) the application-wide override cursor."""
        self._cursor = cursor
        if self.currentCursor() is None:
            QApplication.setOverrideCursor(cursor)
        else:
            QApplication.changeOverrideCursor(cursor)

    def restoreCursor(self):
        QApplication.restoreOverrideCursor()

    def resetState(self):
        """Forget the current image and cursor override."""
        self.restoreCursor()
        self.pixmap = None
        self.update()
| true | true |
f72e68844e89c751c604cdf5b912da555e19a617 | 981 | py | Python | setup.py | cnpls/pyords | 249591e96031cb2c49045a9984fd9c5f50ef757d | [
"MIT"
] | null | null | null | setup.py | cnpls/pyords | 249591e96031cb2c49045a9984fd9c5f50ef757d | [
"MIT"
] | null | null | null | setup.py | cnpls/pyords | 249591e96031cb2c49045a9984fd9c5f50ef757d | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
from pyords import __version__
import os

# Resolve files relative to this setup.py so builds work from any CWD.
currdir = os.path.dirname(os.path.abspath(__file__))

# Long description comes straight from the README.
long_description = ''
with open(os.path.join(currdir, 'README.md')) as f:
    long_description = f.read()

# Runtime dependencies are maintained in requirements.txt, one per line.
install_requires = []
with open(os.path.join(currdir, 'requirements.txt')) as f:
    install_requires = f.read().splitlines()

# Package metadata; the version is sourced from pyords.__version__.
setup(name='pyords',
      version=__version__,
      description='A python package for operations research and data science problems.',
      long_description=long_description,
      long_description_content_type="text/markdown",
      url='https://github.com/christopherpryer/pyords',
      author='Chris Pryer',
      author_email='christophpryer@gmail.com',
      license='PUBLIC',
      packages=find_packages(),
      install_requires=install_requires,
      entry_points ={
        'console_scripts': [
            'pyords = pyords:cli'
        ]
    },
      zip_safe=False) | 31.645161 | 87 | 0.673802 | from setuptools import setup, find_packages
# NOTE(review): duplicate of the setup.py content above (this dataset row's
# "content_no_comment" column); kept byte-identical apart from these notes.
from pyords import __version__
import os
currdir = os.path.dirname(os.path.abspath(__file__))
long_description = ''
with open(os.path.join(currdir, 'README.md')) as f:
    long_description = f.read()
install_requires = []
with open(os.path.join(currdir, 'requirements.txt')) as f:
    install_requires = f.read().splitlines()
setup(name='pyords',
      version=__version__,
      description='A python package for operations research and data science problems.',
      long_description=long_description,
      long_description_content_type="text/markdown",
      url='https://github.com/christopherpryer/pyords',
      author='Chris Pryer',
      author_email='christophpryer@gmail.com',
      license='PUBLIC',
      packages=find_packages(),
      install_requires=install_requires,
      entry_points ={
        'console_scripts': [
            'pyords = pyords:cli'
        ]
    },
      zip_safe=False) | true | true |
f72e6941d55bd0dffbd13951538964bc9cda8790 | 3,862 | py | Python | sequana/iuapc.py | khourhin/sequana | c56d4490b7c8edc8a0f63fd60578eb72ed64f1b5 | [
"BSD-3-Clause"
] | 138 | 2016-07-13T06:24:45.000Z | 2022-03-28T13:12:03.000Z | sequana/iuapc.py | khourhin/sequana | c56d4490b7c8edc8a0f63fd60578eb72ed64f1b5 | [
"BSD-3-Clause"
] | 655 | 2016-03-10T17:33:40.000Z | 2022-03-30T16:10:45.000Z | sequana/iuapc.py | khourhin/sequana | c56d4490b7c8edc8a0f63fd60578eb72ed64f1b5 | [
"BSD-3-Clause"
] | 39 | 2016-11-04T11:40:58.000Z | 2022-03-15T08:12:29.000Z | # -*- coding: utf-8 -*-
#
# This file is part of Sequana software
#
# Copyright (c) 2016 - Sequana Development Team
#
# File author(s):
# Thomas Cokelaer <thomas.cokelaer@pasteur.fr>
#
# Distributed under the terms of the 3-clause BSD license.
# The full license is in the LICENSE file, distributed with this software.
#
# website: https://github.com/sequana/sequana
# documentation: http://sequana.readthedocs.io
#
##############################################################################
import colorlog
logger = colorlog.getLogger(__name__)
"""
There are 20 amino acids (Stored in :ref:`amino_acids`) such as alanine.
Each of them has a name, a 3-letter codes and a one-letter codes (the IUAPC
code, after the International Union of Pure and Applied chemistry committee).
`ref:`amino_acids is a dictionary with keys as the one-letter code. The values
are tuples with the 3-letter code and full name.
amino_acids['A'] returns ('Ala', 'Alanine')
Som a protein can be written as a succession of letters made of the keys of the
dictionary. It is then easy to check the validity of a protein sequence.
"""
# DNA bases
# The four unambiguous DNA bases; ambiguity codes are handled by the
# dictionaries below.
dna_bases = ("A", "C", "T", "G")
# DNA bases names
# Human-readable name for each IUPAC nucleotide code.  The capitalised
# letters in the mnemonics spell the code itself (e.g. "pYrimidine" -> Y,
# "Strong" -> S, "Keto" -> K).
# NOTE(review): the entries mix base names ("Adenine") with nucleoside-style
# names ("Thymidine", "Cytidine"), and "Guanidine" looks like a typo for
# "Guanine"/"Guanosine" -- confirm before relying on these display strings.
dna_bases_names = {
    "A": "Adenine",
    "T": "Thymidine",
    "U": "Uridine",
    "G": "Guanidine",
    "C": "Cytidine",
    "Y": "pYrimidine",
    "R": "puRine",
    "S": "Strong",
    "W": "Weak",
    "K": "Keto",
    "M": "aMino",
    "B": "not A",
    "D": "not C",
    "H": "not G",
    "V": "not T/U",
    "N": "Unknown"}
# DNA bases represented
# Maps every IUPAC nucleotide code to a regex character class matching the
# concrete bases it can stand for (handy for pattern searches on sequences).
dna_ambiguities = {
    "A": "A",
    "C": "C",
    "G": "G",
    "T": "T",
    "R": "[GA]",
    "Y": "[CT]",
    "M": "[AC]",
    "K": "[GT]",
    "S": "[GC]",
    "W": "[AT]",
    "N": "[ACGT]",
    "B": "[CGT]",
    "D": "[AGT]",
    "H": "[ACT]",
    "V": "[ACG]"
    }
# IUPAC degeneracies. Complementary bases
# Watson-Crick complement extended to the ambiguity codes (R=[GA] pairs
# with Y=[CT], B with V, D with H); S, W, N and X map to themselves.
dna_complement = {
    'A': 'T',
    'B': 'V',
    'C': 'G',
    'D': 'H',
    'G': 'C',
    'H': 'D',
    'K': 'M',
    'M': 'K',
    'N': 'N',
    'R': 'Y',
    'S': 'S',
    'T': 'A',
    'V': 'B',
    'W': 'W',
    'X': 'X',
    'Y': 'R'}
# Standard genetic code: maps each of the 64 RNA codons to the one-letter
# IUPAC amino-acid code; '*' marks the three stop codons (UAA, UAG, UGA).
# Bug fix: the Proline row previously read '"ACC": "P"' instead of
# '"CCC": "P"', which both dropped codon CCC from the table and created a
# duplicate "ACC" key (silently resolved to "T" by the later literal entry).
codons = {
    "UUU": "F", "UUC": "F", "UUA": "L", "UUG": "L",
    "CUU": "L", "CUC": "L", "CUA": "L", "CUG": "L",
    "AUU": "I", "AUC": "I", "AUA": "I", "AUG": "M",
    "GUU": "V", "GUC": "V", "GUA": "V", "GUG": "V",
    "UCU": "S", "UCC": "S", "UCA": "S", "UCG": "S",
    "CCU": "P", "CCC": "P", "CCA": "P", "CCG": "P",
    "ACU": "T", "ACC": "T", "ACA": "T", "ACG": "T",
    "GCU": "A", "GCC": "A", "GCA": "A", "GCG": "A",
    "UAU": "Y", "UAC": "Y", "UAA": "*", "UAG": "*",
    "CAU": "H", "CAC": "H", "CAA": "Q", "CAG": "Q",
    "AAU": "N", "AAC": "N", "AAA": "K", "AAG": "K",
    "GAU": "D", "GAC": "D", "GAA": "E", "GAG": "E",
    "UGU": "C", "UGC": "C", "UGA": "*", "UGG": "W",
    "CGU": "R", "CGC": "R", "CGA": "R", "CGG": "R",
    "AGU": "S", "AGC": "S", "AGA": "R", "AGG": "R",
    "GGU": "G", "GGC": "G", "GGA": "G", "GGG": "G",
    }
# The 20 standard amino acids, keyed by IUPAC one-letter code; values are
# (three-letter code, full name) tuples, e.g. amino_acids['A'] -> ('Ala',
# 'Alanine').
# Bug fixes: 'F' previously mapped to ('Phe', 'Pheline') -- the full name is
# Phenylalanine -- and 'I' used the garbled three-letter code 'lle' for 'Ile'.
amino_acids = {
    "A": ('Ala', 'Alanine'),
    "R": ('Arg', 'Arginine'),
    "N": ('Asn', 'Asparagine'),
    "D": ('Asp', 'Aspartic acid'),
    "C": ('Cys', 'Cysteine'),
    "Q": ('Gln', 'Glutamine'),
    "E": ('Glu', 'Glutamic acid'),
    "G": ('Gly', 'Glycine'),
    "H": ('His', 'Histidine'),
    "I": ('Ile', 'Isoleucine'),
    "L": ('Leu', 'Leucine'),
    "K": ('Lys', 'Lysine'),
    "M": ('Met', 'Methionine'),
    "F": ('Phe', 'Phenylalanine'),
    "P": ('Pro', 'Proline'),
    "S": ('Ser', 'Serine'),
    "T": ('Thr', 'Threonine'),
    "W": ('Trp', 'Tryptophan'),
    "Y": ('Tyr', 'Tyrosine'),
    "V": ('Val', 'Valine')
    }
# B and Z codes indicate ambiguous amino acids.
# Pyl and Sec are specified by the UAG (Pyl) and UGA (Sec) stop codons in a
# specific context.
# Typo fix: 'B' previously read "Asparagine or aspartic acis".
exotic_amino_acids = {
    "B": ("Asn or Asp", "Asparagine or aspartic acid"),
    "J": ("Xle", "Isoleucine or leucine"),
    "O": ("Pyl", "Pyrrolysine"),
    "U": ("Sec", "Selenocysteine"),
    "Z": ("Gln or Glu", "Glutamine or glutamic acid"),
    "X": ("Xaa", "Any residue"),
    "--": ("gap", "gap"),
}
| 23.26506 | 79 | 0.487053 | true | true | |
f72e6942e7acd082f6cba1fa9c3d70a58320e52f | 611 | py | Python | python/math/0204_count_primes.py | linshaoyong/leetcode | ea052fad68a2fe0cbfa5469398508ec2b776654f | [
"MIT"
] | 6 | 2019-07-15T13:23:57.000Z | 2020-01-22T03:12:01.000Z | python/math/0204_count_primes.py | linshaoyong/leetcode | ea052fad68a2fe0cbfa5469398508ec2b776654f | [
"MIT"
] | null | null | null | python/math/0204_count_primes.py | linshaoyong/leetcode | ea052fad68a2fe0cbfa5469398508ec2b776654f | [
"MIT"
] | 1 | 2019-07-24T02:15:31.000Z | 2019-07-24T02:15:31.000Z | import math
class Solution:
    def countPrimes(self, n):
        """Return the number of primes strictly less than ``n``.

        :type n: int
        :rtype: int

        Sieve of Eratosthenes: mark 0 and 1 as non-prime, then for each
        prime ``i`` up to sqrt(n) cross out its multiples starting at
        ``i*i`` (smaller multiples were already crossed out by smaller
        primes).  The answer is the number of cells still marked 1.
        """
        if n < 2:
            return 0
        # s[k] == 1 iff k is still considered prime.
        s = [1] * n
        s[0] = s[1] = 0
        for i in range(2, int(math.sqrt(n)) + 1):
            if s[i] == 1:
                # len(range(...)) gives the slice length exactly; the
                # original float expression int((n - i*i - 1) / i + 1)
                # relied on truncation of a float quotient.
                s[i * i:n:i] = [0] * len(range(i * i, n, i))
        return sum(s)
def test_count_primes():
    """Sanity-check countPrimes on edge cases and small known values."""
    s = Solution()
    assert 0 == s.countPrimes(0)   # empty range
    assert 0 == s.countPrimes(2)   # primes strictly below n
    assert 1 == s.countPrimes(3)
    assert 4 == s.countPrimes(10)
    assert 25 == s.countPrimes(100)
| 21.821429 | 65 | 0.466448 | import math
class Solution:
    def countPrimes(self, n):
        """Return the number of primes strictly less than ``n``.

        Sieve of Eratosthenes; see the commented copy of this file above.
        """
        if n < 2:
            return 0
        # s[k] == 1 iff k is still considered prime; 0 and 1 never are.
        s = [1] * n
        s[0] = s[1] = 0
        for i in range(2, int(math.sqrt(n)) + 1):
            if s[i] == 1:
                # Exact slice length via len(range(...)) instead of the
                # original float expression int((n - i*i - 1) / i + 1).
                s[i * i:n:i] = [0] * len(range(i * i, n, i))
        return sum(s)
def test_count_primes():
    """Sanity-check countPrimes on edge cases and small known values."""
    s = Solution()
    assert 0 == s.countPrimes(0)
    assert 0 == s.countPrimes(2)
    assert 1 == s.countPrimes(3)
    assert 4 == s.countPrimes(10)
    assert 25 == s.countPrimes(100)
| true | true |
f72e697176b5866e0d5c839007d53c1d63f7d40e | 1,348 | py | Python | Algorithms/Easy/840. Magic Squares In Grid/answer.py | KenWoo/Algorithm | 4012a2f0a099a502df1e5df2e39faa75fe6463e8 | [
"Apache-2.0"
] | null | null | null | Algorithms/Easy/840. Magic Squares In Grid/answer.py | KenWoo/Algorithm | 4012a2f0a099a502df1e5df2e39faa75fe6463e8 | [
"Apache-2.0"
] | null | null | null | Algorithms/Easy/840. Magic Squares In Grid/answer.py | KenWoo/Algorithm | 4012a2f0a099a502df1e5df2e39faa75fe6463e8 | [
"Apache-2.0"
] | null | null | null | from typing import List
class Solution:
    """LeetCode 840: count 3x3 "magic square" subgrids."""

    def numMagicSquaresInside(self, grid: List[List[int]]) -> int:
        """Return how many 3x3 subgrids of ``grid`` are magic squares.

        A 3x3 subgrid is magic when it contains the distinct numbers 1..9
        and every row, every column and both diagonals sum to 15.

        Fixes over the original: the original checked row 2 twice and never
        checked row 3 (correct only because rows 1+2 summing to 30 out of 45
        forces row 3 to 15); it also crashed on an empty grid.
        """
        rows = len(grid)
        cols = len(grid[0]) if grid else 0
        count = 0
        for r in range(rows - 2):
            for c in range(cols - 2):
                if self._is_magic(grid, r, c):
                    count += 1
        return count

    @staticmethod
    def _is_magic(grid: List[List[int]], r: int, c: int) -> bool:
        """True if the 3x3 block whose top-left corner is (r, c) is magic."""
        cells = [grid[r + i][c + j] for i in range(3) for j in range(3)]
        # Must be exactly the digits 1..9, each once.
        if sorted(cells) != list(range(1, 10)):
            return False
        a, b, c1, d, e, f, g, h, i = cells
        lines = (
            (a, b, c1), (d, e, f), (g, h, i),   # the three rows
            (a, d, g), (b, e, h), (c1, f, i),   # the three columns
            (a, e, i), (c1, e, g),              # the two diagonals
        )
        return all(x + y + z == 15 for x, y, z in lines)
if __name__ == "__main__":
    # Ad-hoc smoke run: count magic subgrids in a sample and print the result.
    demo_grid = [[4, 7, 8], [9, 5, 1], [2, 3, 6]]
    print(Solution().numMagicSquaresInside(demo_grid))
| 32.095238 | 79 | 0.411721 | from typing import List
class Solution:
    """LeetCode 840: count 3x3 "magic square" subgrids."""

    def numMagicSquaresInside(self, grid: List[List[int]]) -> int:
        """Return how many 3x3 subgrids of ``grid`` are magic squares.

        Fixes over the original: the row-2 sum was tested twice and row 3
        never (harmless only by arithmetic accident), and an empty grid
        raised IndexError.
        """
        rows = len(grid)
        cols = len(grid[0]) if grid else 0
        count = 0
        for r in range(rows - 2):
            for c in range(cols - 2):
                if self._is_magic(grid, r, c):
                    count += 1
        return count

    @staticmethod
    def _is_magic(grid: List[List[int]], r: int, c: int) -> bool:
        """True if the 3x3 block whose top-left corner is (r, c) is magic."""
        cells = [grid[r + i][c + j] for i in range(3) for j in range(3)]
        if sorted(cells) != list(range(1, 10)):
            return False
        a, b, c1, d, e, f, g, h, i = cells
        lines = (
            (a, b, c1), (d, e, f), (g, h, i),   # rows
            (a, d, g), (b, e, h), (c1, f, i),   # columns
            (a, e, i), (c1, e, g),              # diagonals
        )
        return all(x + y + z == 15 for x, y, z in lines)
if __name__ == "__main__":
    # Ad-hoc smoke run: count magic subgrids in a sample and print the result.
    demo_grid = [[4, 7, 8], [9, 5, 1], [2, 3, 6]]
    print(Solution().numMagicSquaresInside(demo_grid))
| true | true |
f72e69d278b7517f93a9a5030feaaae14e83c6ef | 4,803 | py | Python | energyPATHWAYS/tests/test_time_series.py | jdailey/EnergyPATHWAYS | 0fb0ead475b6395f6b07fc43fe6c85826ee47d0f | [
"MIT"
] | 26 | 2017-02-06T22:57:29.000Z | 2022-03-25T20:02:32.000Z | energyPATHWAYS/tests/test_time_series.py | jdailey/EnergyPATHWAYS | 0fb0ead475b6395f6b07fc43fe6c85826ee47d0f | [
"MIT"
] | 65 | 2016-01-22T01:33:05.000Z | 2016-09-03T14:46:08.000Z | energyPATHWAYS/tests/test_time_series.py | jdailey/EnergyPATHWAYS | 0fb0ead475b6395f6b07fc43fe6c85826ee47d0f | [
"MIT"
] | 11 | 2017-02-24T23:27:36.000Z | 2021-10-18T17:33:37.000Z | # -*- coding: utf-8 -*-
__author__ = 'Ben, Ryan, Michael'
import numpy as np
from collections import defaultdict
import pandas as pd
import energyPATHWAYS
from energyPATHWAYS.time_series import TimeSeries
import unittest
from matplotlib import pyplot as plt
class TestTimeSeries(unittest.TestCase):
def setUp(self):
self.methods = ('linear_interpolation',
'linear_regression',
'logistic',
'nearest',
'quadratic',
'cubic',
'exponential',
'none',
'decay_towards_linear_regression',
'average')
def _help_test_clean_empty_data(self):
newindex = np.arange(2000, 2051)
x = np.array([])
y = np.array([])
self.run_all_cleaning_methods(x, y, newindex)
def test_clean_empty_data(self):
self.assertRaises(IndexError, self._help_test_clean_empty_data)
def test_clean_one_point(self):
newindex = np.arange(2000, 2051)
x = np.array([2010])
y = np.array([.1])
self.run_all_cleaning_methods(x, y, newindex)
def test_clean_two_points(self):
newindex = np.arange(2000, 2051)
x = np.array([2010, 2050])
y = np.array([.1, .5])
self.run_all_cleaning_methods(x, y, newindex)
def test_clean_three_points(self):
newindex = np.arange(2000, 2051)
x = np.array([2010, 2018, 2025])
y = np.array([.8, .7, .4])
self.run_all_cleaning_methods(x, y, newindex)
def test_clean_scurve_points(self):
newindex = np.arange(2000, 2051)
x = np.array([2010, 2018, 2025, 2040, 2050])
y = np.array([.8, .7, .4, .35, .34])
self.run_all_cleaning_methods(x, y, newindex)
def test_clean_linear_points(self):
newindex = np.arange(2000, 2051)
x = np.array([2010, 2020, 2030, 2040, 2050])
y = np.array([.1, .2, .3, .4, .5])
self.run_all_cleaning_methods(x, y, newindex)
def test_clean_quadratic_points(self):
newindex = np.arange(2000, 2051)
x = np.arange(2010, 2030)
y = (x-2010)**2
self.run_all_cleaning_methods(x, y, newindex)
def test_three_zeros(self):
# this has been a problem with logistic curve fitting
newindex = np.arange(2000, 2051)
x = np.array([2010, 2011, 2013])
y = np.array([0, 0, 0])
self.run_all_cleaning_methods(x, y, newindex)
def test_two_zeros(self):
newindex = np.arange(2000, 2051)
x = np.array([2010, 2013])
y = np.array([0, 0])
self.run_all_cleaning_methods(x, y, newindex)
def run_all_cleaning_methods(self, x, y, newindex):
for method in self.methods:
data = pd.DataFrame(y, index=x)
newdata = TimeSeries.clean(data,
newindex=newindex,
interpolation_method=(None if method=='decay_towards_linear_regression' else method), # not supported for linear regression
extrapolation_method=method)
# NOTE(review): everything below is module-level scratch code that runs at
# import time (not inside a test) -- consider moving it into test methods.
#newindex = np.arange(2015, 2025)
newindex = np.arange(2012, 2017)
x = np.array([2015, 2018, 2020])
y = np.array([.8, .7, .4])
data = pd.DataFrame(y, index=x)
# Interpolate/extrapolate a three-point series onto 2012-2016.
newdata = TimeSeries.clean(data, newindex=newindex, interpolation_method='linear_interpolation', extrapolation_method='nearest')
#
#
#newindex = np.arange(2020, 2025)
#multi_data = pd.concat([data]*3, keys=['a', 'b', 'c'], names=['dummy', 'year'])
#newdata2 = TimeSeries.clean(multi_data, time_index_name='year', newindex=newindex, interpolation_method='linear_interpolation', extrapolation_method='nearest')
# Same series replicated under a 'dummy' index level, extrapolated to 2049.
newindex = np.arange(2015, 2050)
multi_data = pd.concat([data]*3, keys=['a', 'b', 'c'], names=['dummy', 'year'])
newdata2 = TimeSeries.clean(multi_data, time_index_name='year', newindex=newindex, interpolation_method='nearest', extrapolation_method='exponential')
#raw_values = pd.read_csv('raw_values_example_for_clean_timeseries.csv')
#raw_values.set_index(['us', 'efficiency_type', 'supply_node', 'year'], inplace=True)
#raw_values.sort_index(inplace=True)
#
#newindex = [2015]
#newdata3 = TimeSeries.clean(raw_values, time_index_name='year', newindex=newindex, interpolation_method='linear_interpolation', extrapolation_method='nearest')
#
#print newdata3
# A series containing inf/NaN values (rebinds newdata from above).
newindex = np.arange(2012, 2030)
x = np.array([2015, 2016, 2018, 2020, 2021, 2025])
y = np.array([.8, np.inf, .7, .4, np.inf, np.nan])
data = pd.DataFrame(y, index=x)
newdata = TimeSeries.clean(data, newindex=newindex, interpolation_method='linear_interpolation', extrapolation_method='exponential')
| 34.804348 | 163 | 0.621278 |
__author__ = 'Ben, Ryan, Michael'
import numpy as np
from collections import defaultdict
import pandas as pd
import energyPATHWAYS
from energyPATHWAYS.time_series import TimeSeries
import unittest
from matplotlib import pyplot as plt
class TestTimeSeries(unittest.TestCase):
    """Run TimeSeries.clean over assorted inputs and every method pair."""

    def setUp(self):
        # Every interpolation/extrapolation strategy the cleaner supports.
        self.methods = ('linear_interpolation',
                        'linear_regression',
                        'logistic',
                        'nearest',
                        'quadratic',
                        'cubic',
                        'exponential',
                        'none',
                        'decay_towards_linear_regression',
                        'average')

    def _help_test_clean_empty_data(self):
        # No observations at all; cleaning is expected to blow up.
        self.run_all_cleaning_methods(np.array([]), np.array([]),
                                      np.arange(2000, 2051))

    def test_clean_empty_data(self):
        self.assertRaises(IndexError, self._help_test_clean_empty_data)

    def test_clean_one_point(self):
        self.run_all_cleaning_methods(np.array([2010]),
                                      np.array([.1]),
                                      np.arange(2000, 2051))

    def test_clean_two_points(self):
        self.run_all_cleaning_methods(np.array([2010, 2050]),
                                      np.array([.1, .5]),
                                      np.arange(2000, 2051))

    def test_clean_three_points(self):
        self.run_all_cleaning_methods(np.array([2010, 2018, 2025]),
                                      np.array([.8, .7, .4]),
                                      np.arange(2000, 2051))

    def test_clean_scurve_points(self):
        self.run_all_cleaning_methods(np.array([2010, 2018, 2025, 2040, 2050]),
                                      np.array([.8, .7, .4, .35, .34]),
                                      np.arange(2000, 2051))

    def test_clean_linear_points(self):
        self.run_all_cleaning_methods(np.array([2010, 2020, 2030, 2040, 2050]),
                                      np.array([.1, .2, .3, .4, .5]),
                                      np.arange(2000, 2051))

    def test_clean_quadratic_points(self):
        years = np.arange(2010, 2030)
        self.run_all_cleaning_methods(years, (years - 2010) ** 2,
                                      np.arange(2000, 2051))

    def test_three_zeros(self):
        self.run_all_cleaning_methods(np.array([2010, 2011, 2013]),
                                      np.array([0, 0, 0]),
                                      np.arange(2000, 2051))

    def test_two_zeros(self):
        self.run_all_cleaning_methods(np.array([2010, 2013]),
                                      np.array([0, 0]),
                                      np.arange(2000, 2051))

    def run_all_cleaning_methods(self, x, y, newindex):
        """Clean the (x, y) series once per supported method."""
        for method in self.methods:
            frame = pd.DataFrame(y, index=x)
            # decay_towards_linear_regression is extrapolation-only.
            interp = None if method == 'decay_towards_linear_regression' else method
            TimeSeries.clean(frame,
                             newindex=newindex,
                             interpolation_method=interp,
                             extrapolation_method=method)
newindex = np.arange(2012, 2017)
x = np.array([2015, 2018, 2020])
y = np.array([.8, .7, .4])
data = pd.DataFrame(y, index=x)
newdata = TimeSeries.clean(data, newindex=newindex, interpolation_method='linear_interpolation', extrapolation_method='nearest')
newindex = np.arange(2015, 2050)
multi_data = pd.concat([data]*3, keys=['a', 'b', 'c'], names=['dummy', 'year'])
newdata2 = TimeSeries.clean(multi_data, time_index_name='year', newindex=newindex, interpolation_method='nearest', extrapolation_method='exponential')
newindex = np.arange(2012, 2030)
x = np.array([2015, 2016, 2018, 2020, 2021, 2025])
y = np.array([.8, np.inf, .7, .4, np.inf, np.nan])
data = pd.DataFrame(y, index=x)
newdata = TimeSeries.clean(data, newindex=newindex, interpolation_method='linear_interpolation', extrapolation_method='exponential')
| true | true |
f72e69d4bd341d20916ee603e013db4ef7cad8db | 5,588 | py | Python | certbot/plugins/storage_test.py | pub-repository/letsencrypt | c4684f187a4ce4ef13425cfba607dec9d8bfa963 | [
"Apache-2.0"
] | 1 | 2019-12-29T16:34:18.000Z | 2019-12-29T16:34:18.000Z | certbot/plugins/storage_test.py | pub-repository/letsencrypt | c4684f187a4ce4ef13425cfba607dec9d8bfa963 | [
"Apache-2.0"
] | null | null | null | certbot/plugins/storage_test.py | pub-repository/letsencrypt | c4684f187a4ce4ef13425cfba607dec9d8bfa963 | [
"Apache-2.0"
] | 1 | 2019-12-29T16:34:20.000Z | 2019-12-29T16:34:20.000Z | """Tests for certbot.plugins.storage.PluginStorage"""
import json
import unittest
import mock
from certbot import errors
from certbot.compat import os
from certbot.compat import filesystem
from certbot.plugins import common
from certbot.tests import util as test_util
class PluginStorageTest(test_util.ConfigTestCase):
    """Test for certbot.plugins.storage.PluginStorage"""

    def setUp(self):
        super(PluginStorageTest, self).setUp()
        self.plugin_cls = common.Installer
        filesystem.mkdir(self.config.config_dir)
        # Patch the reverter so instantiating the plugin does no real checkpointing.
        with mock.patch("certbot.reverter.util"):
            self.plugin = self.plugin_cls(config=self.config, name="mockplugin")

    def test_load_errors_cant_read(self):
        """An existing-but-unreadable storage file raises PluginStorageError."""
        with open(os.path.join(self.config.config_dir,
                               ".pluginstorage.json"), "w") as fh:
            fh.write("dummy")
        # When unable to read file that exists
        mock_open = mock.mock_open()
        mock_open.side_effect = IOError
        self.plugin.storage.storagepath = os.path.join(self.config.config_dir,
                                                       ".pluginstorage.json")
        with mock.patch("six.moves.builtins.open", mock_open):
            with mock.patch('os.path.isfile', return_value=True):
                with mock.patch("certbot.reverter.util"):
                    self.assertRaises(errors.PluginStorageError,
                                      self.plugin.storage._load) # pylint: disable=protected-access

    def test_load_errors_empty(self):
        """An empty storage file logs a debug line; fetch of a missing key raises KeyError."""
        with open(os.path.join(self.config.config_dir, ".pluginstorage.json"), "w") as fh:
            fh.write('')
        with mock.patch("certbot.plugins.storage.logger.debug") as mock_log:
            # Should not error out but write a debug log line instead
            with mock.patch("certbot.reverter.util"):
                nocontent = self.plugin_cls(self.config, "mockplugin")
            self.assertRaises(KeyError,
                              nocontent.storage.fetch, "value")
            self.assertTrue(mock_log.called)
            self.assertTrue("no values loaded" in mock_log.call_args[0][0])

    def test_load_errors_corrupted(self):
        """Malformed JSON in the storage file is reported and raises PluginError."""
        with open(os.path.join(self.config.config_dir,
                               ".pluginstorage.json"), "w") as fh:
            fh.write('invalid json')
        with mock.patch("certbot.plugins.storage.logger.error") as mock_log:
            with mock.patch("certbot.reverter.util"):
                corrupted = self.plugin_cls(self.config, "mockplugin")
            self.assertRaises(errors.PluginError,
                              corrupted.storage.fetch,
                              "value")
            self.assertTrue("is corrupted" in mock_log.call_args[0][0])

    def test_save_errors_cant_serialize(self):
        """Unserializable stored data makes save() raise PluginStorageError."""
        with mock.patch("certbot.plugins.storage.logger.error") as mock_log:
            # Set data as something that can't be serialized
            self.plugin.storage._initialized = True # pylint: disable=protected-access
            self.plugin.storage.storagepath = "/tmp/whatever"
            self.plugin.storage._data = self.plugin_cls # pylint: disable=protected-access
            self.assertRaises(errors.PluginStorageError,
                              self.plugin.storage.save)
            self.assertTrue("Could not serialize" in mock_log.call_args[0][0])

    def test_save_errors_unable_to_write_file(self):
        """An IOError while writing the storage file surfaces as PluginStorageError."""
        mock_open = mock.mock_open()
        mock_open.side_effect = IOError
        with mock.patch("certbot.compat.filesystem.open", mock_open):
            with mock.patch("certbot.plugins.storage.logger.error") as mock_log:
                self.plugin.storage._data = {"valid": "data"} # pylint: disable=protected-access
                self.plugin.storage._initialized = True # pylint: disable=protected-access
                self.assertRaises(errors.PluginStorageError,
                                  self.plugin.storage.save)
                self.assertTrue("Could not write" in mock_log.call_args[0][0])

    def test_save_uninitialized(self):
        """Saving before anything was loaded or stored is an error."""
        with mock.patch("certbot.reverter.util"):
            self.assertRaises(errors.PluginStorageError,
                              self.plugin_cls(self.config, "x").storage.save)

    def test_namespace_isolation(self):
        """Keys stored by one plugin must be invisible to another plugin."""
        with mock.patch("certbot.reverter.util"):
            plugin1 = self.plugin_cls(self.config, "first")
            plugin2 = self.plugin_cls(self.config, "second")
        plugin1.storage.put("first_key", "first_value")
        self.assertRaises(KeyError,
                          plugin2.storage.fetch, "first_key")
        self.assertRaises(KeyError,
                          plugin2.storage.fetch, "first")
        self.assertEqual(plugin1.storage.fetch("first_key"), "first_value")

    def test_saved_state(self):
        """Values survive a save/reload round trip and are namespaced on disk."""
        self.plugin.storage.put("testkey", "testvalue")
        # Write to disk
        self.plugin.storage.save()
        with mock.patch("certbot.reverter.util"):
            another = self.plugin_cls(self.config, "mockplugin")
        self.assertEqual(another.storage.fetch("testkey"), "testvalue")
        with open(os.path.join(self.config.config_dir,
                               ".pluginstorage.json"), 'r') as fh:
            psdata = fh.read()
        psjson = json.loads(psdata)
        # On-disk layout: one top-level key per plugin namespace.
        self.assertTrue("mockplugin" in psjson.keys())
        self.assertEqual(len(psjson), 1)
        self.assertEqual(psjson["mockplugin"]["testkey"], "testvalue")
# Allow running this test module directly; the entry point is excluded from coverage.
if __name__ == "__main__":
    unittest.main() # pragma: no cover
| 46.566667 | 100 | 0.620616 | import json
# NOTE(review): everything below is a comment-stripped duplicate of the
# PluginStorageTest module defined earlier in this file (extraction residue
# from a dataset's "content_no_comment" column). It re-imports the same names
# and redefines the same class; if this file were executed, this second
# definition would shadow the first. Kept byte-identical pending removal.
import unittest
import mock
from certbot import errors
from certbot.compat import os
from certbot.compat import filesystem
from certbot.plugins import common
from certbot.tests import util as test_util
class PluginStorageTest(test_util.ConfigTestCase):
    def setUp(self):
        super(PluginStorageTest, self).setUp()
        self.plugin_cls = common.Installer
        filesystem.mkdir(self.config.config_dir)
        with mock.patch("certbot.reverter.util"):
            self.plugin = self.plugin_cls(config=self.config, name="mockplugin")
    def test_load_errors_cant_read(self):
        with open(os.path.join(self.config.config_dir,
                               ".pluginstorage.json"), "w") as fh:
            fh.write("dummy")
        mock_open = mock.mock_open()
        mock_open.side_effect = IOError
        self.plugin.storage.storagepath = os.path.join(self.config.config_dir,
                                                       ".pluginstorage.json")
        with mock.patch("six.moves.builtins.open", mock_open):
            with mock.patch('os.path.isfile', return_value=True):
                with mock.patch("certbot.reverter.util"):
                    self.assertRaises(errors.PluginStorageError,
                                      self.plugin.storage._load)
    def test_load_errors_empty(self):
        with open(os.path.join(self.config.config_dir, ".pluginstorage.json"), "w") as fh:
            fh.write('')
        with mock.patch("certbot.plugins.storage.logger.debug") as mock_log:
            with mock.patch("certbot.reverter.util"):
                nocontent = self.plugin_cls(self.config, "mockplugin")
            self.assertRaises(KeyError,
                              nocontent.storage.fetch, "value")
            self.assertTrue(mock_log.called)
            self.assertTrue("no values loaded" in mock_log.call_args[0][0])
    def test_load_errors_corrupted(self):
        with open(os.path.join(self.config.config_dir,
                               ".pluginstorage.json"), "w") as fh:
            fh.write('invalid json')
        with mock.patch("certbot.plugins.storage.logger.error") as mock_log:
            with mock.patch("certbot.reverter.util"):
                corrupted = self.plugin_cls(self.config, "mockplugin")
            self.assertRaises(errors.PluginError,
                              corrupted.storage.fetch,
                              "value")
            self.assertTrue("is corrupted" in mock_log.call_args[0][0])
    def test_save_errors_cant_serialize(self):
        with mock.patch("certbot.plugins.storage.logger.error") as mock_log:
            self.plugin.storage._initialized = True # pylint: disable=protected-access
            self.plugin.storage.storagepath = "/tmp/whatever"
            self.plugin.storage._data = self.plugin_cls # pylint: disable=protected-access
            self.assertRaises(errors.PluginStorageError,
                              self.plugin.storage.save)
            self.assertTrue("Could not serialize" in mock_log.call_args[0][0])
    def test_save_errors_unable_to_write_file(self):
        mock_open = mock.mock_open()
        mock_open.side_effect = IOError
        with mock.patch("certbot.compat.filesystem.open", mock_open):
            with mock.patch("certbot.plugins.storage.logger.error") as mock_log:
                self.plugin.storage._data = {"valid": "data"} # pylint: disable=protected-access
                self.plugin.storage._initialized = True # pylint: disable=protected-access
                self.assertRaises(errors.PluginStorageError,
                                  self.plugin.storage.save)
                self.assertTrue("Could not write" in mock_log.call_args[0][0])
    def test_save_uninitialized(self):
        with mock.patch("certbot.reverter.util"):
            self.assertRaises(errors.PluginStorageError,
                              self.plugin_cls(self.config, "x").storage.save)
    def test_namespace_isolation(self):
        with mock.patch("certbot.reverter.util"):
            plugin1 = self.plugin_cls(self.config, "first")
            plugin2 = self.plugin_cls(self.config, "second")
        plugin1.storage.put("first_key", "first_value")
        self.assertRaises(KeyError,
                          plugin2.storage.fetch, "first_key")
        self.assertRaises(KeyError,
                          plugin2.storage.fetch, "first")
        self.assertEqual(plugin1.storage.fetch("first_key"), "first_value")
    def test_saved_state(self):
        self.plugin.storage.put("testkey", "testvalue")
        # Write to disk
        self.plugin.storage.save()
        with mock.patch("certbot.reverter.util"):
            another = self.plugin_cls(self.config, "mockplugin")
        self.assertEqual(another.storage.fetch("testkey"), "testvalue")
        with open(os.path.join(self.config.config_dir,
                               ".pluginstorage.json"), 'r') as fh:
            psdata = fh.read()
        psjson = json.loads(psdata)
        self.assertTrue("mockplugin" in psjson.keys())
        self.assertEqual(len(psjson), 1)
        self.assertEqual(psjson["mockplugin"]["testkey"], "testvalue")
if __name__ == "__main__":
    unittest.main() # pragma: no cover
| true | true |
f72e6b9fa3ef86cd10c13a1694c38d2ce9a37f73 | 83,070 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_08_01/operations/_network_watchers_operations.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 1 | 2021-06-02T08:01:35.000Z | 2021-06-02T08:01:35.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_08_01/operations/_network_watchers_operations.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 226 | 2019-07-24T07:57:21.000Z | 2019-10-15T01:07:24.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_08_01/operations/_network_watchers_operations.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class NetworkWatchersOperations(object):
"""NetworkWatchersOperations operations.
You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2019-08-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
    """Store the service client, configuration and (de)serializers.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    # The API version is fixed for this generated operations group.
    self.api_version = "2019-08-01"
    self.config = config
    self._client = client
    self._serialize = serializer
    self._deserialize = deserializer
def create_or_update(
        self, resource_group_name, network_watcher_name, parameters, custom_headers=None, raw=False, **operation_config):
    """Creates or updates a network watcher in the specified resource group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param network_watcher_name: The name of the network watcher.
    :type network_watcher_name: str
    :param parameters: Parameters that define the network watcher
     resource.
    :type parameters:
     ~azure.mgmt.network.v2019_08_01.models.NetworkWatcher
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: NetworkWatcher or ClientRawResponse if raw=true
    :rtype: ~azure.mgmt.network.v2019_08_01.models.NetworkWatcher or
     ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`ErrorResponseException<azure.mgmt.network.v2019_08_01.models.ErrorResponseException>`
    """
    # Construct URL
    url = self.create_or_update.metadata['url']
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Accept'] = 'application/json'
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        # Correlation id so the service can trace this individual request.
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct body
    body_content = self._serialize.body(parameters, 'NetworkWatcher')

    # Construct and send request
    request = self._client.put(url, query_parameters, header_parameters, body_content)
    response = self._client.send(request, stream=False, **operation_config)

    if response.status_code not in [200, 201]:
        raise models.ErrorResponseException(self._deserialize, response)

    deserialized = None
    # 200 = existing watcher updated, 201 = new watcher created; same payload shape.
    if response.status_code == 200:
        deserialized = self._deserialize('NetworkWatcher', response)
    if response.status_code == 201:
        deserialized = self._deserialize('NetworkWatcher', response)

    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response

    return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}'}
def get(
        self, resource_group_name, network_watcher_name, custom_headers=None, raw=False, **operation_config):
    """Gets the specified network watcher by resource group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param network_watcher_name: The name of the network watcher.
    :type network_watcher_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: NetworkWatcher or ClientRawResponse if raw=true
    :rtype: ~azure.mgmt.network.v2019_08_01.models.NetworkWatcher or
     ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`ErrorResponseException<azure.mgmt.network.v2019_08_01.models.ErrorResponseException>`
    """
    # Construct URL
    url = self.get.metadata['url']
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Accept'] = 'application/json'
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct and send request
    request = self._client.get(url, query_parameters, header_parameters)
    response = self._client.send(request, stream=False, **operation_config)

    # Anything other than 200 is surfaced as the service error type.
    if response.status_code not in [200]:
        raise models.ErrorResponseException(self._deserialize, response)

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('NetworkWatcher', response)

    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response

    return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}'}
def _delete_initial(
        self, resource_group_name, network_watcher_name, custom_headers=None, raw=False, **operation_config):
    """Issue the initial DELETE request of the long-running delete operation.

    Returns once the service accepts the deletion (202) or the resource is
    already absent (204); ``delete`` wraps this in an LROPoller.
    """
    # Construct URL
    # Shares the public operation's URL template (see delete.metadata).
    url = self.delete.metadata['url']
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

    # Construct headers
    header_parameters = {}
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct and send request
    request = self._client.delete(url, query_parameters, header_parameters)
    response = self._client.send(request, stream=False, **operation_config)

    if response.status_code not in [202, 204]:
        raise models.ErrorResponseException(self._deserialize, response)

    if raw:
        client_raw_response = ClientRawResponse(None, response)
        return client_raw_response
def delete(
        self, resource_group_name, network_watcher_name, custom_headers=None, raw=False, polling=True, **operation_config):
    """Deletes the specified network watcher resource.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param network_watcher_name: The name of the network watcher.
    :type network_watcher_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: The poller return type is ClientRawResponse, the
     direct response alongside the deserialized response
    :param polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :return: An instance of LROPoller that returns None or
     ClientRawResponse<None> if raw==True
    :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
     ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
    :raises:
     :class:`ErrorResponseException<azure.mgmt.network.v2019_08_01.models.ErrorResponseException>`
    """
    # raw=True so the poller always receives the initial HTTP response.
    raw_result = self._delete_initial(
        resource_group_name=resource_group_name,
        network_watcher_name=network_watcher_name,
        custom_headers=custom_headers,
        raw=True,
        **operation_config
    )

    def get_long_running_output(response):
        # Delete has no body; only return something when the caller asked for raw.
        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response

    lro_delay = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)
    # polling=True -> default ARM polling; False -> no polling; else a custom strategy object.
    if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}'}
def update_tags(
        self, resource_group_name, network_watcher_name, tags=None, custom_headers=None, raw=False, **operation_config):
    """Updates a network watcher tags.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param network_watcher_name: The name of the network watcher.
    :type network_watcher_name: str
    :param tags: Resource tags.
    :type tags: dict[str, str]
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: NetworkWatcher or ClientRawResponse if raw=true
    :rtype: ~azure.mgmt.network.v2019_08_01.models.NetworkWatcher or
     ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`ErrorResponseException<azure.mgmt.network.v2019_08_01.models.ErrorResponseException>`
    """
    # Only tags are patchable; wrap them in the generic TagsObject body.
    parameters = models.TagsObject(tags=tags)

    # Construct URL
    url = self.update_tags.metadata['url']
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Accept'] = 'application/json'
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct body
    body_content = self._serialize.body(parameters, 'TagsObject')

    # Construct and send request
    request = self._client.patch(url, query_parameters, header_parameters, body_content)
    response = self._client.send(request, stream=False, **operation_config)

    if response.status_code not in [200]:
        raise models.ErrorResponseException(self._deserialize, response)

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('NetworkWatcher', response)

    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response

    return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}'}
def list(
        self, resource_group_name, custom_headers=None, raw=False, **operation_config):
    """Gets all network watchers by resource group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: An iterator like instance of NetworkWatcher
    :rtype:
     ~azure.mgmt.network.v2019_08_01.models.NetworkWatcherPaged[~azure.mgmt.network.v2019_08_01.models.NetworkWatcher]
    :raises:
     :class:`ErrorResponseException<azure.mgmt.network.v2019_08_01.models.ErrorResponseException>`
    """
    def prepare_request(next_link=None):
        # First page is built from the URL template; later pages reuse the
        # fully-formed nextLink URL returned by the service.
        if not next_link:
            # Construct URL
            url = self.list.metadata['url']
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
            }
            url = self._client.format_url(url, **path_format_arguments)

            # Construct parameters
            query_parameters = {}
            query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        else:
            url = next_link
            query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        return request

    def internal_paging(next_link=None):
        # Called lazily by the paged iterator for each page.
        request = prepare_request(next_link)

        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200]:
            raise models.ErrorResponseException(self._deserialize, response)

        return response

    # Deserialize response
    header_dict = None
    if raw:
        header_dict = {}
    deserialized = models.NetworkWatcherPaged(internal_paging, self._deserialize.dependencies, header_dict)

    return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers'}
def list_all(
        self, custom_headers=None, raw=False, **operation_config):
    """Gets all network watchers by subscription.

    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: An iterator like instance of NetworkWatcher
    :rtype:
     ~azure.mgmt.network.v2019_08_01.models.NetworkWatcherPaged[~azure.mgmt.network.v2019_08_01.models.NetworkWatcher]
    :raises:
     :class:`ErrorResponseException<azure.mgmt.network.v2019_08_01.models.ErrorResponseException>`
    """
    def prepare_request(next_link=None):
        # First page from the template; later pages reuse the service's nextLink URL.
        if not next_link:
            # Construct URL
            url = self.list_all.metadata['url']
            path_format_arguments = {
                'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
            }
            url = self._client.format_url(url, **path_format_arguments)

            # Construct parameters
            query_parameters = {}
            query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        else:
            url = next_link
            query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        return request

    def internal_paging(next_link=None):
        request = prepare_request(next_link)

        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200]:
            raise models.ErrorResponseException(self._deserialize, response)

        return response

    # Deserialize response
    header_dict = None
    if raw:
        header_dict = {}
    deserialized = models.NetworkWatcherPaged(internal_paging, self._deserialize.dependencies, header_dict)

    return deserialized
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkWatchers'}
def get_topology(
        self, resource_group_name, network_watcher_name, parameters, custom_headers=None, raw=False, **operation_config):
    """Gets the current network topology by resource group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param network_watcher_name: The name of the network watcher.
    :type network_watcher_name: str
    :param parameters: Parameters that define the representation of
     topology.
    :type parameters:
     ~azure.mgmt.network.v2019_08_01.models.TopologyParameters
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: Topology or ClientRawResponse if raw=true
    :rtype: ~azure.mgmt.network.v2019_08_01.models.Topology or
     ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`ErrorResponseException<azure.mgmt.network.v2019_08_01.models.ErrorResponseException>`
    """
    # Construct URL
    url = self.get_topology.metadata['url']
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Accept'] = 'application/json'
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct body
    body_content = self._serialize.body(parameters, 'TopologyParameters')

    # Construct and send request
    # Topology retrieval is modeled as a POST because it takes a request body.
    request = self._client.post(url, query_parameters, header_parameters, body_content)
    response = self._client.send(request, stream=False, **operation_config)

    if response.status_code not in [200]:
        raise models.ErrorResponseException(self._deserialize, response)

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('Topology', response)

    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response

    return deserialized
get_topology.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/topology'}
    def _verify_ip_flow_initial(
            self, resource_group_name, network_watcher_name, parameters, custom_headers=None, raw=False, **operation_config):
        """Send the initial ipFlowVerify POST request for the verify_ip_flow
        long-running operation.

        :return: Deserialized ``VerificationIPFlowResult`` on 200/202 (a 202
         response means the operation is still in progress), or a
         ``ClientRawResponse`` wrapping it when ``raw`` is True.
        :raises: ``models.ErrorResponseException`` for any other status code.
        """
        # Construct URL
        url = self.verify_ip_flow.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # Per-request id so the call can be correlated server-side.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        body_content = self._serialize.body(parameters, 'VerificationIPFlowParameters')
        # Construct and send request
        request = self._client.post(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)
        if response.status_code not in [200, 202]:
            raise models.ErrorResponseException(self._deserialize, response)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('VerificationIPFlowResult', response)
        if response.status_code == 202:
            deserialized = self._deserialize('VerificationIPFlowResult', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
def verify_ip_flow(
self, resource_group_name, network_watcher_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Verify IP flow from the specified VM to a location given the currently
configured NSG rules.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param parameters: Parameters that define the IP flow to be verified.
:type parameters:
~azure.mgmt.network.v2019_08_01.models.VerificationIPFlowParameters
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns
VerificationIPFlowResult or
ClientRawResponse<VerificationIPFlowResult> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2019_08_01.models.VerificationIPFlowResult]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2019_08_01.models.VerificationIPFlowResult]]
:raises:
:class:`ErrorResponseException<azure.mgmt.network.v2019_08_01.models.ErrorResponseException>`
"""
raw_result = self._verify_ip_flow_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('VerificationIPFlowResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
verify_ip_flow.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/ipFlowVerify'}
    def _get_next_hop_initial(
            self, resource_group_name, network_watcher_name, parameters, custom_headers=None, raw=False, **operation_config):
        """Send the initial nextHop POST request for the get_next_hop
        long-running operation.

        :return: Deserialized ``NextHopResult`` on 200/202 (a 202 response
         means the operation is still in progress), or a ``ClientRawResponse``
         wrapping it when ``raw`` is True.
        :raises: ``models.ErrorResponseException`` for any other status code.
        """
        # Construct URL
        url = self.get_next_hop.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        body_content = self._serialize.body(parameters, 'NextHopParameters')
        # Construct and send request
        request = self._client.post(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)
        if response.status_code not in [200, 202]:
            raise models.ErrorResponseException(self._deserialize, response)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('NextHopResult', response)
        if response.status_code == 202:
            deserialized = self._deserialize('NextHopResult', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
def get_next_hop(
self, resource_group_name, network_watcher_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Gets the next hop from the specified VM.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param parameters: Parameters that define the source and destination
endpoint.
:type parameters:
~azure.mgmt.network.v2019_08_01.models.NextHopParameters
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns NextHopResult or
ClientRawResponse<NextHopResult> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2019_08_01.models.NextHopResult]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2019_08_01.models.NextHopResult]]
:raises:
:class:`ErrorResponseException<azure.mgmt.network.v2019_08_01.models.ErrorResponseException>`
"""
raw_result = self._get_next_hop_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('NextHopResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
get_next_hop.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/nextHop'}
    def _get_vm_security_rules_initial(
            self, resource_group_name, network_watcher_name, target_resource_id, custom_headers=None, raw=False, **operation_config):
        """Send the initial securityGroupView POST request for the
        get_vm_security_rules long-running operation.

        Wraps ``target_resource_id`` in a ``SecurityGroupViewParameters``
        body before sending.

        :return: Deserialized ``SecurityGroupViewResult`` on 200/202 (a 202
         response means the operation is still in progress), or a
         ``ClientRawResponse`` wrapping it when ``raw`` is True.
        :raises: ``models.ErrorResponseException`` for any other status code.
        """
        parameters = models.SecurityGroupViewParameters(target_resource_id=target_resource_id)
        # Construct URL
        url = self.get_vm_security_rules.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        body_content = self._serialize.body(parameters, 'SecurityGroupViewParameters')
        # Construct and send request
        request = self._client.post(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)
        if response.status_code not in [200, 202]:
            raise models.ErrorResponseException(self._deserialize, response)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('SecurityGroupViewResult', response)
        if response.status_code == 202:
            deserialized = self._deserialize('SecurityGroupViewResult', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
def get_vm_security_rules(
self, resource_group_name, network_watcher_name, target_resource_id, custom_headers=None, raw=False, polling=True, **operation_config):
"""Gets the configured and effective security group rules on the specified
VM.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param target_resource_id: ID of the target VM.
:type target_resource_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns SecurityGroupViewResult
or ClientRawResponse<SecurityGroupViewResult> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2019_08_01.models.SecurityGroupViewResult]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2019_08_01.models.SecurityGroupViewResult]]
:raises:
:class:`ErrorResponseException<azure.mgmt.network.v2019_08_01.models.ErrorResponseException>`
"""
raw_result = self._get_vm_security_rules_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
target_resource_id=target_resource_id,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('SecurityGroupViewResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
get_vm_security_rules.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/securityGroupView'}
    def _get_troubleshooting_initial(
            self, resource_group_name, network_watcher_name, parameters, custom_headers=None, raw=False, **operation_config):
        """Send the initial troubleshoot POST request for the
        get_troubleshooting long-running operation.

        :return: Deserialized ``TroubleshootingResult`` on 200/202 (a 202
         response means the operation is still in progress), or a
         ``ClientRawResponse`` wrapping it when ``raw`` is True.
        :raises: ``models.ErrorResponseException`` for any other status code.
        """
        # Construct URL
        url = self.get_troubleshooting.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        body_content = self._serialize.body(parameters, 'TroubleshootingParameters')
        # Construct and send request
        request = self._client.post(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)
        if response.status_code not in [200, 202]:
            raise models.ErrorResponseException(self._deserialize, response)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('TroubleshootingResult', response)
        if response.status_code == 202:
            deserialized = self._deserialize('TroubleshootingResult', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
def get_troubleshooting(
self, resource_group_name, network_watcher_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Initiate troubleshooting on a specified resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher resource.
:type network_watcher_name: str
:param parameters: Parameters that define the resource to
troubleshoot.
:type parameters:
~azure.mgmt.network.v2019_08_01.models.TroubleshootingParameters
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns TroubleshootingResult
or ClientRawResponse<TroubleshootingResult> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2019_08_01.models.TroubleshootingResult]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2019_08_01.models.TroubleshootingResult]]
:raises:
:class:`ErrorResponseException<azure.mgmt.network.v2019_08_01.models.ErrorResponseException>`
"""
raw_result = self._get_troubleshooting_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('TroubleshootingResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
get_troubleshooting.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/troubleshoot'}
    def _get_troubleshooting_result_initial(
            self, resource_group_name, network_watcher_name, target_resource_id, custom_headers=None, raw=False, **operation_config):
        """Send the initial queryTroubleshootResult POST request for the
        get_troubleshooting_result long-running operation.

        Wraps ``target_resource_id`` in a ``QueryTroubleshootingParameters``
        body before sending.

        :return: Deserialized ``TroubleshootingResult`` on 200/202 (a 202
         response means the operation is still in progress), or a
         ``ClientRawResponse`` wrapping it when ``raw`` is True.
        :raises: ``models.ErrorResponseException`` for any other status code.
        """
        parameters = models.QueryTroubleshootingParameters(target_resource_id=target_resource_id)
        # Construct URL
        url = self.get_troubleshooting_result.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        body_content = self._serialize.body(parameters, 'QueryTroubleshootingParameters')
        # Construct and send request
        request = self._client.post(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)
        if response.status_code not in [200, 202]:
            raise models.ErrorResponseException(self._deserialize, response)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('TroubleshootingResult', response)
        if response.status_code == 202:
            deserialized = self._deserialize('TroubleshootingResult', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
def get_troubleshooting_result(
self, resource_group_name, network_watcher_name, target_resource_id, custom_headers=None, raw=False, polling=True, **operation_config):
"""Get the last completed troubleshooting result on a specified resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher resource.
:type network_watcher_name: str
:param target_resource_id: The target resource ID to query the
troubleshooting result.
:type target_resource_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns TroubleshootingResult
or ClientRawResponse<TroubleshootingResult> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2019_08_01.models.TroubleshootingResult]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2019_08_01.models.TroubleshootingResult]]
:raises:
:class:`ErrorResponseException<azure.mgmt.network.v2019_08_01.models.ErrorResponseException>`
"""
raw_result = self._get_troubleshooting_result_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
target_resource_id=target_resource_id,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('TroubleshootingResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
get_troubleshooting_result.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/queryTroubleshootResult'}
    def _set_flow_log_configuration_initial(
            self, resource_group_name, network_watcher_name, parameters, custom_headers=None, raw=False, **operation_config):
        """Send the initial configureFlowLog POST request for the
        set_flow_log_configuration long-running operation.

        :return: Deserialized ``FlowLogInformation`` on 200/202 (a 202
         response means the operation is still in progress), or a
         ``ClientRawResponse`` wrapping it when ``raw`` is True.
        :raises: ``models.ErrorResponseException`` for any other status code.
        """
        # Construct URL
        url = self.set_flow_log_configuration.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        body_content = self._serialize.body(parameters, 'FlowLogInformation')
        # Construct and send request
        request = self._client.post(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)
        if response.status_code not in [200, 202]:
            raise models.ErrorResponseException(self._deserialize, response)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('FlowLogInformation', response)
        if response.status_code == 202:
            deserialized = self._deserialize('FlowLogInformation', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
def set_flow_log_configuration(
self, resource_group_name, network_watcher_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Configures flow log and traffic analytics (optional) on a specified
resource.
:param resource_group_name: The name of the network watcher resource
group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher resource.
:type network_watcher_name: str
:param parameters: Parameters that define the configuration of flow
log.
:type parameters:
~azure.mgmt.network.v2019_08_01.models.FlowLogInformation
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns FlowLogInformation or
ClientRawResponse<FlowLogInformation> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2019_08_01.models.FlowLogInformation]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2019_08_01.models.FlowLogInformation]]
:raises:
:class:`ErrorResponseException<azure.mgmt.network.v2019_08_01.models.ErrorResponseException>`
"""
raw_result = self._set_flow_log_configuration_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('FlowLogInformation', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
set_flow_log_configuration.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/configureFlowLog'}
    def _get_flow_log_status_initial(
            self, resource_group_name, network_watcher_name, target_resource_id, custom_headers=None, raw=False, **operation_config):
        """Send the initial queryFlowLogStatus POST request for the
        get_flow_log_status long-running operation.

        Wraps ``target_resource_id`` in a ``FlowLogStatusParameters`` body
        before sending.

        :return: Deserialized ``FlowLogInformation`` on 200/202 (a 202
         response means the operation is still in progress), or a
         ``ClientRawResponse`` wrapping it when ``raw`` is True.
        :raises: ``models.ErrorResponseException`` for any other status code.
        """
        parameters = models.FlowLogStatusParameters(target_resource_id=target_resource_id)
        # Construct URL
        url = self.get_flow_log_status.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        body_content = self._serialize.body(parameters, 'FlowLogStatusParameters')
        # Construct and send request
        request = self._client.post(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)
        if response.status_code not in [200, 202]:
            raise models.ErrorResponseException(self._deserialize, response)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('FlowLogInformation', response)
        if response.status_code == 202:
            deserialized = self._deserialize('FlowLogInformation', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
def get_flow_log_status(
self, resource_group_name, network_watcher_name, target_resource_id, custom_headers=None, raw=False, polling=True, **operation_config):
"""Queries status of flow log and traffic analytics (optional) on a
specified resource.
:param resource_group_name: The name of the network watcher resource
group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher resource.
:type network_watcher_name: str
:param target_resource_id: The target resource where getting the flow
log and traffic analytics (optional) status.
:type target_resource_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns FlowLogInformation or
ClientRawResponse<FlowLogInformation> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2019_08_01.models.FlowLogInformation]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2019_08_01.models.FlowLogInformation]]
:raises:
:class:`ErrorResponseException<azure.mgmt.network.v2019_08_01.models.ErrorResponseException>`
"""
raw_result = self._get_flow_log_status_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
target_resource_id=target_resource_id,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('FlowLogInformation', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
get_flow_log_status.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/queryFlowLogStatus'}
    def _check_connectivity_initial(
            self, resource_group_name, network_watcher_name, parameters, custom_headers=None, raw=False, **operation_config):
        """Send the initial connectivityCheck POST request for the
        check_connectivity long-running operation.

        :return: Deserialized ``ConnectivityInformation`` on 200/202 (a 202
         response means the operation is still in progress), or a
         ``ClientRawResponse`` wrapping it when ``raw`` is True.
        :raises: ``models.ErrorResponseException`` for any other status code.
        """
        # Construct URL
        url = self.check_connectivity.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        body_content = self._serialize.body(parameters, 'ConnectivityParameters')
        # Construct and send request
        request = self._client.post(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)
        if response.status_code not in [200, 202]:
            raise models.ErrorResponseException(self._deserialize, response)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('ConnectivityInformation', response)
        if response.status_code == 202:
            deserialized = self._deserialize('ConnectivityInformation', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
def check_connectivity(
self, resource_group_name, network_watcher_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Verifies the possibility of establishing a direct TCP connection from a
virtual machine to a given endpoint including another VM or an
arbitrary remote server.
:param resource_group_name: The name of the network watcher resource
group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher resource.
:type network_watcher_name: str
:param parameters: Parameters that determine how the connectivity
check will be performed.
:type parameters:
~azure.mgmt.network.v2019_08_01.models.ConnectivityParameters
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns ConnectivityInformation
or ClientRawResponse<ConnectivityInformation> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2019_08_01.models.ConnectivityInformation]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2019_08_01.models.ConnectivityInformation]]
:raises:
:class:`ErrorResponseException<azure.mgmt.network.v2019_08_01.models.ErrorResponseException>`
"""
raw_result = self._check_connectivity_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('ConnectivityInformation', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
check_connectivity.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectivityCheck'}
    def _get_azure_reachability_report_initial(
            self, resource_group_name, network_watcher_name, parameters, custom_headers=None, raw=False, **operation_config):
        """Send the initial POST that starts the Azure reachability-report
        long-running operation and deserialize its first response.

        Both 200 and 202 carry an AzureReachabilityReport payload; the public
        ``get_azure_reachability_report`` wrapper polls to completion.
        """
        # Construct URL
        url = self.get_azure_reachability_report.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # Unique id so the request can be correlated in service-side logs.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        body_content = self._serialize.body(parameters, 'AzureReachabilityReportParameters')
        # Construct and send request
        request = self._client.post(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)
        if response.status_code not in [200, 202]:
            raise models.ErrorResponseException(self._deserialize, response)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('AzureReachabilityReport', response)
        if response.status_code == 202:
            deserialized = self._deserialize('AzureReachabilityReport', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
def get_azure_reachability_report(
self, resource_group_name, network_watcher_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Gets the relative latency score for internet service providers from a
specified location to Azure regions.
:param resource_group_name: The name of the network watcher resource
group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher resource.
:type network_watcher_name: str
:param parameters: Parameters that determine Azure reachability report
configuration.
:type parameters:
~azure.mgmt.network.v2019_08_01.models.AzureReachabilityReportParameters
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns AzureReachabilityReport
or ClientRawResponse<AzureReachabilityReport> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2019_08_01.models.AzureReachabilityReport]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2019_08_01.models.AzureReachabilityReport]]
:raises:
:class:`ErrorResponseException<azure.mgmt.network.v2019_08_01.models.ErrorResponseException>`
"""
raw_result = self._get_azure_reachability_report_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('AzureReachabilityReport', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
get_azure_reachability_report.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/azureReachabilityReport'}
    def _list_available_providers_initial(
            self, resource_group_name, network_watcher_name, parameters, custom_headers=None, raw=False, **operation_config):
        """Send the initial POST that starts the available-providers-list
        long-running operation and deserialize its first response.

        Both 200 and 202 carry an AvailableProvidersList payload; the public
        ``list_available_providers`` wrapper polls to completion.
        """
        # Construct URL
        url = self.list_available_providers.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # Unique id so the request can be correlated in service-side logs.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        body_content = self._serialize.body(parameters, 'AvailableProvidersListParameters')
        # Construct and send request
        request = self._client.post(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)
        if response.status_code not in [200, 202]:
            raise models.ErrorResponseException(self._deserialize, response)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('AvailableProvidersList', response)
        if response.status_code == 202:
            deserialized = self._deserialize('AvailableProvidersList', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
def list_available_providers(
self, resource_group_name, network_watcher_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Lists all available internet service providers for a specified Azure
region.
:param resource_group_name: The name of the network watcher resource
group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher resource.
:type network_watcher_name: str
:param parameters: Parameters that scope the list of available
providers.
:type parameters:
~azure.mgmt.network.v2019_08_01.models.AvailableProvidersListParameters
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns AvailableProvidersList
or ClientRawResponse<AvailableProvidersList> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2019_08_01.models.AvailableProvidersList]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2019_08_01.models.AvailableProvidersList]]
:raises:
:class:`ErrorResponseException<azure.mgmt.network.v2019_08_01.models.ErrorResponseException>`
"""
raw_result = self._list_available_providers_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('AvailableProvidersList', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
list_available_providers.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/availableProvidersList'}
    def _get_network_configuration_diagnostic_initial(
            self, resource_group_name, network_watcher_name, parameters, custom_headers=None, raw=False, **operation_config):
        """Send the initial POST that starts the network-configuration
        diagnostic long-running operation and deserialize its first response.

        Both 200 and 202 carry a NetworkConfigurationDiagnosticResponse
        payload; the public ``get_network_configuration_diagnostic`` wrapper
        polls to completion.
        """
        # Construct URL
        url = self.get_network_configuration_diagnostic.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # Unique id so the request can be correlated in service-side logs.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        body_content = self._serialize.body(parameters, 'NetworkConfigurationDiagnosticParameters')
        # Construct and send request
        request = self._client.post(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)
        if response.status_code not in [200, 202]:
            raise models.ErrorResponseException(self._deserialize, response)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('NetworkConfigurationDiagnosticResponse', response)
        if response.status_code == 202:
            deserialized = self._deserialize('NetworkConfigurationDiagnosticResponse', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
def get_network_configuration_diagnostic(
self, resource_group_name, network_watcher_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Gets Network Configuration Diagnostic data to help customers understand
and debug network behavior. It provides detailed information on what
security rules were applied to a specified traffic flow and the result
of evaluating these rules. Customers must provide details of a flow
like source, destination, protocol, etc. The API returns whether
traffic was allowed or denied, the rules evaluated for the specified
flow and the evaluation results.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param parameters: Parameters to get network configuration diagnostic.
:type parameters:
~azure.mgmt.network.v2019_08_01.models.NetworkConfigurationDiagnosticParameters
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns
NetworkConfigurationDiagnosticResponse or
ClientRawResponse<NetworkConfigurationDiagnosticResponse> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2019_08_01.models.NetworkConfigurationDiagnosticResponse]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2019_08_01.models.NetworkConfigurationDiagnosticResponse]]
:raises:
:class:`ErrorResponseException<azure.mgmt.network.v2019_08_01.models.ErrorResponseException>`
"""
raw_result = self._get_network_configuration_diagnostic_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('NetworkConfigurationDiagnosticResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
get_network_configuration_diagnostic.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/networkConfigurationDiagnostic'}
| 49.653317 | 225 | 0.694402 |
import uuid
from msrest.pipeline import ClientRawResponse
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class NetworkWatchersOperations(object):
models = models
    def __init__(self, client, config, serializer, deserializer):
        """Store the service client, configuration and (de)serializers
        shared by every operation in this group.
        """
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        # Constant API version sent as the ``api-version`` query parameter.
        self.api_version = "2019-08-01"
        self.config = config
    def create_or_update(
            self, resource_group_name, network_watcher_name, parameters, custom_headers=None, raw=False, **operation_config):
        """Create or update a network watcher in the specified resource group.

        PUTs *parameters* (a NetworkWatcher model) and returns the resulting
        NetworkWatcher — deserialized from a 200 or 201 response — or a
        ClientRawResponse wrapping it when ``raw`` is True.
        """
        # Construct URL
        url = self.create_or_update.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        body_content = self._serialize.body(parameters, 'NetworkWatcher')
        # Construct and send request
        request = self._client.put(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)
        if response.status_code not in [200, 201]:
            raise models.ErrorResponseException(self._deserialize, response)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('NetworkWatcher', response)
        if response.status_code == 201:
            deserialized = self._deserialize('NetworkWatcher', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}'}
    def get(
            self, resource_group_name, network_watcher_name, custom_headers=None, raw=False, **operation_config):
        """Get the specified network watcher by resource group and name.

        Returns a NetworkWatcher (deserialized from a 200 response) or a
        ClientRawResponse wrapping it when ``raw`` is True.
        """
        # Construct URL
        url = self.get.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)
        if response.status_code not in [200]:
            raise models.ErrorResponseException(self._deserialize, response)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('NetworkWatcher', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}'}
    def _delete_initial(
            self, resource_group_name, network_watcher_name, custom_headers=None, raw=False, **operation_config):
        """Send the initial DELETE for the delete long-running operation.

        Accepts 202 (in progress) or 204 (already gone); there is no response
        body. Returns a ClientRawResponse when ``raw`` is True, else None.
        """
        # Construct URL
        url = self.delete.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.delete(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)
        if response.status_code not in [202, 204]:
            raise models.ErrorResponseException(self._deserialize, response)
        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
def delete(
self, resource_group_name, network_watcher_name, custom_headers=None, raw=False, polling=True, **operation_config):
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}'}
    def update_tags(
            self, resource_group_name, network_watcher_name, tags=None, custom_headers=None, raw=False, **operation_config):
        """Update the tags of the specified network watcher via PATCH.

        Wraps *tags* in a TagsObject body and returns the updated
        NetworkWatcher (or a ClientRawResponse when ``raw`` is True).
        """
        # Wrap the bare tags dict in the model the service expects.
        parameters = models.TagsObject(tags=tags)
        # Construct URL
        url = self.update_tags.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        body_content = self._serialize.body(parameters, 'TagsObject')
        # Construct and send request
        request = self._client.patch(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)
        if response.status_code not in [200]:
            raise models.ErrorResponseException(self._deserialize, response)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('NetworkWatcher', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}'}
    def list(
            self, resource_group_name, custom_headers=None, raw=False, **operation_config):
        """List all network watchers in the given resource group.

        Returns a NetworkWatcherPaged iterator that lazily fetches pages via
        the nested ``internal_paging`` callback.
        """
        def prepare_request(next_link=None):
            # First page uses the templated URL; later pages follow next_link
            # verbatim (it already embeds the query string).
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Accept'] = 'application/json'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct request
            request = self._client.get(url, query_parameters, header_parameters)
            return request
        def internal_paging(next_link=None):
            # Fetch one page; called repeatedly by the paged collection.
            request = prepare_request(next_link)
            response = self._client.send(request, stream=False, **operation_config)
            if response.status_code not in [200]:
                raise models.ErrorResponseException(self._deserialize, response)
            return response
        # Deserialize response
        header_dict = None
        if raw:
            header_dict = {}
        deserialized = models.NetworkWatcherPaged(internal_paging, self._deserialize.dependencies, header_dict)
        return deserialized
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers'}
    def list_all(
            self, custom_headers=None, raw=False, **operation_config):
        """List all network watchers across the whole subscription.

        Returns a NetworkWatcherPaged iterator that lazily fetches pages via
        the nested ``internal_paging`` callback.
        """
        def prepare_request(next_link=None):
            # First page uses the templated URL; later pages follow next_link
            # verbatim (it already embeds the query string).
            if not next_link:
                # Construct URL
                url = self.list_all.metadata['url']
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Accept'] = 'application/json'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct request
            request = self._client.get(url, query_parameters, header_parameters)
            return request
        def internal_paging(next_link=None):
            # Fetch one page; called repeatedly by the paged collection.
            request = prepare_request(next_link)
            response = self._client.send(request, stream=False, **operation_config)
            if response.status_code not in [200]:
                raise models.ErrorResponseException(self._deserialize, response)
            return response
        # Deserialize response
        header_dict = None
        if raw:
            header_dict = {}
        deserialized = models.NetworkWatcherPaged(internal_paging, self._deserialize.dependencies, header_dict)
        return deserialized
    list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkWatchers'}
    def get_topology(
            self, resource_group_name, network_watcher_name, parameters, custom_headers=None, raw=False, **operation_config):
        """Get the current network topology by resource group (synchronous).

        POSTs TopologyParameters and returns a Topology model deserialized
        from a 200 response (or a ClientRawResponse when ``raw`` is True).
        """
        # Construct URL
        url = self.get_topology.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        body_content = self._serialize.body(parameters, 'TopologyParameters')
        # Construct and send request
        request = self._client.post(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)
        if response.status_code not in [200]:
            raise models.ErrorResponseException(self._deserialize, response)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('Topology', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    get_topology.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/topology'}
    def _verify_ip_flow_initial(
            self, resource_group_name, network_watcher_name, parameters, custom_headers=None, raw=False, **operation_config):
        """Send the initial POST that starts the IP-flow-verify long-running
        operation and deserialize its first response.

        Both 200 and 202 carry a VerificationIPFlowResult payload; the public
        ``verify_ip_flow`` wrapper polls the operation to completion.
        """
        # Construct URL
        url = self.verify_ip_flow.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        body_content = self._serialize.body(parameters, 'VerificationIPFlowParameters')
        # Construct and send request
        request = self._client.post(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)
        if response.status_code not in [200, 202]:
            raise models.ErrorResponseException(self._deserialize, response)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('VerificationIPFlowResult', response)
        if response.status_code == 202:
            deserialized = self._deserialize('VerificationIPFlowResult', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
def verify_ip_flow(
self, resource_group_name, network_watcher_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
raw_result = self._verify_ip_flow_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('VerificationIPFlowResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
verify_ip_flow.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/ipFlowVerify'}
def _get_next_hop_initial(
self, resource_group_name, network_watcher_name, parameters, custom_headers=None, raw=False, **operation_config):
url = self.get_next_hop.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
body_content = self._serialize.body(parameters, 'NextHopParameters')
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('NextHopResult', response)
if response.status_code == 202:
deserialized = self._deserialize('NextHopResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_next_hop(
self, resource_group_name, network_watcher_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
raw_result = self._get_next_hop_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('NextHopResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
get_next_hop.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/nextHop'}
def _get_vm_security_rules_initial(
self, resource_group_name, network_watcher_name, target_resource_id, custom_headers=None, raw=False, **operation_config):
parameters = models.SecurityGroupViewParameters(target_resource_id=target_resource_id)
url = self.get_vm_security_rules.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
body_content = self._serialize.body(parameters, 'SecurityGroupViewParameters')
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('SecurityGroupViewResult', response)
if response.status_code == 202:
deserialized = self._deserialize('SecurityGroupViewResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_vm_security_rules(
self, resource_group_name, network_watcher_name, target_resource_id, custom_headers=None, raw=False, polling=True, **operation_config):
raw_result = self._get_vm_security_rules_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
target_resource_id=target_resource_id,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('SecurityGroupViewResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
get_vm_security_rules.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/securityGroupView'}
def _get_troubleshooting_initial(
self, resource_group_name, network_watcher_name, parameters, custom_headers=None, raw=False, **operation_config):
url = self.get_troubleshooting.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
body_content = self._serialize.body(parameters, 'TroubleshootingParameters')
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('TroubleshootingResult', response)
if response.status_code == 202:
deserialized = self._deserialize('TroubleshootingResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_troubleshooting(
self, resource_group_name, network_watcher_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
raw_result = self._get_troubleshooting_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('TroubleshootingResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
get_troubleshooting.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/troubleshoot'}
def _get_troubleshooting_result_initial(
self, resource_group_name, network_watcher_name, target_resource_id, custom_headers=None, raw=False, **operation_config):
parameters = models.QueryTroubleshootingParameters(target_resource_id=target_resource_id)
url = self.get_troubleshooting_result.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
body_content = self._serialize.body(parameters, 'QueryTroubleshootingParameters')
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('TroubleshootingResult', response)
if response.status_code == 202:
deserialized = self._deserialize('TroubleshootingResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_troubleshooting_result(
self, resource_group_name, network_watcher_name, target_resource_id, custom_headers=None, raw=False, polling=True, **operation_config):
raw_result = self._get_troubleshooting_result_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
target_resource_id=target_resource_id,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('TroubleshootingResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
get_troubleshooting_result.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/queryTroubleshootResult'}
def _set_flow_log_configuration_initial(
self, resource_group_name, network_watcher_name, parameters, custom_headers=None, raw=False, **operation_config):
url = self.set_flow_log_configuration.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
body_content = self._serialize.body(parameters, 'FlowLogInformation')
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('FlowLogInformation', response)
if response.status_code == 202:
deserialized = self._deserialize('FlowLogInformation', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def set_flow_log_configuration(
self, resource_group_name, network_watcher_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
raw_result = self._set_flow_log_configuration_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('FlowLogInformation', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
set_flow_log_configuration.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/configureFlowLog'}
def _get_flow_log_status_initial(
self, resource_group_name, network_watcher_name, target_resource_id, custom_headers=None, raw=False, **operation_config):
parameters = models.FlowLogStatusParameters(target_resource_id=target_resource_id)
url = self.get_flow_log_status.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
body_content = self._serialize.body(parameters, 'FlowLogStatusParameters')
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('FlowLogInformation', response)
if response.status_code == 202:
deserialized = self._deserialize('FlowLogInformation', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_flow_log_status(
self, resource_group_name, network_watcher_name, target_resource_id, custom_headers=None, raw=False, polling=True, **operation_config):
raw_result = self._get_flow_log_status_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
target_resource_id=target_resource_id,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('FlowLogInformation', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
get_flow_log_status.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/queryFlowLogStatus'}
def _check_connectivity_initial(
self, resource_group_name, network_watcher_name, parameters, custom_headers=None, raw=False, **operation_config):
url = self.check_connectivity.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
body_content = self._serialize.body(parameters, 'ConnectivityParameters')
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ConnectivityInformation', response)
if response.status_code == 202:
deserialized = self._deserialize('ConnectivityInformation', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def check_connectivity(
self, resource_group_name, network_watcher_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
raw_result = self._check_connectivity_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('ConnectivityInformation', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
check_connectivity.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectivityCheck'}
def _get_azure_reachability_report_initial(
self, resource_group_name, network_watcher_name, parameters, custom_headers=None, raw=False, **operation_config):
url = self.get_azure_reachability_report.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
body_content = self._serialize.body(parameters, 'AzureReachabilityReportParameters')
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('AzureReachabilityReport', response)
if response.status_code == 202:
deserialized = self._deserialize('AzureReachabilityReport', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_azure_reachability_report(
self, resource_group_name, network_watcher_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
raw_result = self._get_azure_reachability_report_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('AzureReachabilityReport', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
get_azure_reachability_report.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/azureReachabilityReport'}
def _list_available_providers_initial(
self, resource_group_name, network_watcher_name, parameters, custom_headers=None, raw=False, **operation_config):
url = self.list_available_providers.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
body_content = self._serialize.body(parameters, 'AvailableProvidersListParameters')
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('AvailableProvidersList', response)
if response.status_code == 202:
deserialized = self._deserialize('AvailableProvidersList', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def list_available_providers(
self, resource_group_name, network_watcher_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
raw_result = self._list_available_providers_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('AvailableProvidersList', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
list_available_providers.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/availableProvidersList'}
def _get_network_configuration_diagnostic_initial(
self, resource_group_name, network_watcher_name, parameters, custom_headers=None, raw=False, **operation_config):
url = self.get_network_configuration_diagnostic.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
body_content = self._serialize.body(parameters, 'NetworkConfigurationDiagnosticParameters')
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('NetworkConfigurationDiagnosticResponse', response)
if response.status_code == 202:
deserialized = self._deserialize('NetworkConfigurationDiagnosticResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_network_configuration_diagnostic(
        self, resource_group_name, network_watcher_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
    """Start a network configuration diagnostic session and return a
    poller for the long-running operation.

    :param resource_group_name: name of the resource group containing
        the network watcher.
    :param network_watcher_name: name of the network watcher resource.
    :param parameters: a ``NetworkConfigurationDiagnosticParameters``
        model describing the diagnostic to run.
    :param custom_headers: optional dict of headers merged into the request.
    :param raw: if True the poller yields the ``ClientRawResponse``
        instead of the deserialized model.
    :param polling: True for the default ARM poller, False for no
        polling, or a custom polling object.
    :return: an ``LROPoller`` whose result is a
        ``NetworkConfigurationDiagnosticResponse`` (or raw response).
    """
    # Issue the initial POST with raw=True so the poller receives the
    # full HTTP response (needed to follow the Location header).
    raw_result = self._get_network_configuration_diagnostic_initial(
        resource_group_name=resource_group_name,
        network_watcher_name=network_watcher_name,
        parameters=parameters,
        custom_headers=custom_headers,
        raw=True,
        **operation_config
    )

    def get_long_running_output(response):
        # Called by the poller with the terminal response; deserialize
        # it into the response model (or wrap it when raw was requested).
        deserialized = self._deserialize('NetworkConfigurationDiagnosticResponse', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized

    # Per-call timeout overrides the client-level default.
    lro_delay = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)
    # Select the polling strategy: default ARM polling (final state is
    # fetched via the Location header), no polling, or a caller-supplied
    # polling object.
    if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
get_network_configuration_diagnostic.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/networkConfigurationDiagnostic'}
| true | true |
f72e6bfbeca4a53d02af3c2b08549ad0c766ce10 | 1,060 | py | Python | pq/jobs.py | quantmind/pulsar-queue | 26ec8cbc291a830bd338c31f8a129cbd15f87a69 | [
"BSD-3-Clause"
] | 64 | 2015-09-25T19:28:17.000Z | 2022-02-15T20:52:03.000Z | pq/jobs.py | quantmind/pulsar-queue | 26ec8cbc291a830bd338c31f8a129cbd15f87a69 | [
"BSD-3-Clause"
] | 16 | 2015-09-23T13:35:10.000Z | 2017-11-25T09:32:15.000Z | pq/jobs.py | quantmind/pulsar-queue | 26ec8cbc291a830bd338c31f8a129cbd15f87a69 | [
"BSD-3-Clause"
] | 11 | 2015-10-01T14:39:36.000Z | 2021-12-31T06:57:49.000Z | """Useful Job for the task queue.
Include this file in the ``task_paths`` list if you need them
"""
import sys
import os
import tempfile
from pq.api import job
@job()
async def execute_python(self, code=None):
    """Execute arbitrary python code on a subprocess. For example:

        tasks.queue_task('execute.python', code='print("Hello World!")')

    :param code: python source to run (must be a string)
    :return: the result of ``self.shell`` for the subprocess run
    """
    assert isinstance(code, str), "code must be a string"
    # mkstemp returns a raw OS file descriptor; wrap it with os.fdopen
    # so it is closed when the ``with`` block exits. (The previous code
    # re-opened the path and shadowed the descriptor, leaking it.)
    fd, path = tempfile.mkstemp(suffix='.py', text=True)
    try:
        with os.fdopen(fd, 'w') as fh:
            fh.write(code)
        command = '%s %s' % (sys.executable, path)
        result = await self.shell(command)
    finally:
        # Always remove the temporary script, even if the shell call fails.
        os.remove(path)
    return result
@job()
async def execute_python_script(self, script=None):
    """Run an existing python script file in a subprocess.

    The path must point at a readable file; it is executed with the
    same interpreter that runs the task queue.

    :param script: filesystem path to the script (must be a string)
    :return: the result of ``self.shell`` for the subprocess run
    """
    assert isinstance(script, str), "script must be a string"
    assert os.path.isfile(script), "script %s is not a file" % script
    cmd = ' '.join((sys.executable, script))
    return await self.shell(cmd)
| 26.5 | 72 | 0.649057 | import sys
import os
import tempfile
from pq.api import job
@job()
async def execute_python(self, code=None):
    """Execute arbitrary python code on a subprocess.

    :param code: python source to run (must be a string)
    :return: the result of ``self.shell`` for the subprocess run
    """
    assert isinstance(code, str), "code must be a string"
    # mkstemp returns a raw OS file descriptor; wrap it with os.fdopen
    # so it is closed when the ``with`` block exits. (The previous code
    # re-opened the path and shadowed the descriptor, leaking it.)
    fd, path = tempfile.mkstemp(suffix='.py', text=True)
    try:
        with os.fdopen(fd, 'w') as fh:
            fh.write(code)
        command = '%s %s' % (sys.executable, path)
        result = await self.shell(command)
    finally:
        # Always remove the temporary script, even if the shell call fails.
        os.remove(path)
    return result
@job()
async def execute_python_script(self, script=None):
    """Execute an existing python script file in a subprocess.

    :param script: filesystem path to the script (must be a string
        naming an existing file)
    :return: whatever ``self.shell`` returns for the subprocess run —
        presumably its output; confirm against the ``shell`` helper.
    """
    assert isinstance(script, str), "script must be a string"
    assert os.path.isfile(script), "script %s is not a file" % script
    # Run the script with the interpreter hosting this process.
    command = '%s %s' % (sys.executable, script)
    result = await self.shell(command)
    return result
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.