Dataset schema (one record per source file; ranges are min–max over the dataset, ⌀ marks nullable columns):

| column | type | range / distinct values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 4–721 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–57 |
| license_type | string | 2 classes |
| repo_name | string | length 5–91 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 321 classes |
| visit_date | timestamp[ns] | 2016-08-12 09:31:09 – 2023-09-06 10:45:07 |
| revision_date | timestamp[ns] | 2010-09-28 14:01:40 – 2023-09-06 06:22:19 |
| committer_date | timestamp[ns] | 2010-09-28 14:01:40 – 2023-09-06 06:22:19 |
| github_id | int64 | 426 – 681M |
| star_events_count | int64 | 101 – 243k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[ns] ⌀ | 2012-06-28 18:51:49 – 2023-09-14 21:59:16 |
| gha_created_at | timestamp[ns] ⌀ | 2008-02-11 22:55:26 – 2023-08-10 11:14:58 |
| gha_language | string | 147 classes |
| src_encoding | string | 26 classes |
| language | string | 2 classes |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 6 – 10.2M |
| extension | string | 115 classes |
| filename | string | length 3–113 |
| content | string | length 6–10.2M |

---
repo_name: dimagi/commcare-hq | path: /corehq/apps/formplayer_api/migrations/0001_drop_old_tables.py | filename: 0001_drop_old_tables.py
blob_id: 7aeb49961bf700f815df38b88710cf0c0a3b9b47 | directory_id: 578db86c51d44ebddd0dc7b1738985b3dc69eb74 | content_id: 046884ad9ef254284f1855fa85bbb6fd374f7049
snapshot_id: a43c7dd32b5f89c89fd5aa1b1359ab7301f4ff6b | revision_id: e7391ddae1af1dbf118211ecb52c83fc508aa656 | branch_name: refs/heads/master
detected_licenses: ["BSD-3-Clause"] | license_type: permissive | gha_license_id: BSD-3-Clause
visit_date: 2023-08-16T22:38:27.853437 | revision_date: 2023-08-16T19:07:19 | committer_date: 2023-08-16T19:07:19
github_id: 247,278 | star_events_count: 499 | fork_events_count: 203
gha_event_created_at: 2023-09-14T19:03:24 | gha_created_at: 2009-07-09T17:00:07 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 505 | extension: py
content:
# Generated by Django 2.2.27 on 2022-03-08 20:09
from django.db import migrations
class Migration(migrations.Migration):
dependencies = []
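    # The second argument to each RunSQL below is the reverse SQL; "SELECT 1"
    # is a harmless no-op that keeps the operation formally reversible even
    # though the dropped tables cannot actually be restored.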
operations = [
migrations.RunSQL("DROP TABLE IF EXISTS formplayer_entrysession", "SELECT 1"),
migrations.RunSQL("DROP TABLE IF EXISTS formplayer_session", "SELECT 1"),
migrations.RunSQL("DROP TABLE IF EXISTS formplayer_sqlstatus", "SELECT 1"),
migrations.RunSQL("DROP TABLE IF EXISTS formplayer_xform", "SELECT 1"),
]

---
repo_name: DataDog/integrations-core | path: /hyperv/tests/common.py | filename: common.py
blob_id: 36ebb9fb642c608d980d316cc1f14e15ff8f2c26 | directory_id: e7efae2b83216d9621bd93390959d652de779c3d | content_id: ea63fe2d85856dbdb849b84c5428faa876268ef3
snapshot_id: ee1886cc7655972b2791e6ab8a1c62ab35afdb47 | revision_id: 406072e4294edff5b46b513f0cdf7c2c00fac9d2 | branch_name: refs/heads/master
detected_licenses: ["BSD-3-Clause", "MIT", "BSD-3-Clause-Modification", "Unlicense", "Apache-2.0", "LGPL-3.0-only", "LicenseRef-scancode-public-domain", "BSD-2-Clause", "CC0-1.0"] | license_type: permissive | gha_license_id: BSD-3-Clause
visit_date: 2023-08-31T04:08:06.243593 | revision_date: 2023-08-30T18:22:10 | committer_date: 2023-08-30T18:22:10
github_id: 47,203,045 | star_events_count: 852 | fork_events_count: 1,548
gha_event_created_at: 2023-09-14T16:39:54 | gha_created_at: 2015-12-01T16:41:45 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 211 | extension: py
content:
# (C) Datadog, Inc. 2019-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
INSTANCE_REFRESH = {'refresh_counters': True}
INSTANCE_NO_REFRESH = {'refresh_counters': False}

---
repo_name: massquantity/LibRecommender | path: /libreco/tfops/features.py | filename: features.py
blob_id: 9e211e81a0deb0f591552525c72968a15ff8929f | directory_id: 2f1e3f24f2798507c9eb73185a955c9bfb735140 | content_id: 8c407ce685eb9da743e5f4af86eb797fe319c417
snapshot_id: e4f55b06b2208c794a3f97f7ff89413fa9beaffa | revision_id: 8d5fbe9c177f5b91c2b6f19a155a83320dd0e20c | branch_name: refs/heads/master
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT
visit_date: 2023-08-31T23:48:37.634663 | revision_date: 2023-08-20T11:58:15 | committer_date: 2023-08-20T11:58:15
github_id: 174,493,761 | star_events_count: 251 | fork_events_count: 55
gha_event_created_at: 2023-08-20T11:58:16 | gha_created_at: 2019-03-08T07:58:27 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 10,644 | extension: py
content:
from .variables import get_variable_from_graph
from .version import tf
from ..layers import embedding_lookup, layer_normalization
def compute_sparse_feats(
data_info,
multi_sparse_combiner,
all_sparse_indices,
var_name,
var_shape,
initializer=None,
regularizer=None,
reuse_layer=None,
scope_name="embedding",
flatten=False,
):
reuse = tf.AUTO_REUSE if reuse_layer else None
with tf.variable_scope(scope_name, reuse=reuse):
embed_var = tf.get_variable(
name=var_name,
shape=var_shape,
initializer=initializer,
regularizer=regularizer,
)
if (
data_info.multi_sparse_combine_info
and multi_sparse_combiner in ("sum", "mean", "sqrtn")
): # fmt: skip
embed_size = var_shape[1] if len(var_shape) == 2 else 1
sparse_embeds = multi_sparse_combine_embedding(
data_info,
embed_var,
all_sparse_indices,
multi_sparse_combiner,
embed_size,
)
else:
sparse_embeds = tf.nn.embedding_lookup(embed_var, all_sparse_indices)
if flatten:
sparse_embeds = tf.keras.layers.Flatten()(sparse_embeds)
return sparse_embeds
def multi_sparse_combine_embedding(
data_info, embed_var, all_sparse_indices, combiner, embed_size
):
field_offsets = data_info.multi_sparse_combine_info.field_offset
field_lens = data_info.multi_sparse_combine_info.field_len
feat_oovs = data_info.multi_sparse_combine_info.feat_oov
sparse_end = field_offsets[0]
# only one multi_sparse feature and no sparse features
if sparse_end == 0 and len(field_offsets) == 1:
result = multi_sparse_alone(
embed_var,
all_sparse_indices,
combiner,
embed_size,
field_offsets[0],
field_lens[0],
feat_oovs[0],
)
else:
if sparse_end > 0:
sparse_indices = all_sparse_indices[:, :sparse_end]
sparse_embedding = tf.nn.embedding_lookup(embed_var, sparse_indices)
result = [sparse_embedding]
else:
result = []
for offset, length, oov in zip(field_offsets, field_lens, feat_oovs):
result.append(
multi_sparse_alone(
embed_var,
all_sparse_indices,
combiner,
embed_size,
offset,
length,
oov,
)
)
result = tf.concat(result, axis=1)
return result
def multi_sparse_alone(
embed_var, all_sparse_indices, combiner, embed_size, offset, length, oov
):
variable_dim = len(embed_var.get_shape().as_list())
# oov feats are padded to 0-vector
oov_indices = [oov] if variable_dim == 1 else oov
zero_padding_op = tf.scatter_update(
embed_var, oov_indices, tf.zeros([embed_size], dtype=tf.float32)
)
multi_sparse_indices = all_sparse_indices[:, offset : offset + length]
with tf.control_dependencies([zero_padding_op]):
multi_sparse_embed = tf.nn.embedding_lookup(embed_var, multi_sparse_indices)
res_embed = tf.reduce_sum(multi_sparse_embed, axis=1, keepdims=True)
if combiner in ("mean", "sqrtn"):
multi_sparse_lens = tf.reduce_sum(
tf.cast(tf.not_equal(multi_sparse_indices, oov), tf.float32),
axis=1,
keepdims=True,
)
if combiner == "sqrtn":
multi_sparse_lens = tf.sqrt(multi_sparse_lens)
if variable_dim == 2:
multi_sparse_lens = tf.expand_dims(multi_sparse_lens, axis=1)
res_embed = tf.div_no_nan(res_embed, multi_sparse_lens)
return res_embed
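# Editor's worked example (not part of the original module): for one
# multi-sparse field with indices [3, 7, oov] and embedding rows e3, e7
# (the oov row is zeroed by the scatter_update above), the combiners give
#   sum   -> e3 + e7
#   mean  -> (e3 + e7) / 2          # divided by the count of non-oov indices
#   sqrtn -> (e3 + e7) / sqrt(2)    # divided by sqrt of that count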
def compute_dense_feats(
dense_values,
var_name,
var_shape,
initializer=None,
regularizer=None,
reuse_layer=None,
scope_name="embedding",
flatten=False,
):
if len(var_shape) == 2:
dense_values = dense_values[:, :, tf.newaxis]
reuse = tf.AUTO_REUSE if reuse_layer else None
with tf.variable_scope(scope_name, reuse=reuse):
embed_var = tf.get_variable(
name=var_name,
shape=var_shape,
initializer=initializer,
regularizer=regularizer,
)
batch_size = tf.shape(dense_values)[0]
multiple = [batch_size, 1] if len(var_shape) == 1 else [batch_size, 1, 1]
embed_var = tf.tile(tf.expand_dims(embed_var, axis=0), multiple)
dense_embeds = embed_var * dense_values
if flatten:
dense_embeds = tf.keras.layers.Flatten()(dense_embeds)
return dense_embeds
def combine_seq_features(data_info, feat_agg_mode):
"""Aggregate all item features together for sequence attention.
This operation assumes all variables have been initialized before.
Parameters
----------
data_info : `DataInfo` object.
feat_agg_mode : str
"concat" or "elementwise"
Returns
-------
Shape: V * K, where V is the total item num.
"""
item_embeds = embedding_lookup(
indices=tf.range(data_info.n_items + 1, dtype=tf.int32),
var_name="item_embeds_var",
reuse_layer=True,
)
if data_info.item_sparse_unique is not None:
# contains unique sparse field indices for each item
item_sparse_fields = tf.convert_to_tensor(
data_info.item_sparse_unique, dtype=tf.int32
)
# V * F_sparse * K
sparse_embeds = embedding_lookup(
indices=item_sparse_fields,
var_name="sparse_embeds_var",
reuse_layer=True,
)
else:
sparse_embeds = None
if data_info.item_dense_unique is not None:
# V * F_dense, contains unique dense values for each item
item_dense_values = tf.convert_to_tensor(
data_info.item_dense_unique, dtype=tf.float32
)
dense_embeds_var = get_variable_from_graph("dense_embeds_var", "embedding")
# F_dense * K
item_dense_embeds = tf.gather(dense_embeds_var, data_info.item_dense_col.index)
# V * F_dense * K
dense_embeds = tf.multiply(
item_dense_values[:, :, tf.newaxis], item_dense_embeds[tf.newaxis, :, :]
)
else:
dense_embeds = None
if feat_agg_mode == "concat":
return _concat_features(item_embeds, sparse_embeds, dense_embeds)
else:
return _elementwise_features(item_embeds, sparse_embeds, dense_embeds)
def _concat_features(item_embeds, sparse_embeds, dense_embeds):
if sparse_embeds is not None:
sparse_embeds = tf.keras.layers.Flatten()(sparse_embeds)
if dense_embeds is not None:
dense_embeds = tf.keras.layers.Flatten()(dense_embeds)
if sparse_embeds is not None and dense_embeds is not None:
        return tf.concat([item_embeds, sparse_embeds, dense_embeds], axis=1)
elif sparse_embeds is not None:
return tf.concat([item_embeds, sparse_embeds], axis=1)
elif dense_embeds is not None:
return tf.concat([item_embeds, dense_embeds], axis=1)
else:
return item_embeds
def _elementwise_features(item_embeds, sparse_embeds, dense_embeds):
if sparse_embeds is not None:
with tf.variable_scope("elementwise_sparse_feats"):
sparse_embeds = tf.reduce_sum(layer_normalization(sparse_embeds), axis=1)
if dense_embeds is not None:
with tf.variable_scope("elementwise_dense_feats"):
dense_embeds = tf.reduce_sum(layer_normalization(dense_embeds), axis=1)
if sparse_embeds is not None and dense_embeds is not None:
return item_embeds * (sparse_embeds + dense_embeds + 1.0)
elif sparse_embeds is not None:
return item_embeds * (sparse_embeds + 1.0)
elif dense_embeds is not None:
return item_embeds * (dense_embeds + 1.0)
else:
return item_embeds
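# Editor's note on _elementwise_features above: the layer-normalized
# sparse/dense embeddings are summed over the field axis and act as a
# multiplicative gate on the item embedding; the "+ 1.0" preserves the
# identity mapping when the feature signal is zero.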
def get_feed_dict(
model,
user_indices=None,
item_indices=None,
labels=None,
sparse_indices=None,
user_sparse_indices=None,
item_sparse_indices=None,
dense_values=None,
user_dense_values=None,
item_dense_values=None,
user_interacted_seq=None,
user_interacted_len=None,
is_training=False,
):
feed_dict = dict()
if hasattr(model, "user_indices") and user_indices is not None:
feed_dict.update({model.user_indices: user_indices})
if hasattr(model, "item_indices") and item_indices is not None:
feed_dict.update({model.item_indices: item_indices})
if hasattr(model, "labels") and labels is not None:
feed_dict.update({model.labels: labels})
if hasattr(model, "is_training"):
feed_dict.update({model.is_training: is_training})
if hasattr(model, "sparse_indices") and sparse_indices is not None:
feed_dict.update({model.sparse_indices: sparse_indices})
if hasattr(model, "user_sparse_indices") and user_sparse_indices is not None:
feed_dict.update({model.user_sparse_indices: user_sparse_indices})
if hasattr(model, "item_sparse_indices") and item_sparse_indices is not None:
feed_dict.update({model.item_sparse_indices: item_sparse_indices})
if hasattr(model, "dense_values") and dense_values is not None:
feed_dict.update({model.dense_values: dense_values})
if hasattr(model, "user_dense_values") and user_dense_values is not None:
feed_dict.update({model.user_dense_values: user_dense_values})
if hasattr(model, "item_dense_values") and item_dense_values is not None:
feed_dict.update({model.item_dense_values: item_dense_values})
if user_interacted_seq is not None:
feed_dict.update(
{
model.user_interacted_seq: user_interacted_seq,
model.user_interacted_len: user_interacted_len,
}
)
return feed_dict
def get_sparse_feed_dict(
model,
sparse_tensor_indices,
sparse_tensor_values,
user_sparse_indices=None,
user_dense_values=None,
batch_size=1,
is_training=False,
):
feed_dict = {
model.item_interaction_indices: sparse_tensor_indices,
model.item_interaction_values: sparse_tensor_values,
model.modified_batch_size: batch_size,
model.is_training: is_training,
}
if hasattr(model, "user_sparse_indices") and user_sparse_indices is not None:
feed_dict.update({model.user_sparse_indices: user_sparse_indices})
if hasattr(model, "user_dense_values") and user_dense_values is not None:
feed_dict.update({model.user_dense_values: user_dense_values})
return feed_dict
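# Editor's note: every entry in get_feed_dict/get_sparse_feed_dict is guarded
# with hasattr(model, ...), so the same helper serves models whose graphs
# define different subsets of placeholders; inputs a model lacks are skipped.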

---
repo_name: alipay/alipay-sdk-python-all | path: /alipay/aop/api/response/AlipayMarketingPassInstanceQueryResponse.py | filename: AlipayMarketingPassInstanceQueryResponse.py
blob_id: 1c9f617cc23cf6603eafdef83bd54b33dd96e524 | directory_id: 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | content_id: 4031ca611899387c385c0b9b8e46b93c090fc9e5
snapshot_id: 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | revision_id: 1fad300587c9e7e099747305ba9077d4cd7afde9 | branch_name: refs/heads/master
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0
visit_date: 2023-08-27T21:35:01.778771 | revision_date: 2023-08-23T07:12:26 | committer_date: 2023-08-23T07:12:26
github_id: 133,338,689 | star_events_count: 247 | fork_events_count: 70
gha_event_created_at: 2023-04-25T04:54:02 | gha_created_at: 2018-05-14T09:40:54 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,190 | extension: py
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.PassInstanceDetail import PassInstanceDetail
class AlipayMarketingPassInstanceQueryResponse(AlipayResponse):
def __init__(self):
super(AlipayMarketingPassInstanceQueryResponse, self).__init__()
self._instance_list = None
self._page_num = None
self._page_size = None
self._total = None
self._total_page = None
@property
def instance_list(self):
return self._instance_list
@instance_list.setter
def instance_list(self, value):
if isinstance(value, list):
self._instance_list = list()
for i in value:
if isinstance(i, PassInstanceDetail):
self._instance_list.append(i)
else:
self._instance_list.append(PassInstanceDetail.from_alipay_dict(i))
@property
def page_num(self):
return self._page_num
@page_num.setter
def page_num(self, value):
self._page_num = value
@property
def page_size(self):
return self._page_size
@page_size.setter
def page_size(self, value):
self._page_size = value
@property
def total(self):
return self._total
@total.setter
def total(self, value):
self._total = value
@property
def total_page(self):
return self._total_page
@total_page.setter
def total_page(self, value):
self._total_page = value
def parse_response_content(self, response_content):
response = super(AlipayMarketingPassInstanceQueryResponse, self).parse_response_content(response_content)
if 'instance_list' in response:
self.instance_list = response['instance_list']
if 'page_num' in response:
self.page_num = response['page_num']
if 'page_size' in response:
self.page_size = response['page_size']
if 'total' in response:
self.total = response['total']
if 'total_page' in response:
self.total_page = response['total_page']

---
repo_name: home-assistant/core | path: /tests/components/ruckus_unleashed/test_config_flow.py | filename: test_config_flow.py
blob_id: 4e2b408489b685718c3473f77f9b9e6355c572bf | directory_id: 96dcea595e7c16cec07b3f649afd65f3660a0bad | content_id: c55d531b0cb7be1aa9d1c8872b1ad7322ac7bdaa
snapshot_id: 3455eac2e9d925c92d30178643b1aaccf3a6484f | revision_id: 80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743 | branch_name: refs/heads/dev
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0
visit_date: 2023-08-31T15:41:06.299469 | revision_date: 2023-08-31T14:50:53 | committer_date: 2023-08-31T14:50:53
github_id: 12,888,993 | star_events_count: 35,501 | fork_events_count: 20,617
gha_event_created_at: 2023-09-14T21:50:15 | gha_created_at: 2013-09-17T07:29:48 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 5,786 | extension: py
content:
"""Test the Ruckus Unleashed config flow."""
from datetime import timedelta
from unittest.mock import AsyncMock, patch
from aioruckus.const import (
ERROR_CONNECT_TEMPORARY,
ERROR_CONNECT_TIMEOUT,
ERROR_LOGIN_INCORRECT,
)
from aioruckus.exceptions import AuthenticationError
from homeassistant import config_entries, data_entry_flow
from homeassistant.components.ruckus_unleashed.const import DOMAIN
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import HomeAssistant
from homeassistant.util import utcnow
from . import CONFIG, DEFAULT_TITLE, RuckusAjaxApiPatchContext, mock_config_entry
from tests.common import async_fire_time_changed
async def test_form(hass: HomeAssistant) -> None:
"""Test we get the form."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
with RuckusAjaxApiPatchContext(), patch(
"homeassistant.components.ruckus_unleashed.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
CONFIG,
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == DEFAULT_TITLE
assert result2["data"] == CONFIG
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_invalid_auth(hass: HomeAssistant) -> None:
"""Test we handle invalid auth."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with RuckusAjaxApiPatchContext(
login_mock=AsyncMock(side_effect=AuthenticationError(ERROR_LOGIN_INCORRECT))
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
CONFIG,
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "invalid_auth"}
async def test_form_user_reauth(hass: HomeAssistant) -> None:
"""Test reauth."""
entry = mock_config_entry()
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_REAUTH}
)
flows = hass.config_entries.flow.async_progress()
assert len(flows) == 1
assert "flow_id" in flows[0]
assert result["type"] == data_entry_flow.FlowResultType.FORM
assert result["step_id"] == "reauth_confirm"
result2 = await hass.config_entries.flow.async_configure(
flows[0]["flow_id"],
user_input={
CONF_HOST: "1.2.3.4",
CONF_USERNAME: "new_name",
CONF_PASSWORD: "new_pass",
},
)
await hass.async_block_till_done()
assert result2["type"] == data_entry_flow.FlowResultType.FORM
assert result2["step_id"] == "user"
async def test_form_cannot_connect(hass: HomeAssistant) -> None:
"""Test we handle cannot connect error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with RuckusAjaxApiPatchContext(
login_mock=AsyncMock(side_effect=ConnectionError(ERROR_CONNECT_TIMEOUT))
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
CONFIG,
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "cannot_connect"}
async def test_form_unexpected_response(hass: HomeAssistant) -> None:
"""Test we handle unknown error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with RuckusAjaxApiPatchContext(
login_mock=AsyncMock(
side_effect=ConnectionRefusedError(ERROR_CONNECT_TEMPORARY)
)
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
CONFIG,
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "cannot_connect"}
async def test_form_cannot_connect_unknown_serial(hass: HomeAssistant) -> None:
"""Test we handle cannot connect error on invalid serial number."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
with RuckusAjaxApiPatchContext(system_info={}):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
CONFIG,
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "cannot_connect"}
async def test_form_duplicate_error(hass: HomeAssistant) -> None:
"""Test we handle duplicate error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with RuckusAjaxApiPatchContext():
await hass.config_entries.flow.async_configure(
result["flow_id"],
CONFIG,
)
future = utcnow() + timedelta(minutes=60)
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
CONFIG,
)
assert result2["type"] == "abort"
assert result2["reason"] == "already_configured"

---
repo_name: gtalarico/ironpython-stubs | path: /release/stubs.min/Tekla/Structures/ModelInternal_parts/AreWeUnitTesting.py | filename: AreWeUnitTesting.py
blob_id: 13f76ad5ff938125da1af565ee3d92d0e49a64da | directory_id: afbae26b958b5ef20548402a65002dcc8e55b66a | content_id: e0f86b9b1677479a30c66b8a337b8c917e4a06bd
snapshot_id: d875cb8932c7644f807dc6fde9dd513d159e4f5c | revision_id: c7f6a6cb197e3949e40a4880a0b2a44e72d0a940 | branch_name: refs/heads/master
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: NOASSERTION
visit_date: 2023-07-12T01:43:47.295560 | revision_date: 2022-05-23T18:12:06 | committer_date: 2022-05-23T18:12:06
github_id: 95,340,553 | star_events_count: 235 | fork_events_count: 88
gha_event_created_at: 2023-07-05T06:36:28 | gha_created_at: 2017-06-25T05:30:46 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 68 | extension: py
content:
class AreWeUnitTesting(object):
# no doc
Value=False
__all__=[]

---
repo_name: jart/cosmopolitan | path: /third_party/python/Lib/tkinter/dnd.py | filename: dnd.py
blob_id: 2638dbabd225ac59aa431a192d3a4e762e0cb789 | directory_id: fdbb74a95924e2677466614f6ab6e2bb13b2a95a | content_id: e0971a26adde51d2abf2ace33453a890e7b740e6
snapshot_id: fb11b5658939023977060a7c6c71a74093d9cb44 | revision_id: 0d748ad58e1063dd1f8560f18a0c75293b9415b7 | branch_name: refs/heads/master
detected_licenses: ["Python-2.0", "GPL-1.0-or-later", "LicenseRef-scancode-python-cwi", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-other-copyleft", "ISC"] | license_type: permissive | gha_license_id: ISC
visit_date: 2023-09-06T09:17:29.303607 | revision_date: 2023-09-02T03:49:13 | committer_date: 2023-09-02T03:50:18
github_id: 272,457,606 | star_events_count: 11,887 | fork_events_count: 435
gha_event_created_at: 2023-09-14T17:47:58 | gha_created_at: 2020-06-15T14:16:13 | gha_language: C
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 11,488 | extension: py
content:
"""Drag-and-drop support for Tkinter.
This is very preliminary. I currently only support dnd *within* one
application, between different windows (or within the same window).
I am trying to make this as generic as possible -- not dependent on
the use of a particular widget or icon type, etc. I also hope that
this will work with Pmw.
To enable an object to be dragged, you must create an event binding
for it that starts the drag-and-drop process. Typically, you should
bind <ButtonPress> to a callback function that you write. The function
should call Tkdnd.dnd_start(source, event), where 'source' is the
object to be dragged, and 'event' is the event that invoked the call
(the argument to your callback function). Even though this is a class
instantiation, the returned instance should not be stored -- it will
be kept alive automatically for the duration of the drag-and-drop.
When a drag-and-drop is already in process for the Tk interpreter, the
call is *ignored*; this normally averts starting multiple simultaneous
dnd processes, e.g. because different button callbacks all
dnd_start().
The object is *not* necessarily a widget -- it can be any
application-specific object that is meaningful to potential
drag-and-drop targets.
Potential drag-and-drop targets are discovered as follows. Whenever
the mouse moves, and at the start and end of a drag-and-drop move, the
Tk widget directly under the mouse is inspected. This is the target
widget (not to be confused with the target object, yet to be
determined). If there is no target widget, there is no dnd target
object. If there is a target widget, and it has an attribute
dnd_accept, this should be a function (or any callable object). The
function is called as dnd_accept(source, event), where 'source' is the
object being dragged (the object passed to dnd_start() above), and
'event' is the most recent event object (generally a <Motion> event;
it can also be <ButtonPress> or <ButtonRelease>). If the dnd_accept()
function returns something other than None, this is the new dnd target
object. If dnd_accept() returns None, or if the target widget has no
dnd_accept attribute, the target widget's parent is considered as the
target widget, and the search for a target object is repeated from
there. If necessary, the search is repeated all the way up to the
root widget. If none of the target widgets can produce a target
object, there is no target object (the target object is None).
The target object thus produced, if any, is called the new target
object. It is compared with the old target object (or None, if there
was no old target widget). There are several cases ('source' is the
source object, and 'event' is the most recent event object):
- Both the old and new target objects are None. Nothing happens.
- The old and new target objects are the same object. Its method
dnd_motion(source, event) is called.
- The old target object was None, and the new target object is not
None. The new target object's method dnd_enter(source, event) is
called.
- The new target object is None, and the old target object is not
None. The old target object's method dnd_leave(source, event) is
called.
- The old and new target objects differ and neither is None. The old
target object's method dnd_leave(source, event), and then the new
target object's method dnd_enter(source, event) is called.
Once this is done, the new target object replaces the old one, and the
Tk mainloop proceeds. The return value of the methods mentioned above
is ignored; if they raise an exception, the normal exception handling
mechanisms take over.
The drag-and-drop processes can end in two ways: a final target object
is selected, or no final target object is selected. When a final
target object is selected, it will always have been notified of the
potential drop by a call to its dnd_enter() method, as described
above, and possibly one or more calls to its dnd_motion() method; its
dnd_leave() method has not been called since the last call to
dnd_enter(). The target is notified of the drop by a call to its
method dnd_commit(source, event).
If no final target object is selected, and there was an old target
object, its dnd_leave(source, event) method is called to complete the
dnd sequence.
Finally, the source object is notified that the drag-and-drop process
is over, by a call to source.dnd_end(target, event), specifying either
the selected target object, or None if no target object was selected.
The source object can use this to implement the commit action; this is
sometimes simpler than to do it in the target's dnd_commit(). The
target's dnd_commit() method could then simply be aliased to
dnd_leave().
At any time during a dnd sequence, the application can cancel the
sequence by calling the cancel() method on the object returned by
dnd_start(). This will call dnd_leave() if a target is currently
active; it will never call dnd_commit().
"""
import tkinter
# The factory function
def dnd_start(source, event):
h = DndHandler(source, event)
if h.root:
return h
else:
return None
# The class that does the work
class DndHandler:
root = None
def __init__(self, source, event):
if event.num > 5:
return
root = event.widget._root()
try:
root.__dnd
return # Don't start recursive dnd
except AttributeError:
root.__dnd = self
self.root = root
self.source = source
self.target = None
self.initial_button = button = event.num
self.initial_widget = widget = event.widget
self.release_pattern = "<B%d-ButtonRelease-%d>" % (button, button)
self.save_cursor = widget['cursor'] or ""
widget.bind(self.release_pattern, self.on_release)
widget.bind("<Motion>", self.on_motion)
widget['cursor'] = "hand2"
def __del__(self):
root = self.root
self.root = None
if root:
try:
del root.__dnd
except AttributeError:
pass
def on_motion(self, event):
x, y = event.x_root, event.y_root
target_widget = self.initial_widget.winfo_containing(x, y)
source = self.source
new_target = None
while target_widget:
try:
attr = target_widget.dnd_accept
except AttributeError:
pass
else:
new_target = attr(source, event)
if new_target:
break
target_widget = target_widget.master
old_target = self.target
if old_target is new_target:
if old_target:
old_target.dnd_motion(source, event)
else:
if old_target:
self.target = None
old_target.dnd_leave(source, event)
if new_target:
new_target.dnd_enter(source, event)
self.target = new_target
def on_release(self, event):
self.finish(event, 1)
def cancel(self, event=None):
self.finish(event, 0)
def finish(self, event, commit=0):
target = self.target
source = self.source
widget = self.initial_widget
root = self.root
try:
del root.__dnd
self.initial_widget.unbind(self.release_pattern)
self.initial_widget.unbind("<Motion>")
widget['cursor'] = self.save_cursor
self.target = self.source = self.initial_widget = self.root = None
if target:
if commit:
target.dnd_commit(source, event)
else:
target.dnd_leave(source, event)
finally:
source.dnd_end(target, event)
# ----------------------------------------------------------------------
# The rest is here for testing and demonstration purposes only!
class Icon:
def __init__(self, name):
self.name = name
self.canvas = self.label = self.id = None
def attach(self, canvas, x=10, y=10):
if canvas is self.canvas:
self.canvas.coords(self.id, x, y)
return
if self.canvas:
self.detach()
if not canvas:
return
label = tkinter.Label(canvas, text=self.name,
borderwidth=2, relief="raised")
id = canvas.create_window(x, y, window=label, anchor="nw")
self.canvas = canvas
self.label = label
self.id = id
label.bind("<ButtonPress>", self.press)
def detach(self):
canvas = self.canvas
if not canvas:
return
id = self.id
label = self.label
self.canvas = self.label = self.id = None
canvas.delete(id)
label.destroy()
def press(self, event):
if dnd_start(self, event):
# where the pointer is relative to the label widget:
self.x_off = event.x
self.y_off = event.y
# where the widget is relative to the canvas:
self.x_orig, self.y_orig = self.canvas.coords(self.id)
def move(self, event):
x, y = self.where(self.canvas, event)
self.canvas.coords(self.id, x, y)
def putback(self):
self.canvas.coords(self.id, self.x_orig, self.y_orig)
def where(self, canvas, event):
# where the corner of the canvas is relative to the screen:
x_org = canvas.winfo_rootx()
y_org = canvas.winfo_rooty()
# where the pointer is relative to the canvas widget:
x = event.x_root - x_org
y = event.y_root - y_org
# compensate for initial pointer offset
return x - self.x_off, y - self.y_off
def dnd_end(self, target, event):
pass
class Tester:
def __init__(self, root):
self.top = tkinter.Toplevel(root)
self.canvas = tkinter.Canvas(self.top, width=100, height=100)
self.canvas.pack(fill="both", expand=1)
self.canvas.dnd_accept = self.dnd_accept
def dnd_accept(self, source, event):
return self
def dnd_enter(self, source, event):
self.canvas.focus_set() # Show highlight border
x, y = source.where(self.canvas, event)
x1, y1, x2, y2 = source.canvas.bbox(source.id)
dx, dy = x2-x1, y2-y1
self.dndid = self.canvas.create_rectangle(x, y, x+dx, y+dy)
self.dnd_motion(source, event)
def dnd_motion(self, source, event):
x, y = source.where(self.canvas, event)
x1, y1, x2, y2 = self.canvas.bbox(self.dndid)
self.canvas.move(self.dndid, x-x1, y-y1)
def dnd_leave(self, source, event):
self.top.focus_set() # Hide highlight border
self.canvas.delete(self.dndid)
self.dndid = None
def dnd_commit(self, source, event):
self.dnd_leave(source, event)
x, y = source.where(self.canvas, event)
source.attach(self.canvas, x, y)
def test():
root = tkinter.Tk()
root.geometry("+1+1")
tkinter.Button(command=root.quit, text="Quit").pack()
t1 = Tester(root)
t1.top.geometry("+1+60")
t2 = Tester(root)
t2.top.geometry("+120+60")
t3 = Tester(root)
t3.top.geometry("+240+60")
i1 = Icon("ICON1")
i2 = Icon("ICON2")
i3 = Icon("ICON3")
i1.attach(t1.canvas)
i2.attach(t2.canvas)
i3.attach(t3.canvas)
root.mainloop()
if __name__ == '__main__':
test()

---
repo_name: anzhihe/learning | path: /python/source_code/source_code_of_lp3thw/ex34.py | filename: ex34.py
blob_id: 2baedd5e5f362ba578e02de605d3ee73008be181 | directory_id: 0e4860fecfdd34a3255003cc8c8df086c14083dd | content_id: a3b1886a648cffae03d4e3e344dbd26efde667db
snapshot_id: 503ab9a58f280227011da5eaa4b14b46c678e6f3 | revision_id: 66f7f801e1395207778484e1543ea26309d4b354 | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | gha_license_id: null
visit_date: 2023-08-08T11:42:11.983677 | revision_date: 2023-07-29T09:19:47 | committer_date: 2023-07-29T09:19:47
github_id: 188,768,643 | star_events_count: 1,443 | fork_events_count: 617
gha_event_created_at: 2023-08-24T02:10:34 | gha_created_at: 2019-05-27T04:04:10 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 634 | extension: py
content:
animals =['bear','python','peacock','kangaroo','whale','platypus']
bear= animals[0]
for i in range(0, 6):  # Think: by "cardinal number" counting, 0 through 6 would be 7 numbers, so why does this print only 6?
print(f"This is the {i} index of animals.")
print(animals[i])
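# Answer to the question above: range(0, 6) is a half-open interval, so it
# yields 0, 1, 2, 3, 4, 5 -- six indices, one for each animal in the list.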
print("--------START NOW----------")
print("The animal at 1 is: ",animals[1])
print("The 3rd animal is: ",animals[2])
print("The 1st animal is: ",animals[0])
print("The animal at 3: ",animals[3])
print("The 5th animal is: ",animals[4])
print("The animal at 2 is: ",animals[2])
print("The 6th animal is: ",animals[5])
print("The animal at 4 is: ",animals[4])

---
repo_name: JetBrains/intellij-community | path: /python/testData/completion/initParams.after.py | filename: initParams.after.py
blob_id: ee3112f67bf07f9b551b71b1a456525f66f54419 | directory_id: eb9f655206c43c12b497c667ba56a0d358b6bc3a | content_id: 3b8b83a7d7d0e4d9975a051b9136b3cac3afc3eb
snapshot_id: 2ed226e200ecc17c037dcddd4a006de56cd43941 | revision_id: 05dbd4575d01a213f3f4d69aa4968473f2536142 | branch_name: refs/heads/master
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0
visit_date: 2023-09-03T17:06:37.560889 | revision_date: 2023-09-03T11:51:00 | committer_date: 2023-09-03T12:12:27
github_id: 2,489,216 | star_events_count: 16,288 | fork_events_count: 6,635
gha_event_created_at: 2023-09-12T07:41:58 | gha_created_at: 2011-09-30T13:33:05 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 63 | extension: py
content:
class C:
def __init__(self, auno=True): pass
c = C(auno=)

---
repo_name: blockchain-certificates/cert-issuer | path: /cert_issuer/__main__.py | filename: __main__.py
blob_id: 5826103e4d38568124f1fc60a7e310d9831c16b9 | directory_id: d5b021ce99ceb48c8d0dab315a0d0d0f4464454a | content_id: 0c02f205e737c9882c357cf4d7878d7250e44b62
snapshot_id: d1aad76cb8977493f7c00b643bc67833676a02cd | revision_id: d8d36f5e207dee130ed2e08858b2f80ab919d992 | branch_name: refs/heads/master
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT
visit_date: 2023-09-02T15:47:26.953546 | revision_date: 2023-07-31T10:58:23 | committer_date: 2023-07-31T10:58:23
github_id: 50,261,635 | star_events_count: 389 | fork_events_count: 233
gha_event_created_at: 2023-08-17T03:49:12 | gha_created_at: 2016-01-23T22:28:58 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 544 | extension: py
content:
#!/usr/bin/env python3
import os.path
import sys
PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if __package__ is None and not hasattr(sys, 'frozen'):
path = os.path.realpath(os.path.abspath(__file__))
sys.path.insert(0, os.path.dirname(os.path.dirname(path)))
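# Editor's note: when this file is executed directly (python
# cert_issuer/__main__.py), __package__ is None, so the block above puts the
# package root on sys.path; frozen executables skip it via the hasattr check.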
def cert_issuer_main(args=None):
from cert_issuer import config
parsed_config = config.get_config()
from cert_issuer import issue_certificates
issue_certificates.main(parsed_config)
if __name__ == '__main__':
cert_issuer_main()

---
repo_name: janpipek/physt | path: /src/physt/compat/pandas.py | filename: pandas.py
blob_id: cdcf66a082a878e35ebbeb5b9d9a288edc0b3b29 | directory_id: 4ff92eb1c248c9139b798e042f333f9b151f634c | content_id: 30649262138c6811e3820b521022aa9cc53cdf88
snapshot_id: 9ca95b1cbc0a78ca43b38ecbf986f49a0bdb8909 | revision_id: 5c1717ea411b43ccebf7591f4949e3a4fe6fc1ff | branch_name: refs/heads/dev
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT
visit_date: 2023-04-09T00:12:24.350424 | revision_date: 2022-11-28T13:03:30 | committer_date: 2022-11-28T13:03:30
github_id: 54,721,043 | star_events_count: 132 | fork_events_count: 17
gha_event_created_at: 2023-01-30T12:10:55 | gha_created_at: 2016-03-25T13:41:56 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 9,681 | extension: py
content:
"""Pandas integration.
- conversion between histograms and Series/DataFrames
- .physt accessor for pandas objects
"""
from __future__ import annotations
import warnings
from typing import TYPE_CHECKING, NoReturn, Optional, Tuple, cast
import numpy as np
import pandas
import pandas as pd
from pandas.api.types import is_numeric_dtype
from pandas.core.arrays.masked import BaseMaskedArray, BaseMaskedDtype
from physt._construction import calculate_1d_bins, extract_1d_array, extract_nd_array
from physt._facade import h, h1
from physt.binnings import BinningBase, static_binning
from physt.types import Histogram1D, Histogram2D, HistogramND
if TYPE_CHECKING:
from typing import Any, Optional, Union
from physt.typing_aliases import ArrayLike
@extract_1d_array.register
def _(series: pandas.Series, *, dropna: bool = True) -> Tuple[np.ndarray, Optional[np.ndarray]]:
if not pd.api.types.is_numeric_dtype(series):
raise ValueError(f"Cannot extract suitable array from non-numeric dtype: {series.dtype}")
series = series.astype(float)
# if isinstance(series.dtype, BaseMaskedDtype):
# array = cast(BaseMaskedArray, series.array)
# if not dropna and any(array.mask):
# raise ValueError("Cannot histogram series with NA's. Set `dropna` to True to override.")
# array_mask = ~array._mask
# array = array._data[~array._mask]
if dropna:
array_mask = series.notna().values
array = series.dropna().values
else:
array_mask = None
array = series.values
return array, array_mask
@extract_1d_array.register
def _(dataframe: pd.DataFrame, **kwargs) -> NoReturn:
# TODO: What about dataframes with just one column?
raise ValueError(
"Cannot extract 1D array suitable for histogramming from a dataframe. "
"Either select a Series or extract multidimensional data."
)
@extract_nd_array.register
def _(series: pd.Series, **kwargs) -> NoReturn:
raise ValueError(
"Cannot extract multidimensional array suitable for histogramming from a series. "
"Either select a DataFrame or extract 1D data."
)
@extract_nd_array.register
def _(
data_frame: pd.DataFrame, *, dim: Optional[int] = None, dropna: bool = True
) -> Tuple[int, np.ndarray, Optional[np.ndarray]]:
if non_numeric_columns := [
name for name, series in data_frame.items() if not is_numeric_dtype(series)
]:
raise ValueError(f"Cannot histogram non-numeric columns: {non_numeric_columns}")
if dim and dim != data_frame.shape[1]:
raise ValueError(f"Invalid dim {data_frame.shape[1]}, {dim} expected.")
if dropna:
        array_mask = data_frame.notna().all(axis=1).values  # row-wise keep mask, mirroring the Series variant
data_frame = data_frame.dropna()
else:
array_mask = None
array = data_frame.astype(float).values
return data_frame.shape[1], array, array_mask
@pandas.api.extensions.register_series_accessor("physt")
class PhystSeriesAccessor:
"""Histogramming methods for pandas Series.
It exists only for numeric series.
"""
def __init__(self, series: pandas.Series):
if not is_numeric_dtype(series):
raise AttributeError(f"Series must be of a numeric type, not {series.dtype}")
self._series = series
def h1(self, bins=None, **kwargs) -> Histogram1D:
"""Create a histogram from the series."""
return h1(self._series, bins=bins, **kwargs)
histogram = h1
def cut(self, bins=None, **kwargs) -> pd.Series:
"""Bin values using physt binning (eq. to pd.cut)."""
        warnings.warn("This method is experimental, only partially implemented and may be removed.")
binning = calculate_1d_bins(extract_1d_array(self._series, dropna=True)[0], bins, **kwargs)
return pd.cut(self._series, binning.numpy_bins)
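# Editor's usage sketch for the accessor above (hypothetical data):
#
#     import pandas as pd
#     s = pd.Series([1.0, 2.5, 2.5, 4.0])
#     hist = s.physt.h1(bins=4)      # physt Histogram1D from a Series
#     binned = s.physt.cut(bins=4)   # experimental pd.cut equivalent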
@pandas.api.extensions.register_dataframe_accessor("physt")
class PhystDataFrameAccessor:
"""Histogramming methods for pandas DataFrames."""
def __init__(self, df: pandas.DataFrame):
self._df = df
def h1(
self,
column: Any = None,
bins=None,
*,
weights: Union[ArrayLike, str, None] = None,
**kwargs,
) -> Histogram1D:
"""Create 1D histogram from a column.
Parameters
----------
column: Name of the column to apply on (not required for 1-column data frames)
bins: Universal `bins` argument
weights: Name of the column to use for weight or some arraylike object
See Also
--------
physt.h1
"""
if column is None:
if self._df.shape[1] != 1:
raise ValueError("Argument `column` must be set.")
column = self._df.columns[0]
try:
data = self._df[column]
except KeyError as exc:
raise KeyError(f"Column '{column}' not found.") from exc
if not isinstance(data, pd.Series):
raise ValueError(f"Argument `column` must select a single series: {column}")
if isinstance(weights, str) and weights in self._df.columns:
# TODO: This might be wrong if NAs are in play
weights = self._df[weights]
if not is_numeric_dtype(data):
raise ValueError(f"Column '{column}' is not numeric.")
return data.physt.h1(bins=bins, weights=weights, **kwargs)
def h2(self, column1: Any = None, column2: Any = None, bins=None, **kwargs) -> Histogram2D:
"""Create 2D histogram from two columns.
Parameters
----------
column1: Name of the first column (not required for 2-column data frames)
column2: Name of the second column (not required for 2-column data frames)
bins: Universal `bins` argument
dropna: Ignore NA values
See Also
--------
physt.h2
"""
if self._df.shape[1] < 2:
raise ValueError("At least two columns required for 2D histograms.")
if column1 is None and column2 is None and self._df.shape[1] == 2:
column1, column2 = self._df.columns
elif column1 is None or column2 is None:
raise ValueError("Arguments `column1` and `column2` must be set.")
return cast(Histogram2D, self.histogram([column1, column2], bins=bins, **kwargs))
def histogram(self, columns: Any = None, bins: Any = None, **kwargs) -> HistogramND:
"""Create a histogram.
Parameters
----------
columns: The column(s) to apply on. Uses all columns if not set. It can be
a `str` for one column, `tuple` for a multi-level index, `list` for
more columns, everything that pandas item selection supports.
bins: Argument to be passed to find the proper binnings.
Returns
-------
A histogram with dimensionality depending on the final set of columns.
See Also
--------
physt.h
"""
if columns is None:
columns = self._df.columns
try:
data = self._df[columns]
except KeyError as exc:
raise KeyError(f"At least one of the columns '{columns}' could not be found.") from exc
if isinstance(data, pd.Series) or data.shape[1] == 1:
return data.physt.h1(bins, **kwargs)
if not isinstance(data, pd.DataFrame):
raise TypeError(f"Argument `columns` does not select a DataFrame: '{columns}'")
if not data.shape[1]:
raise ValueError("Cannot make histogram from DataFrame with no columns.")
for column in data.columns:
if not is_numeric_dtype(data[column]):
raise ValueError(f"Column '{column}' is not numeric")
# TODO: Enable weights to be a name of the column
# TODO: Unify for masked arrays
return h(data=data.astype(float), bins=bins, **kwargs)
def binning_to_index(binning: BinningBase, name: Optional[str] = None) -> pandas.IntervalIndex:
"""Convert physt binning to a pandas interval index."""
# TODO: Check closedness
return pandas.IntervalIndex.from_arrays(
left=binning.bins[:, 0], right=binning.bins[:, 1], closed="left", name=name
)
def index_to_binning(index: pandas.IntervalIndex) -> BinningBase:
"""Convert an interval index into physt binning."""
if not isinstance(index, pandas.IntervalIndex):
raise TypeError(f"IntervalIndex required, '{type(index)}' passed.")
if not index.closed_left:
raise ValueError("Only `closed_left` indices supported.")
if index.is_overlapping:
raise ValueError("Intervals cannot overlap.")
bins = np.hstack([index.left.values[:, np.newaxis], index.right.values[:, np.newaxis]])
return static_binning(bins=bins)
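# Editor's sketch of the round trip between the two helpers above, assuming
# `hist` is some Histogram1D:
#
#     idx = binning_to_index(hist.binning)   # physt binning -> IntervalIndex
#     binning = index_to_binning(idx)        # IntervalIndex -> physt binning
#
# Both directions require left-closed, non-overlapping intervals.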
def _h1_to_dataframe(h1: Histogram1D) -> pandas.DataFrame:
"""Convert histogram to pandas DataFrame."""
return pandas.DataFrame(
{"frequency": h1.frequencies, "error": h1.errors},
index=binning_to_index(h1.binning, name=h1.name),
)
def _h1_to_series(h1: Histogram1D) -> pandas.Series:
"""Convert histogram to pandas Series."""
return pandas.Series(
h1.frequencies,
name="frequency",
index=binning_to_index(h1.binning, name=h1.name),
)
setattr(Histogram1D, "to_dataframe", _h1_to_dataframe)
setattr(Histogram1D, "to_series", _h1_to_series)
# TODO: Implement multidimensional binning to index
# TODO: Implement multidimensional histogram to series/dataframe
# TODO: Implement histogram collection to series/dataframe
# TODO: Implement histogram collection from dataframe / groupby ?
# TODO: Implement multidimensional index to binning

---
repo_name: home-assistant/core | path: /homeassistant/components/recorder/history/modern.py | filename: modern.py
blob_id: 46c8198a89be0d2bba6e32ad58dbf88ddcab8b2a | directory_id: 96dcea595e7c16cec07b3f649afd65f3660a0bad | content_id: 68c357c0ed4375cb848cc0f8a962fd1644684271
snapshot_id: 3455eac2e9d925c92d30178643b1aaccf3a6484f | revision_id: 80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743 | branch_name: refs/heads/dev
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0
visit_date: 2023-08-31T15:41:06.299469 | revision_date: 2023-08-31T14:50:53 | committer_date: 2023-08-31T14:50:53
github_id: 12,888,993 | star_events_count: 35,501 | fork_events_count: 20,617
gha_event_created_at: 2023-09-14T21:50:15 | gha_created_at: 2013-09-17T07:29:48 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 26,326 | extension: py
content:
"""Provide pre-made queries on top of the recorder component."""
from __future__ import annotations
from collections.abc import Callable, Iterable, Iterator, MutableMapping
from datetime import datetime
from itertools import groupby
from operator import itemgetter
from typing import Any, cast
from sqlalchemy import (
CompoundSelect,
Select,
Subquery,
and_,
func,
lambda_stmt,
literal,
select,
union_all,
)
from sqlalchemy.engine.row import Row
from sqlalchemy.orm.session import Session
from homeassistant.const import COMPRESSED_STATE_LAST_UPDATED, COMPRESSED_STATE_STATE
from homeassistant.core import HomeAssistant, State, split_entity_id
import homeassistant.util.dt as dt_util
from ... import recorder
from ..db_schema import SHARED_ATTR_OR_LEGACY_ATTRIBUTES, StateAttributes, States
from ..filters import Filters
from ..models import (
LazyState,
datetime_to_timestamp_or_none,
extract_metadata_ids,
process_timestamp,
row_to_compressed_state,
)
from ..util import execute_stmt_lambda_element, session_scope
from .const import (
LAST_CHANGED_KEY,
NEED_ATTRIBUTE_DOMAINS,
SIGNIFICANT_DOMAINS,
STATE_KEY,
)
_FIELD_MAP = {
"metadata_id": 0,
"state": 1,
"last_updated_ts": 2,
}
def _stmt_and_join_attributes(
no_attributes: bool, include_last_changed: bool
) -> Select:
"""Return the statement and if StateAttributes should be joined."""
_select = select(States.metadata_id, States.state, States.last_updated_ts)
if include_last_changed:
_select = _select.add_columns(States.last_changed_ts)
if not no_attributes:
_select = _select.add_columns(SHARED_ATTR_OR_LEGACY_ATTRIBUTES)
return _select
def _stmt_and_join_attributes_for_start_state(
no_attributes: bool, include_last_changed: bool
) -> Select:
"""Return the statement and if StateAttributes should be joined."""
_select = select(States.metadata_id, States.state)
_select = _select.add_columns(literal(value=0).label("last_updated_ts"))
if include_last_changed:
_select = _select.add_columns(literal(value=0).label("last_changed_ts"))
if not no_attributes:
_select = _select.add_columns(SHARED_ATTR_OR_LEGACY_ATTRIBUTES)
return _select
def _select_from_subquery(
subquery: Subquery | CompoundSelect, no_attributes: bool, include_last_changed: bool
) -> Select:
"""Return the statement to select from the union."""
base_select = select(
subquery.c.metadata_id,
subquery.c.state,
subquery.c.last_updated_ts,
)
if include_last_changed:
base_select = base_select.add_columns(subquery.c.last_changed_ts)
if no_attributes:
return base_select
return base_select.add_columns(subquery.c.attributes)
def get_significant_states(
hass: HomeAssistant,
start_time: datetime,
end_time: datetime | None = None,
entity_ids: list[str] | None = None,
filters: Filters | None = None,
include_start_time_state: bool = True,
significant_changes_only: bool = True,
minimal_response: bool = False,
no_attributes: bool = False,
compressed_state_format: bool = False,
) -> MutableMapping[str, list[State | dict[str, Any]]]:
"""Wrap get_significant_states_with_session with an sql session."""
with session_scope(hass=hass, read_only=True) as session:
return get_significant_states_with_session(
hass,
session,
start_time,
end_time,
entity_ids,
filters,
include_start_time_state,
significant_changes_only,
minimal_response,
no_attributes,
compressed_state_format,
)
def _significant_states_stmt(
start_time_ts: float,
end_time_ts: float | None,
single_metadata_id: int | None,
metadata_ids: list[int],
metadata_ids_in_significant_domains: list[int],
significant_changes_only: bool,
no_attributes: bool,
include_start_time_state: bool,
run_start_ts: float | None,
) -> Select | CompoundSelect:
"""Query the database for significant state changes."""
include_last_changed = not significant_changes_only
stmt = _stmt_and_join_attributes(no_attributes, include_last_changed)
if significant_changes_only:
# Since we are filtering on entity_id (metadata_id) we can avoid
# the join of the states_meta table since we already know which
# metadata_ids are in the significant domains.
if metadata_ids_in_significant_domains:
stmt = stmt.filter(
States.metadata_id.in_(metadata_ids_in_significant_domains)
| (States.last_changed_ts == States.last_updated_ts)
| States.last_changed_ts.is_(None)
)
else:
stmt = stmt.filter(
(States.last_changed_ts == States.last_updated_ts)
| States.last_changed_ts.is_(None)
)
stmt = stmt.filter(States.metadata_id.in_(metadata_ids)).filter(
States.last_updated_ts > start_time_ts
)
if end_time_ts:
stmt = stmt.filter(States.last_updated_ts < end_time_ts)
if not no_attributes:
stmt = stmt.outerjoin(
StateAttributes, States.attributes_id == StateAttributes.attributes_id
)
if not include_start_time_state or not run_start_ts:
stmt = stmt.order_by(States.metadata_id, States.last_updated_ts)
return stmt
unioned_subquery = union_all(
_select_from_subquery(
_get_start_time_state_stmt(
run_start_ts,
start_time_ts,
single_metadata_id,
metadata_ids,
no_attributes,
include_last_changed,
).subquery(),
no_attributes,
include_last_changed,
),
_select_from_subquery(stmt.subquery(), no_attributes, include_last_changed),
).subquery()
return _select_from_subquery(
unioned_subquery,
no_attributes,
include_last_changed,
).order_by(unioned_subquery.c.metadata_id, unioned_subquery.c.last_updated_ts)
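# Editor's note: with include_start_time_state, the statement above UNION ALLs
# a synthetic "state at start_time" row set (last_updated_ts forced to 0 by
# _stmt_and_join_attributes_for_start_state) with the in-period rows, then
# orders by metadata_id and last_updated_ts so each entity's start state
# sorts first.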
def get_significant_states_with_session(
hass: HomeAssistant,
session: Session,
start_time: datetime,
end_time: datetime | None = None,
entity_ids: list[str] | None = None,
filters: Filters | None = None,
include_start_time_state: bool = True,
significant_changes_only: bool = True,
minimal_response: bool = False,
no_attributes: bool = False,
compressed_state_format: bool = False,
) -> MutableMapping[str, list[State | dict[str, Any]]]:
"""Return states changes during UTC period start_time - end_time.
entity_ids is an optional iterable of entities to include in the results.
filters is an optional SQLAlchemy filter which will be applied to the database
    queries unless entity_ids is given, in which case it's ignored.
Significant states are all states where there is a state change,
as well as all states from certain domains (for instance
thermostat so that we get current temperature in our graphs).
"""
if filters is not None:
raise NotImplementedError("Filters are no longer supported")
if not entity_ids:
raise ValueError("entity_ids must be provided")
entity_id_to_metadata_id: dict[str, int | None] | None = None
metadata_ids_in_significant_domains: list[int] = []
instance = recorder.get_instance(hass)
if not (
entity_id_to_metadata_id := instance.states_meta_manager.get_many(
entity_ids, session, False
)
) or not (possible_metadata_ids := extract_metadata_ids(entity_id_to_metadata_id)):
return {}
metadata_ids = possible_metadata_ids
if significant_changes_only:
metadata_ids_in_significant_domains = [
metadata_id
for entity_id, metadata_id in entity_id_to_metadata_id.items()
if metadata_id is not None
and split_entity_id(entity_id)[0] in SIGNIFICANT_DOMAINS
]
run_start_ts: float | None = None
if include_start_time_state and not (
run_start_ts := _get_run_start_ts_for_utc_point_in_time(hass, start_time)
):
include_start_time_state = False
start_time_ts = dt_util.utc_to_timestamp(start_time)
end_time_ts = datetime_to_timestamp_or_none(end_time)
single_metadata_id = metadata_ids[0] if len(metadata_ids) == 1 else None
stmt = lambda_stmt(
lambda: _significant_states_stmt(
start_time_ts,
end_time_ts,
single_metadata_id,
metadata_ids,
metadata_ids_in_significant_domains,
significant_changes_only,
no_attributes,
include_start_time_state,
run_start_ts,
),
track_on=[
bool(single_metadata_id),
bool(metadata_ids_in_significant_domains),
bool(end_time_ts),
significant_changes_only,
no_attributes,
include_start_time_state,
],
)
return _sorted_states_to_dict(
execute_stmt_lambda_element(session, stmt, None, end_time, orm_rows=False),
start_time_ts if include_start_time_state else None,
entity_ids,
entity_id_to_metadata_id,
minimal_response,
compressed_state_format,
no_attributes=no_attributes,
)
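# Editor's usage sketch (hypothetical values; not part of the original module):
#
#     with session_scope(hass=hass, read_only=True) as session:
#         states = get_significant_states_with_session(
#             hass, session, start_time, entity_ids=["sensor.temperature"]
#         )
#
# entity_ids is mandatory here, and passing a Filters object raises
# NotImplementedError, as enforced at the top of the function.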
def get_full_significant_states_with_session(
hass: HomeAssistant,
session: Session,
start_time: datetime,
end_time: datetime | None = None,
entity_ids: list[str] | None = None,
filters: Filters | None = None,
include_start_time_state: bool = True,
significant_changes_only: bool = True,
no_attributes: bool = False,
) -> MutableMapping[str, list[State]]:
"""Variant of get_significant_states_with_session.
Difference with get_significant_states_with_session is that it does not
return minimal responses.
"""
return cast(
MutableMapping[str, list[State]],
get_significant_states_with_session(
hass=hass,
session=session,
start_time=start_time,
end_time=end_time,
entity_ids=entity_ids,
filters=filters,
include_start_time_state=include_start_time_state,
significant_changes_only=significant_changes_only,
minimal_response=False,
no_attributes=no_attributes,
),
)
def _state_changed_during_period_stmt(
start_time_ts: float,
end_time_ts: float | None,
single_metadata_id: int,
no_attributes: bool,
limit: int | None,
include_start_time_state: bool,
run_start_ts: float | None,
) -> Select | CompoundSelect:
stmt = (
_stmt_and_join_attributes(no_attributes, False)
.filter(
(
(States.last_changed_ts == States.last_updated_ts)
| States.last_changed_ts.is_(None)
)
& (States.last_updated_ts > start_time_ts)
)
.filter(States.metadata_id == single_metadata_id)
)
if end_time_ts:
stmt = stmt.filter(States.last_updated_ts < end_time_ts)
if not no_attributes:
stmt = stmt.outerjoin(
StateAttributes, States.attributes_id == StateAttributes.attributes_id
)
if limit:
stmt = stmt.limit(limit)
stmt = stmt.order_by(
States.metadata_id,
States.last_updated_ts,
)
if not include_start_time_state or not run_start_ts:
return stmt
return _select_from_subquery(
union_all(
_select_from_subquery(
_get_single_entity_start_time_stmt(
start_time_ts,
single_metadata_id,
no_attributes,
False,
).subquery(),
no_attributes,
False,
),
_select_from_subquery(
stmt.subquery(),
no_attributes,
False,
),
).subquery(),
no_attributes,
False,
)
def state_changes_during_period(
hass: HomeAssistant,
start_time: datetime,
end_time: datetime | None = None,
entity_id: str | None = None,
no_attributes: bool = False,
descending: bool = False,
limit: int | None = None,
include_start_time_state: bool = True,
) -> MutableMapping[str, list[State]]:
"""Return states changes during UTC period start_time - end_time."""
if not entity_id:
raise ValueError("entity_id must be provided")
entity_ids = [entity_id.lower()]
with session_scope(hass=hass, read_only=True) as session:
instance = recorder.get_instance(hass)
if not (
possible_metadata_id := instance.states_meta_manager.get(
entity_id, session, False
)
):
return {}
single_metadata_id = possible_metadata_id
entity_id_to_metadata_id: dict[str, int | None] = {
entity_id: single_metadata_id
}
run_start_ts: float | None = None
if include_start_time_state and not (
run_start_ts := _get_run_start_ts_for_utc_point_in_time(hass, start_time)
):
include_start_time_state = False
start_time_ts = dt_util.utc_to_timestamp(start_time)
end_time_ts = datetime_to_timestamp_or_none(end_time)
stmt = lambda_stmt(
lambda: _state_changed_during_period_stmt(
start_time_ts,
end_time_ts,
single_metadata_id,
no_attributes,
limit,
include_start_time_state,
run_start_ts,
),
track_on=[
bool(end_time_ts),
no_attributes,
bool(limit),
include_start_time_state,
],
)
return cast(
MutableMapping[str, list[State]],
_sorted_states_to_dict(
execute_stmt_lambda_element(
session, stmt, None, end_time, orm_rows=False
),
start_time_ts if include_start_time_state else None,
entity_ids,
entity_id_to_metadata_id,
descending=descending,
no_attributes=no_attributes,
),
)
def _get_last_state_changes_single_stmt(metadata_id: int) -> Select:
return (
_stmt_and_join_attributes(False, False)
.join(
(
                latest_state_for_metadata_id := (
                    select(
                        States.metadata_id.label("max_metadata_id"),
                        func.max(States.last_updated_ts).label("max_last_updated"),
                    )
                    .filter(States.metadata_id == metadata_id)
                    .group_by(States.metadata_id)
                    .subquery()
                )
            ),
            and_(
                States.metadata_id == latest_state_for_metadata_id.c.max_metadata_id,
                States.last_updated_ts
                == latest_state_for_metadata_id.c.max_last_updated,
),
)
.outerjoin(
StateAttributes, States.attributes_id == StateAttributes.attributes_id
)
.order_by(States.state_id.desc())
)
def _get_last_state_changes_multiple_stmt(
number_of_states: int, metadata_id: int
) -> Select:
return (
_stmt_and_join_attributes(False, False)
.where(
States.state_id
== (
select(States.state_id)
.filter(States.metadata_id == metadata_id)
.order_by(States.last_updated_ts.desc())
.limit(number_of_states)
.subquery()
).c.state_id
)
.outerjoin(
StateAttributes, States.attributes_id == StateAttributes.attributes_id
)
.order_by(States.state_id.desc())
)
def get_last_state_changes(
hass: HomeAssistant, number_of_states: int, entity_id: str
) -> MutableMapping[str, list[State]]:
"""Return the last number_of_states."""
entity_id_lower = entity_id.lower()
entity_ids = [entity_id_lower]
    # Calling this function with number_of_states > 1 can cause instability:
    # the metadata_id_last_updated_ts index is in ascending order, so the
    # query has to scan the table to find the last number_of_states states.
with session_scope(hass=hass, read_only=True) as session:
instance = recorder.get_instance(hass)
if not (
possible_metadata_id := instance.states_meta_manager.get(
entity_id, session, False
)
):
return {}
metadata_id = possible_metadata_id
entity_id_to_metadata_id: dict[str, int | None] = {entity_id_lower: metadata_id}
if number_of_states == 1:
stmt = lambda_stmt(
lambda: _get_last_state_changes_single_stmt(metadata_id),
)
else:
stmt = lambda_stmt(
lambda: _get_last_state_changes_multiple_stmt(
number_of_states, metadata_id
),
)
states = list(execute_stmt_lambda_element(session, stmt, orm_rows=False))
return cast(
MutableMapping[str, list[State]],
_sorted_states_to_dict(
reversed(states),
None,
entity_ids,
entity_id_to_metadata_id,
no_attributes=False,
),
)
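# A minimal usage sketch (hypothetical entity id; not part of the original
# module):
#
#     last = get_last_state_changes(hass, 1, "sensor.outside_temperature")
#     # -> {"sensor.outside_temperature": [<State ...>]}, or {} if the entity
#     #    was never recorded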
def _get_start_time_state_for_entities_stmt(
run_start_ts: float,
epoch_time: float,
metadata_ids: list[int],
no_attributes: bool,
include_last_changed: bool,
) -> Select:
"""Baked query to get states for specific entities."""
    # We got an include-list of entities, so accelerate the query by
    # filtering already in the inner query.
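    # The inner subquery finds, per metadata_id, the newest last_updated_ts
    # before epoch_time; joining back on (metadata_id, timestamp) then
    # recovers the full row -- a standard greatest-row-per-group pattern.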
stmt = _stmt_and_join_attributes_for_start_state(
no_attributes, include_last_changed
).join(
(
most_recent_states_for_entities_by_date := (
select(
States.metadata_id.label("max_metadata_id"),
func.max(States.last_updated_ts).label("max_last_updated"),
)
.filter(
(States.last_updated_ts >= run_start_ts)
& (States.last_updated_ts < epoch_time)
)
.filter(States.metadata_id.in_(metadata_ids))
.group_by(States.metadata_id)
.subquery()
)
),
and_(
States.metadata_id
== most_recent_states_for_entities_by_date.c.max_metadata_id,
States.last_updated_ts
== most_recent_states_for_entities_by_date.c.max_last_updated,
),
)
if no_attributes:
return stmt
return stmt.outerjoin(
StateAttributes, (States.attributes_id == StateAttributes.attributes_id)
)
def _get_run_start_ts_for_utc_point_in_time(
hass: HomeAssistant, utc_point_in_time: datetime
) -> float | None:
"""Return the start time of a run."""
run = recorder.get_instance(hass).recorder_runs_manager.get(utc_point_in_time)
if (
run is not None
and (run_start := process_timestamp(run.start)) < utc_point_in_time
):
return run_start.timestamp()
    # History did not run before utc_point_in_time, so there is no
    # run start time to return.
return None
def _get_start_time_state_stmt(
run_start_ts: float,
epoch_time: float,
single_metadata_id: int | None,
metadata_ids: list[int],
no_attributes: bool,
include_last_changed: bool,
) -> Select:
"""Return the states at a specific point in time."""
if single_metadata_id:
# Use an entirely different (and extremely fast) query if we only
# have a single entity id
return _get_single_entity_start_time_stmt(
epoch_time,
single_metadata_id,
no_attributes,
include_last_changed,
)
# We have more than one entity to look at so we need to do a query on states
# since the last recorder run started.
return _get_start_time_state_for_entities_stmt(
run_start_ts,
epoch_time,
metadata_ids,
no_attributes,
include_last_changed,
)
def _get_single_entity_start_time_stmt(
epoch_time: float,
metadata_id: int,
no_attributes: bool,
include_last_changed: bool,
) -> Select:
# Use an entirely different (and extremely fast) query if we only
# have a single entity id
stmt = (
_stmt_and_join_attributes_for_start_state(no_attributes, include_last_changed)
.filter(
States.last_updated_ts < epoch_time,
States.metadata_id == metadata_id,
)
.order_by(States.last_updated_ts.desc())
.limit(1)
)
if no_attributes:
return stmt
return stmt.outerjoin(
StateAttributes, States.attributes_id == StateAttributes.attributes_id
)
def _sorted_states_to_dict(
states: Iterable[Row],
start_time_ts: float | None,
entity_ids: list[str],
entity_id_to_metadata_id: dict[str, int | None],
minimal_response: bool = False,
compressed_state_format: bool = False,
descending: bool = False,
no_attributes: bool = False,
) -> MutableMapping[str, list[State | dict[str, Any]]]:
"""Convert SQL results into JSON friendly data structure.
This takes our state list and turns it into a JSON friendly data
structure {'entity_id': [list of states], 'entity_id2': [list of states]}
States must be sorted by entity_id and last_updated
We also need to go back and create a synthetic zero data point for
each list of states, otherwise our graphs won't start on the Y
axis correctly.
"""
field_map = _FIELD_MAP
state_class: Callable[
[Row, dict[str, dict[str, Any]], float | None, str, str, float | None, bool],
State | dict[str, Any],
]
if compressed_state_format:
state_class = row_to_compressed_state
attr_time = COMPRESSED_STATE_LAST_UPDATED
attr_state = COMPRESSED_STATE_STATE
else:
state_class = LazyState
attr_time = LAST_CHANGED_KEY
attr_state = STATE_KEY
# Set all entity IDs to empty lists in result set to maintain the order
result: dict[str, list[State | dict[str, Any]]] = {
entity_id: [] for entity_id in entity_ids
}
    metadata_id_to_entity_id: dict[int, str] = {
        v: k for k, v in entity_id_to_metadata_id.items() if v is not None
    }
# Get the states at the start time
if len(entity_ids) == 1:
metadata_id = entity_id_to_metadata_id[entity_ids[0]]
assert metadata_id is not None # should not be possible if we got here
states_iter: Iterable[tuple[int, Iterator[Row]]] = (
(metadata_id, iter(states)),
)
else:
key_func = itemgetter(field_map["metadata_id"])
states_iter = groupby(states, key_func)
state_idx = field_map["state"]
last_updated_ts_idx = field_map["last_updated_ts"]
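    # Rows arrive as plain tuples here (the callers fetch with orm_rows=False),
    # so column values are read positionally through the shared field map
    # rather than by attribute name.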
# Append all changes to it
for metadata_id, group in states_iter:
entity_id = metadata_id_to_entity_id[metadata_id]
attr_cache: dict[str, dict[str, Any]] = {}
ent_results = result[entity_id]
if (
not minimal_response
or split_entity_id(entity_id)[0] in NEED_ATTRIBUTE_DOMAINS
):
ent_results.extend(
state_class(
db_state,
attr_cache,
start_time_ts,
entity_id,
db_state[state_idx],
db_state[last_updated_ts_idx],
False,
)
for db_state in group
)
continue
prev_state: str | None = None
# With minimal response we only provide a native
# State for the first and last response. All the states
# in-between only provide the "state" and the
# "last_changed".
if not ent_results:
if (first_state := next(group, None)) is None:
continue
prev_state = first_state[state_idx]
ent_results.append(
state_class(
first_state,
attr_cache,
start_time_ts,
entity_id,
prev_state, # type: ignore[arg-type]
first_state[last_updated_ts_idx],
no_attributes,
)
)
        #
        # minimal_response only makes sense with last_updated == last_changed
        #
        # We use last_updated for last_changed since it's the same
        #
        # With minimal response we do not care about attribute
        # changes, so we can filter out duplicate states
if compressed_state_format:
# Compressed state format uses the timestamp directly
ent_results.extend(
{
attr_state: (prev_state := state),
attr_time: row[last_updated_ts_idx],
}
for row in group
if (state := row[state_idx]) != prev_state
)
continue
# Non-compressed state format returns an ISO formatted string
_utc_from_timestamp = dt_util.utc_from_timestamp
ent_results.extend(
{
attr_state: (prev_state := state), # noqa: F841
attr_time: _utc_from_timestamp(row[last_updated_ts_idx]).isoformat(),
}
for row in group
if (state := row[state_idx]) != prev_state
)
if descending:
for ent_results in result.values():
ent_results.reverse()
# Filter out the empty lists if some states had 0 results.
return {key: val for key, val in result.items() if val}
|
e3371529cc117e0a986899395048bdf87da26e25
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/DQMOffline/Trigger/python/B2GMonitoring_Client_cff.py
|
4703dea5b665987481563051bd065cb0d8b9ac89
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 4,665
|
py
|
B2GMonitoring_Client_cff.py
|
import FWCore.ParameterSet.Config as cms
b2gjetEfficiency = cms.EDProducer("DQMGenericClient",
subDirs = cms.untracked.vstring("HLT/B2G/*"),
verbose = cms.untracked.uint32(0), # Set to 2 for all messages
resolution = cms.vstring(),
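    # Each entry follows the DQMGenericClient efficiency convention:
    # "<output name> '<title>; <x label>; <y label>' <numerator ME> <denominator ME>";
    # the client divides the numerator histogram by the denominator to book
    # the efficiency plot.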
efficiency = cms.vstring(
"effic_pfjetpT 'Jet pT turnON; PFJet(pT) [GeV]; efficiency' pfjetpT_numerator pfjetpT_denominator",
"effic_pfjetpT_variable 'Jet pT turnON; PFJet(pT) [GeV]; efficiency' pfjetpT_variable_numerator pfjetpT_variable_denominator",
"effic_pfjetPhi 'Jet efficiency vs #phi; PF Jet #phi [rad]; efficiency' pfjetPhi_numerator pfjetPhi_denominator",
"effic_pfjetEta 'Jet efficiency vs #eta; PF Jet #eta [rad]; efficiency' pfjetEta_numerator pfjetEta_denominator",
"effic_ht 'HT turnON; PF HT [GeV]; efficiency' ht_numerator ht_denominator",
"effic_ht_variable 'HT turnON; PF HT [GeV]; efficiency' ht_variable_numerator ht_variable_denominator",
"effic_mjj_variable 'Mjj turnON; invariant dijetmass Mjj [GeV]; efficiency' mjj_variable_numerator mjj_variable_denominator",
"effic_softdrop_variable 'Softdrop turnON; leading jet softdropmass [GeV]; efficiency' softdrop_variable_numerator softdrop_variable_denominator",
"effic_deltaphimetj1 'DELTAPHI turnON; DELTA PHI (PFMET, PFJET1); efficiency' deltaphimetj1_numerator deltaphimetj1_denominator",
"effic_deltaphij1j2 'DELTAPHI turnON; DELTA PHI (PFJET1, PFJET2); efficiency' deltaphij1j2_numerator deltaphij1j2_denominator",
),
efficiencyProfile = cms.untracked.vstring(
"effic_pfjetpT_vs_LS 'JET efficiency vs LS; LS; PF JET efficiency' jetpTVsLS_numerator jetpTVsLS_denominator",
"effic_ht_vs_LS 'HT efficiency vs LS; LS; PF HT efficiency' htVsLS_numerator htVsLS_denominator",
),
)
hltDQMonitorB2G_Client_MuEle = cms.EDProducer("DQMGenericClient",
subDirs = cms.untracked.vstring("HLT/B2G/Dileptonic/HLT_MuXX_EleXX_CaloIdL_MW"),
verbose = cms.untracked.uint32(0),
resolution = cms.vstring(),
efficiency = cms.vstring(
"effic_muPt_1 'efficiency vs leading muon p_{T};muon p_{T} [GeV];efficiency' muPt_1_numerator muPt_1_denominator",
"effic_muEta_1 'efficiency vs leading muon #eta;muon #eta;efficiency' muEta_1_numerator muEta_1_denominator",
"effic_muPhi_1 'efficiency vs leading muon #phi;muon #phi;efficiency' muPhi_1_numerator muPhi_1_denominator",
"effic_muMulti 'efficiency vs muon multiplicity;muon multiplicity;efficiency' muMulti_numerator muMulti_denominator",
"effic_elePt_1 'efficiency vs electron p_{T};electron p_{T} [GeV];efficiency' elePt_1_numerator elePt_1_denominator",
"effic_eleEta_1 'efficiency vs electron #eta;electron #eta;efficiency' eleEta_1_numerator eleEta_1_denominator",
"effic_elePhi_1 'efficiency vs electron #phi;electron #phi;efficiency' elePhi_1_numerator elePhi_1_denominator",
"effic_eleMulti 'efficiency vs electron multiplicity;electron multiplicity;efficiency' eleMulti_numerator eleMulti_denominator",
),
)
hltDQMonitorB2G_Client_MuTkMu = cms.EDProducer("DQMGenericClient",
subDirs = cms.untracked.vstring("HLT/B2G/Dileptonic/HLT_Mu37_TkMu27"),
verbose = cms.untracked.uint32(0),
resolution = cms.vstring(),
efficiency = cms.vstring(
"effic_muPt_1 'efficiency vs leading muon p_{T};muon p_{T} [GeV];efficiency' muPt_1_numerator muPt_1_denominator",
"effic_muEta_1 'efficiency vs leading muon #eta;muon #eta;efficiency' muEta_1_numerator muEta_1_denominator",
"effic_muPhi_1 'efficiency vs leading muon #phi;muon #phi;efficiency' muPhi_1_numerator muPhi_1_denominator",
"effic_muPt_2 'efficiency vs sub-leading muon p_{T};muon p_{T} [GeV];efficiency' muPt_2_numerator muPt_2_denominator",
"effic_muEta_2 'efficiency vs sub-leading muon #eta;muon #eta;efficiency' muEta_2_numerator muEta_2_denominator",
"effic_muPhi_2 'efficiency vs sub-leading muon #phi;muon #phi;efficiency' muPhi_2_numerator muPhi_2_denominator",
"effic_muMulti 'efficiency vs muon multiplicity;muon multiplicity;efficiency' muMulti_numerator muMulti_denominator",
),
)
b2gClient = cms.Sequence(
b2gjetEfficiency
+ hltDQMonitorB2G_Client_MuEle
+ hltDQMonitorB2G_Client_MuTkMu
)
|
2317569d0ebb46d38aab391a4e0674aa7d643cc1
|
5ef6c8d47864f471e26b9902d61f8c687e941f05
|
/src/genie/libs/parser/iosxe/tests/ShowIpcefExactRoute/cli/equal/golden_output_expected.py
|
36b928ceb5fcc3b655e151bce3b5484ec0397366
|
[
"Apache-2.0"
] |
permissive
|
CiscoTestAutomation/genieparser
|
169c196558f1c1a0f0d10650876096f993224917
|
b531eff760b2e44cd69d7a2716db6f866907c239
|
refs/heads/master
| 2023-09-03T08:56:18.831340
| 2023-08-29T22:32:02
| 2023-08-29T22:32:02
| 131,621,824
| 247
| 409
|
Apache-2.0
| 2023-08-29T22:32:04
| 2018-04-30T16:51:50
|
Python
|
UTF-8
|
Python
| false
| false
| 139
|
py
|
golden_output_expected.py
|
expected_output = {
'source': '10.1.1.1',
'destination': '20.1.1.1',
'ip_adj': 'out of Vlan13',
'ip_addr': '172.27.0.1'
}
|
5204ec1d7c2e47bf2f12332fb1a748c985ddd67a
|
3dc647cd07a7361ed401e40d2b7cce8c826c8f6c
|
/Lib/test/test_ast.py
|
1062f01c2f81567dff1c8f93704f30f313b71add
|
[
"Python-2.0",
"CC-BY-4.0",
"MIT"
] |
permissive
|
RustPython/RustPython
|
5ddce4a9848b9de8c041ffd2634f83c0105d3f39
|
b864e5da1f18897fc884180b7093df5aa170024f
|
refs/heads/main
| 2023-09-04T12:38:29.458699
| 2023-09-03T12:33:42
| 2023-09-03T12:33:42
| 135,201,145
| 15,815
| 1,302
|
MIT
| 2023-09-14T08:11:45
| 2018-05-28T19:27:01
|
Rust
|
UTF-8
|
Python
| false
| false
| 111,447
|
py
|
test_ast.py
|
import ast
import builtins
import dis
import os
import sys
import types
import unittest
import warnings
import weakref
from textwrap import dedent
from test import support
def to_tuple(t):
if t is None or isinstance(t, (str, int, complex)):
return t
elif isinstance(t, list):
return [to_tuple(e) for e in t]
result = [t.__class__.__name__]
if hasattr(t, 'lineno') and hasattr(t, 'col_offset'):
result.append((t.lineno, t.col_offset))
if hasattr(t, 'end_lineno') and hasattr(t, 'end_col_offset'):
result[-1] += (t.end_lineno, t.end_col_offset)
if t._fields is None:
return tuple(result)
for f in t._fields:
result.append(to_tuple(getattr(t, f)))
return tuple(result)
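# to_tuple flattens an AST node into nested tuples of
# (class name, source position when present, then the node's field values);
# this is the shape of the *_results fixtures that test_snippets compares
# against.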
# These tests are compiled through "exec"
# There should be at least one test per statement
exec_tests = [
# None
"None",
# Module docstring
"'module docstring'",
# FunctionDef
"def f(): pass",
# FunctionDef with docstring
"def f(): 'function docstring'",
# FunctionDef with arg
"def f(a): pass",
# FunctionDef with arg and default value
"def f(a=0): pass",
# FunctionDef with varargs
"def f(*args): pass",
# FunctionDef with kwargs
"def f(**kwargs): pass",
# FunctionDef with all kind of args and docstring
"def f(a, b=1, c=None, d=[], e={}, *args, f=42, **kwargs): 'doc for f()'",
# ClassDef
"class C:pass",
# ClassDef with docstring
"class C: 'docstring for class C'",
# ClassDef, new style class
"class C(object): pass",
# Return
"def f():return 1",
# Delete
"del v",
# Assign
"v = 1",
"a,b = c",
"(a,b) = c",
"[a,b] = c",
# AugAssign
"v += 1",
# For
"for v in v:pass",
# While
"while v:pass",
# If
"if v:pass",
# If-Elif
"if a:\n pass\nelif b:\n pass",
# If-Elif-Else
"if a:\n pass\nelif b:\n pass\nelse:\n pass",
# With
"with x as y: pass",
"with x as y, z as q: pass",
# Raise
"raise Exception('string')",
# TryExcept
"try:\n pass\nexcept Exception:\n pass",
# TryFinally
"try:\n pass\nfinally:\n pass",
# Assert
"assert v",
# Import
"import sys",
# ImportFrom
"from sys import v",
# Global
"global v",
# Expr
"1",
# Pass,
"pass",
# Break
"for v in v:break",
# Continue
"for v in v:continue",
# for statements with naked tuples (see http://bugs.python.org/issue6704)
"for a,b in c: pass",
"for (a,b) in c: pass",
"for [a,b] in c: pass",
# Multiline generator expression (test for .lineno & .col_offset)
"""(
(
Aa
,
Bb
)
for
Aa
,
Bb in Cc
)""",
# dictcomp
"{a : b for w in x for m in p if g}",
# dictcomp with naked tuple
"{a : b for v,w in x}",
# setcomp
"{r for l in x if g}",
# setcomp with naked tuple
"{r for l,m in x}",
# AsyncFunctionDef
"async def f():\n 'async function'\n await something()",
# AsyncFor
"async def f():\n async for e in i: 1\n else: 2",
# AsyncWith
"async def f():\n async with a as b: 1",
# PEP 448: Additional Unpacking Generalizations
"{**{1:2}, 2:3}",
"{*{1, 2}, 3}",
# Asynchronous comprehensions
"async def f():\n [i async for b in c]",
# Decorated FunctionDef
"@deco1\n@deco2()\n@deco3(1)\ndef f(): pass",
# Decorated AsyncFunctionDef
"@deco1\n@deco2()\n@deco3(1)\nasync def f(): pass",
# Decorated ClassDef
"@deco1\n@deco2()\n@deco3(1)\nclass C: pass",
# Decorator with generator argument
"@deco(a for a in b)\ndef f(): pass",
# Decorator with attribute
"@a.b.c\ndef f(): pass",
# Simple assignment expression
"(a := 1)",
# Positional-only arguments
"def f(a, /,): pass",
"def f(a, /, c, d, e): pass",
"def f(a, /, c, *, d, e): pass",
"def f(a, /, c, *, d, e, **kwargs): pass",
# Positional-only arguments with defaults
"def f(a=1, /,): pass",
"def f(a=1, /, b=2, c=4): pass",
"def f(a=1, /, b=2, *, c=4): pass",
"def f(a=1, /, b=2, *, c): pass",
"def f(a=1, /, b=2, *, c=4, **kwargs): pass",
"def f(a=1, /, b=2, *, c, **kwargs): pass",
]
# These are compiled through "single"
# because of overlap with "eval", it just tests what
# can't be tested with "eval"
single_tests = [
"1+2"
]
# These are compiled through "eval"
# It should test all expressions
eval_tests = [
# None
"None",
# BoolOp
"a and b",
# BinOp
"a + b",
# UnaryOp
"not v",
# Lambda
"lambda:None",
# Dict
"{ 1:2 }",
# Empty dict
"{}",
# Set
"{None,}",
# Multiline dict (test for .lineno & .col_offset)
"""{
1
:
2
}""",
# ListComp
"[a for b in c if d]",
# GeneratorExp
"(a for b in c if d)",
# Comprehensions with multiple for targets
"[(a,b) for a,b in c]",
"[(a,b) for (a,b) in c]",
"[(a,b) for [a,b] in c]",
"{(a,b) for a,b in c}",
"{(a,b) for (a,b) in c}",
"{(a,b) for [a,b] in c}",
"((a,b) for a,b in c)",
"((a,b) for (a,b) in c)",
"((a,b) for [a,b] in c)",
# Yield - yield expressions can't work outside a function
#
# Compare
"1 < 2 < 3",
# Call
"f(1,2,c=3,*d,**e)",
# Call with multi-character starred
"f(*[0, 1])",
# Call with a generator argument
"f(a for a in b)",
# Num
"10",
# Str
"'string'",
# Attribute
"a.b",
# Subscript
"a[b:c]",
# Name
"v",
# List
"[1,2,3]",
# Empty list
"[]",
# Tuple
"1,2,3",
# Tuple
"(1,2,3)",
# Empty tuple
"()",
# Combination
"a.b.c.d(a.b[1:2])",
]
# TODO: expr_context, slice, boolop, operator, unaryop, cmpop, comprehension
# excepthandler, arguments, keywords, alias
class AST_Tests(unittest.TestCase):
def _is_ast_node(self, name, node):
if not isinstance(node, type):
return False
if "ast" not in node.__module__:
return False
return name != 'AST' and name[0].isupper()
def _assertTrueorder(self, ast_node, parent_pos):
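        # Recursively verify that each node's (lineno, col_offset) is not
        # before its parent's; decorator lists are special-cased below since
        # decorators appear before the line of the definition they decorate.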
if not isinstance(ast_node, ast.AST) or ast_node._fields is None:
return
if isinstance(ast_node, (ast.expr, ast.stmt, ast.excepthandler)):
node_pos = (ast_node.lineno, ast_node.col_offset)
self.assertGreaterEqual(node_pos, parent_pos)
parent_pos = (ast_node.lineno, ast_node.col_offset)
for name in ast_node._fields:
value = getattr(ast_node, name)
if isinstance(value, list):
first_pos = parent_pos
if value and name == 'decorator_list':
first_pos = (value[0].lineno, value[0].col_offset)
for child in value:
self._assertTrueorder(child, first_pos)
elif value is not None:
self._assertTrueorder(value, parent_pos)
self.assertEqual(ast_node._fields, ast_node.__match_args__)
def test_AST_objects(self):
x = ast.AST()
self.assertEqual(x._fields, ())
x.foobar = 42
self.assertEqual(x.foobar, 42)
self.assertEqual(x.__dict__["foobar"], 42)
with self.assertRaises(AttributeError):
x.vararg
with self.assertRaises(TypeError):
# "ast.AST constructor takes 0 positional arguments"
ast.AST(2)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_AST_garbage_collection(self):
class X:
pass
a = ast.AST()
a.x = X()
a.x.a = a
ref = weakref.ref(a.x)
del a
support.gc_collect()
self.assertIsNone(ref())
@unittest.skip("TODO: RUSTPYTHON, thread 'main' panicked at 'not implemented: async for comprehensions'")
def test_snippets(self):
for input, output, kind in ((exec_tests, exec_results, "exec"),
(single_tests, single_results, "single"),
(eval_tests, eval_results, "eval")):
for i, o in zip(input, output):
with self.subTest(action="parsing", input=i):
ast_tree = compile(i, "?", kind, ast.PyCF_ONLY_AST)
self.assertEqual(to_tuple(ast_tree), o)
self._assertTrueorder(ast_tree, (0, 0))
with self.subTest(action="compiling", input=i, kind=kind):
compile(ast_tree, "?", kind)
@unittest.skip("TODO: RUSTPYTHON, thread 'main' panicked at 'not implemented: async for comprehensions'")
def test_ast_validation(self):
# compile() is the only function that calls PyAST_Validate
snippets_to_validate = exec_tests + single_tests + eval_tests
for snippet in snippets_to_validate:
tree = ast.parse(snippet)
compile(tree, '<string>', 'exec')
def test_slice(self):
slc = ast.parse("x[::]").body[0].value.slice
self.assertIsNone(slc.upper)
self.assertIsNone(slc.lower)
self.assertIsNone(slc.step)
def test_from_import(self):
im = ast.parse("from . import y").body[0]
self.assertIsNone(im.module)
def test_non_interned_future_from_ast(self):
mod = ast.parse("from __future__ import division")
self.assertIsInstance(mod.body[0], ast.ImportFrom)
mod.body[0].module = " __future__ ".strip()
compile(mod, "<test>", "exec")
def test_alias(self):
im = ast.parse("from bar import y").body[0]
self.assertEqual(len(im.names), 1)
alias = im.names[0]
self.assertEqual(alias.name, 'y')
self.assertIsNone(alias.asname)
self.assertEqual(alias.lineno, 1)
self.assertEqual(alias.end_lineno, 1)
self.assertEqual(alias.col_offset, 16)
self.assertEqual(alias.end_col_offset, 17)
im = ast.parse("from bar import *").body[0]
alias = im.names[0]
self.assertEqual(alias.name, '*')
self.assertIsNone(alias.asname)
self.assertEqual(alias.lineno, 1)
self.assertEqual(alias.end_lineno, 1)
self.assertEqual(alias.col_offset, 16)
self.assertEqual(alias.end_col_offset, 17)
def test_base_classes(self):
self.assertTrue(issubclass(ast.For, ast.stmt))
self.assertTrue(issubclass(ast.Name, ast.expr))
self.assertTrue(issubclass(ast.stmt, ast.AST))
self.assertTrue(issubclass(ast.expr, ast.AST))
self.assertTrue(issubclass(ast.comprehension, ast.AST))
self.assertTrue(issubclass(ast.Gt, ast.AST))
def test_field_attr_existence(self):
for name, item in ast.__dict__.items():
if self._is_ast_node(name, item):
if name == 'Index':
# Index(value) just returns value now.
# The argument is required.
continue
x = item()
if isinstance(x, ast.AST):
self.assertEqual(type(x._fields), tuple)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_arguments(self):
x = ast.arguments()
self.assertEqual(x._fields, ('posonlyargs', 'args', 'vararg', 'kwonlyargs',
'kw_defaults', 'kwarg', 'defaults'))
with self.assertRaises(AttributeError):
x.args
self.assertIsNone(x.vararg)
x = ast.arguments(*range(1, 8))
self.assertEqual(x.args, 2)
self.assertEqual(x.vararg, 3)
def test_field_attr_writable(self):
x = ast.Num()
# We can assign to _fields
x._fields = 666
self.assertEqual(x._fields, 666)
def test_classattrs(self):
x = ast.Num()
self.assertEqual(x._fields, ('value', 'kind'))
with self.assertRaises(AttributeError):
x.value
with self.assertRaises(AttributeError):
x.n
x = ast.Num(42)
self.assertEqual(x.value, 42)
self.assertEqual(x.n, 42)
with self.assertRaises(AttributeError):
x.lineno
with self.assertRaises(AttributeError):
x.foobar
x = ast.Num(lineno=2)
self.assertEqual(x.lineno, 2)
x = ast.Num(42, lineno=0)
self.assertEqual(x.lineno, 0)
self.assertEqual(x._fields, ('value', 'kind'))
self.assertEqual(x.value, 42)
self.assertEqual(x.n, 42)
self.assertRaises(TypeError, ast.Num, 1, None, 2)
self.assertRaises(TypeError, ast.Num, 1, None, 2, lineno=0)
# Arbitrary keyword arguments are supported
self.assertEqual(ast.Constant(1, foo='bar').foo, 'bar')
self.assertEqual(ast.Num(1, foo='bar').foo, 'bar')
with self.assertRaisesRegex(TypeError, "Num got multiple values for argument 'n'"):
ast.Num(1, n=2)
with self.assertRaisesRegex(TypeError, "Constant got multiple values for argument 'value'"):
ast.Constant(1, value=2)
self.assertEqual(ast.Num(42).n, 42)
self.assertEqual(ast.Num(4.25).n, 4.25)
self.assertEqual(ast.Num(4.25j).n, 4.25j)
self.assertEqual(ast.Str('42').s, '42')
self.assertEqual(ast.Bytes(b'42').s, b'42')
self.assertIs(ast.NameConstant(True).value, True)
self.assertIs(ast.NameConstant(False).value, False)
self.assertIs(ast.NameConstant(None).value, None)
self.assertEqual(ast.Constant(42).value, 42)
self.assertEqual(ast.Constant(4.25).value, 4.25)
self.assertEqual(ast.Constant(4.25j).value, 4.25j)
self.assertEqual(ast.Constant('42').value, '42')
self.assertEqual(ast.Constant(b'42').value, b'42')
self.assertIs(ast.Constant(True).value, True)
self.assertIs(ast.Constant(False).value, False)
self.assertIs(ast.Constant(None).value, None)
self.assertIs(ast.Constant(...).value, ...)
def test_realtype(self):
self.assertEqual(type(ast.Num(42)), ast.Constant)
self.assertEqual(type(ast.Num(4.25)), ast.Constant)
self.assertEqual(type(ast.Num(4.25j)), ast.Constant)
self.assertEqual(type(ast.Str('42')), ast.Constant)
self.assertEqual(type(ast.Bytes(b'42')), ast.Constant)
self.assertEqual(type(ast.NameConstant(True)), ast.Constant)
self.assertEqual(type(ast.NameConstant(False)), ast.Constant)
self.assertEqual(type(ast.NameConstant(None)), ast.Constant)
self.assertEqual(type(ast.Ellipsis()), ast.Constant)
def test_isinstance(self):
self.assertTrue(isinstance(ast.Num(42), ast.Num))
self.assertTrue(isinstance(ast.Num(4.2), ast.Num))
self.assertTrue(isinstance(ast.Num(4.2j), ast.Num))
self.assertTrue(isinstance(ast.Str('42'), ast.Str))
self.assertTrue(isinstance(ast.Bytes(b'42'), ast.Bytes))
self.assertTrue(isinstance(ast.NameConstant(True), ast.NameConstant))
self.assertTrue(isinstance(ast.NameConstant(False), ast.NameConstant))
self.assertTrue(isinstance(ast.NameConstant(None), ast.NameConstant))
self.assertTrue(isinstance(ast.Ellipsis(), ast.Ellipsis))
self.assertTrue(isinstance(ast.Constant(42), ast.Num))
self.assertTrue(isinstance(ast.Constant(4.2), ast.Num))
self.assertTrue(isinstance(ast.Constant(4.2j), ast.Num))
self.assertTrue(isinstance(ast.Constant('42'), ast.Str))
self.assertTrue(isinstance(ast.Constant(b'42'), ast.Bytes))
self.assertTrue(isinstance(ast.Constant(True), ast.NameConstant))
self.assertTrue(isinstance(ast.Constant(False), ast.NameConstant))
self.assertTrue(isinstance(ast.Constant(None), ast.NameConstant))
self.assertTrue(isinstance(ast.Constant(...), ast.Ellipsis))
self.assertFalse(isinstance(ast.Str('42'), ast.Num))
self.assertFalse(isinstance(ast.Num(42), ast.Str))
self.assertFalse(isinstance(ast.Str('42'), ast.Bytes))
self.assertFalse(isinstance(ast.Num(42), ast.NameConstant))
self.assertFalse(isinstance(ast.Num(42), ast.Ellipsis))
self.assertFalse(isinstance(ast.NameConstant(True), ast.Num))
self.assertFalse(isinstance(ast.NameConstant(False), ast.Num))
self.assertFalse(isinstance(ast.Constant('42'), ast.Num))
self.assertFalse(isinstance(ast.Constant(42), ast.Str))
self.assertFalse(isinstance(ast.Constant('42'), ast.Bytes))
self.assertFalse(isinstance(ast.Constant(42), ast.NameConstant))
self.assertFalse(isinstance(ast.Constant(42), ast.Ellipsis))
self.assertFalse(isinstance(ast.Constant(True), ast.Num))
self.assertFalse(isinstance(ast.Constant(False), ast.Num))
self.assertFalse(isinstance(ast.Constant(), ast.Num))
self.assertFalse(isinstance(ast.Constant(), ast.Str))
self.assertFalse(isinstance(ast.Constant(), ast.Bytes))
self.assertFalse(isinstance(ast.Constant(), ast.NameConstant))
self.assertFalse(isinstance(ast.Constant(), ast.Ellipsis))
class S(str): pass
self.assertTrue(isinstance(ast.Constant(S('42')), ast.Str))
self.assertFalse(isinstance(ast.Constant(S('42')), ast.Num))
def test_subclasses(self):
class N(ast.Num):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.z = 'spam'
class N2(ast.Num):
pass
n = N(42)
self.assertEqual(n.n, 42)
self.assertEqual(n.z, 'spam')
self.assertEqual(type(n), N)
self.assertTrue(isinstance(n, N))
self.assertTrue(isinstance(n, ast.Num))
self.assertFalse(isinstance(n, N2))
self.assertFalse(isinstance(ast.Num(42), N))
n = N(n=42)
self.assertEqual(n.n, 42)
self.assertEqual(type(n), N)
def test_module(self):
body = [ast.Num(42)]
x = ast.Module(body, [])
self.assertEqual(x.body, body)
def test_nodeclasses(self):
# Zero arguments constructor explicitly allowed
x = ast.BinOp()
self.assertEqual(x._fields, ('left', 'op', 'right'))
# Random attribute allowed too
x.foobarbaz = 5
self.assertEqual(x.foobarbaz, 5)
n1 = ast.Num(1)
n3 = ast.Num(3)
addop = ast.Add()
x = ast.BinOp(n1, addop, n3)
self.assertEqual(x.left, n1)
self.assertEqual(x.op, addop)
self.assertEqual(x.right, n3)
x = ast.BinOp(1, 2, 3)
self.assertEqual(x.left, 1)
self.assertEqual(x.op, 2)
self.assertEqual(x.right, 3)
x = ast.BinOp(1, 2, 3, lineno=0)
self.assertEqual(x.left, 1)
self.assertEqual(x.op, 2)
self.assertEqual(x.right, 3)
self.assertEqual(x.lineno, 0)
# node raises exception when given too many arguments
self.assertRaises(TypeError, ast.BinOp, 1, 2, 3, 4)
# node raises exception when given too many arguments
self.assertRaises(TypeError, ast.BinOp, 1, 2, 3, 4, lineno=0)
# can set attributes through kwargs too
x = ast.BinOp(left=1, op=2, right=3, lineno=0)
self.assertEqual(x.left, 1)
self.assertEqual(x.op, 2)
self.assertEqual(x.right, 3)
self.assertEqual(x.lineno, 0)
# Random kwargs also allowed
x = ast.BinOp(1, 2, 3, foobarbaz=42)
self.assertEqual(x.foobarbaz, 42)
def test_no_fields(self):
# this used to fail because Sub._fields was None
x = ast.Sub()
self.assertEqual(x._fields, ())
# TODO: RUSTPYTHON _ast classes should be HEAPTYPES (except for _ast.AST)
@unittest.expectedFailure
def test_pickling(self):
import pickle
mods = [pickle]
try:
import cPickle
mods.append(cPickle)
except ImportError:
pass
protocols = [0, 1, 2]
for mod in mods:
for protocol in protocols:
for ast in (compile(i, "?", "exec", 0x400) for i in exec_tests):
ast2 = mod.loads(mod.dumps(ast, protocol))
self.assertEqual(to_tuple(ast2), to_tuple(ast))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_invalid_sum(self):
pos = dict(lineno=2, col_offset=3)
m = ast.Module([ast.Expr(ast.expr(**pos), **pos)], [])
with self.assertRaises(TypeError) as cm:
compile(m, "<test>", "exec")
self.assertIn("but got <ast.expr", str(cm.exception))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_invalid_identifier(self):
m = ast.Module([ast.Expr(ast.Name(42, ast.Load()))], [])
ast.fix_missing_locations(m)
with self.assertRaises(TypeError) as cm:
compile(m, "<test>", "exec")
self.assertIn("identifier must be of type str", str(cm.exception))
def test_invalid_constant(self):
for invalid_constant in int, (1, 2, int), frozenset((1, 2, int)):
e = ast.Expression(body=ast.Constant(invalid_constant))
ast.fix_missing_locations(e)
with self.assertRaisesRegex(
TypeError, "invalid type in Constant: type"
):
compile(e, "<test>", "eval")
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_empty_yield_from(self):
# Issue 16546: yield from value is not optional.
empty_yield_from = ast.parse("def f():\n yield from g()")
empty_yield_from.body[0].body[0].value.value = None
with self.assertRaises(ValueError) as cm:
compile(empty_yield_from, "<test>", "exec")
self.assertIn("field 'value' is required", str(cm.exception))
@support.cpython_only
def test_issue31592(self):
# There shouldn't be an assertion failure in case of a bad
# unicodedata.normalize().
import unicodedata
def bad_normalize(*args):
return None
with support.swap_attr(unicodedata, 'normalize', bad_normalize):
self.assertRaises(TypeError, ast.parse, '\u03D5')
def test_issue18374_binop_col_offset(self):
tree = ast.parse('4+5+6+7')
parent_binop = tree.body[0].value
child_binop = parent_binop.left
grandchild_binop = child_binop.left
self.assertEqual(parent_binop.col_offset, 0)
self.assertEqual(parent_binop.end_col_offset, 7)
self.assertEqual(child_binop.col_offset, 0)
self.assertEqual(child_binop.end_col_offset, 5)
self.assertEqual(grandchild_binop.col_offset, 0)
self.assertEqual(grandchild_binop.end_col_offset, 3)
tree = ast.parse('4+5-\\\n 6-7')
parent_binop = tree.body[0].value
child_binop = parent_binop.left
grandchild_binop = child_binop.left
self.assertEqual(parent_binop.col_offset, 0)
self.assertEqual(parent_binop.lineno, 1)
self.assertEqual(parent_binop.end_col_offset, 4)
self.assertEqual(parent_binop.end_lineno, 2)
self.assertEqual(child_binop.col_offset, 0)
self.assertEqual(child_binop.lineno, 1)
self.assertEqual(child_binop.end_col_offset, 2)
self.assertEqual(child_binop.end_lineno, 2)
self.assertEqual(grandchild_binop.col_offset, 0)
self.assertEqual(grandchild_binop.lineno, 1)
self.assertEqual(grandchild_binop.end_col_offset, 3)
self.assertEqual(grandchild_binop.end_lineno, 1)
def test_issue39579_dotted_name_end_col_offset(self):
tree = ast.parse('@a.b.c\ndef f(): pass')
attr_b = tree.body[0].decorator_list[0].value
self.assertEqual(attr_b.end_col_offset, 4)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_ast_asdl_signature(self):
self.assertEqual(ast.withitem.__doc__, "withitem(expr context_expr, expr? optional_vars)")
self.assertEqual(ast.GtE.__doc__, "GtE")
self.assertEqual(ast.Name.__doc__, "Name(identifier id, expr_context ctx)")
self.assertEqual(ast.cmpop.__doc__, "cmpop = Eq | NotEq | Lt | LtE | Gt | GtE | Is | IsNot | In | NotIn")
expressions = [f" | {node.__doc__}" for node in ast.expr.__subclasses__()]
expressions[0] = f"expr = {ast.expr.__subclasses__()[0].__doc__}"
self.assertCountEqual(ast.expr.__doc__.split("\n"), expressions)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_parenthesized_with_feature_version(self):
ast.parse('with (CtxManager() as example): ...', feature_version=(3, 10))
# While advertised as a feature in Python 3.10, this was allowed starting 3.9
ast.parse('with (CtxManager() as example): ...', feature_version=(3, 9))
with self.assertRaises(SyntaxError):
ast.parse('with (CtxManager() as example): ...', feature_version=(3, 8))
ast.parse('with CtxManager() as example: ...', feature_version=(3, 8))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_issue40614_feature_version(self):
ast.parse('f"{x=}"', feature_version=(3, 8))
with self.assertRaises(SyntaxError):
ast.parse('f"{x=}"', feature_version=(3, 7))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_assignment_expression_feature_version(self):
ast.parse('(x := 0)', feature_version=(3, 8))
with self.assertRaises(SyntaxError):
ast.parse('(x := 0)', feature_version=(3, 7))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_constant_as_name(self):
for constant in "True", "False", "None":
expr = ast.Expression(ast.Name(constant, ast.Load()))
ast.fix_missing_locations(expr)
with self.assertRaisesRegex(ValueError, f"identifier field can't represent '{constant}' constant"):
compile(expr, "<test>", "eval")
class ASTHelpers_Test(unittest.TestCase):
maxDiff = None
def test_parse(self):
a = ast.parse('foo(1 + 1)')
b = compile('foo(1 + 1)', '<unknown>', 'exec', ast.PyCF_ONLY_AST)
self.assertEqual(ast.dump(a), ast.dump(b))
def test_parse_in_error(self):
try:
1/0
except Exception:
with self.assertRaises(SyntaxError) as e:
ast.literal_eval(r"'\U'")
self.assertIsNotNone(e.exception.__context__)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_dump(self):
node = ast.parse('spam(eggs, "and cheese")')
self.assertEqual(ast.dump(node),
"Module(body=[Expr(value=Call(func=Name(id='spam', ctx=Load()), "
"args=[Name(id='eggs', ctx=Load()), Constant(value='and cheese')], "
"keywords=[]))], type_ignores=[])"
)
self.assertEqual(ast.dump(node, annotate_fields=False),
"Module([Expr(Call(Name('spam', Load()), [Name('eggs', Load()), "
"Constant('and cheese')], []))], [])"
)
self.assertEqual(ast.dump(node, include_attributes=True),
"Module(body=[Expr(value=Call(func=Name(id='spam', ctx=Load(), "
"lineno=1, col_offset=0, end_lineno=1, end_col_offset=4), "
"args=[Name(id='eggs', ctx=Load(), lineno=1, col_offset=5, "
"end_lineno=1, end_col_offset=9), Constant(value='and cheese', "
"lineno=1, col_offset=11, end_lineno=1, end_col_offset=23)], keywords=[], "
"lineno=1, col_offset=0, end_lineno=1, end_col_offset=24), "
"lineno=1, col_offset=0, end_lineno=1, end_col_offset=24)], type_ignores=[])"
)
    # TODO: RUSTPYTHON; redundant kind for Constant node
@unittest.expectedFailure
def test_dump_indent(self):
node = ast.parse('spam(eggs, "and cheese")')
self.assertEqual(ast.dump(node, indent=3), """\
Module(
body=[
Expr(
value=Call(
func=Name(id='spam', ctx=Load()),
args=[
Name(id='eggs', ctx=Load()),
Constant(value='and cheese')],
keywords=[]))],
type_ignores=[])""")
self.assertEqual(ast.dump(node, annotate_fields=False, indent='\t'), """\
Module(
\t[
\t\tExpr(
\t\t\tCall(
\t\t\t\tName('spam', Load()),
\t\t\t\t[
\t\t\t\t\tName('eggs', Load()),
\t\t\t\t\tConstant('and cheese')],
\t\t\t\t[]))],
\t[])""")
self.assertEqual(ast.dump(node, include_attributes=True, indent=3), """\
Module(
body=[
Expr(
value=Call(
func=Name(
id='spam',
ctx=Load(),
lineno=1,
col_offset=0,
end_lineno=1,
end_col_offset=4),
args=[
Name(
id='eggs',
ctx=Load(),
lineno=1,
col_offset=5,
end_lineno=1,
end_col_offset=9),
Constant(
value='and cheese',
lineno=1,
col_offset=11,
end_lineno=1,
end_col_offset=23)],
keywords=[],
lineno=1,
col_offset=0,
end_lineno=1,
end_col_offset=24),
lineno=1,
col_offset=0,
end_lineno=1,
end_col_offset=24)],
type_ignores=[])""")
def test_dump_incomplete(self):
node = ast.Raise(lineno=3, col_offset=4)
self.assertEqual(ast.dump(node),
"Raise()"
)
self.assertEqual(ast.dump(node, include_attributes=True),
"Raise(lineno=3, col_offset=4)"
)
node = ast.Raise(exc=ast.Name(id='e', ctx=ast.Load()), lineno=3, col_offset=4)
self.assertEqual(ast.dump(node),
"Raise(exc=Name(id='e', ctx=Load()))"
)
self.assertEqual(ast.dump(node, annotate_fields=False),
"Raise(Name('e', Load()))"
)
self.assertEqual(ast.dump(node, include_attributes=True),
"Raise(exc=Name(id='e', ctx=Load()), lineno=3, col_offset=4)"
)
self.assertEqual(ast.dump(node, annotate_fields=False, include_attributes=True),
"Raise(Name('e', Load()), lineno=3, col_offset=4)"
)
node = ast.Raise(cause=ast.Name(id='e', ctx=ast.Load()))
self.assertEqual(ast.dump(node),
"Raise(cause=Name(id='e', ctx=Load()))"
)
self.assertEqual(ast.dump(node, annotate_fields=False),
"Raise(cause=Name('e', Load()))"
)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_copy_location(self):
src = ast.parse('1 + 1', mode='eval')
src.body.right = ast.copy_location(ast.Num(2), src.body.right)
self.assertEqual(ast.dump(src, include_attributes=True),
'Expression(body=BinOp(left=Constant(value=1, lineno=1, col_offset=0, '
'end_lineno=1, end_col_offset=1), op=Add(), right=Constant(value=2, '
'lineno=1, col_offset=4, end_lineno=1, end_col_offset=5), lineno=1, '
'col_offset=0, end_lineno=1, end_col_offset=5))'
)
src = ast.Call(col_offset=1, lineno=1, end_lineno=1, end_col_offset=1)
new = ast.copy_location(src, ast.Call(col_offset=None, lineno=None))
self.assertIsNone(new.end_lineno)
self.assertIsNone(new.end_col_offset)
self.assertEqual(new.lineno, 1)
self.assertEqual(new.col_offset, 1)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_fix_missing_locations(self):
src = ast.parse('write("spam")')
src.body.append(ast.Expr(ast.Call(ast.Name('spam', ast.Load()),
[ast.Str('eggs')], [])))
self.assertEqual(src, ast.fix_missing_locations(src))
self.maxDiff = None
self.assertEqual(ast.dump(src, include_attributes=True),
"Module(body=[Expr(value=Call(func=Name(id='write', ctx=Load(), "
"lineno=1, col_offset=0, end_lineno=1, end_col_offset=5), "
"args=[Constant(value='spam', lineno=1, col_offset=6, end_lineno=1, "
"end_col_offset=12)], keywords=[], lineno=1, col_offset=0, end_lineno=1, "
"end_col_offset=13), lineno=1, col_offset=0, end_lineno=1, "
"end_col_offset=13), Expr(value=Call(func=Name(id='spam', ctx=Load(), "
"lineno=1, col_offset=0, end_lineno=1, end_col_offset=0), "
"args=[Constant(value='eggs', lineno=1, col_offset=0, end_lineno=1, "
"end_col_offset=0)], keywords=[], lineno=1, col_offset=0, end_lineno=1, "
"end_col_offset=0), lineno=1, col_offset=0, end_lineno=1, end_col_offset=0)], "
"type_ignores=[])"
)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_increment_lineno(self):
src = ast.parse('1 + 1', mode='eval')
self.assertEqual(ast.increment_lineno(src, n=3), src)
self.assertEqual(ast.dump(src, include_attributes=True),
'Expression(body=BinOp(left=Constant(value=1, lineno=4, col_offset=0, '
'end_lineno=4, end_col_offset=1), op=Add(), right=Constant(value=1, '
'lineno=4, col_offset=4, end_lineno=4, end_col_offset=5), lineno=4, '
'col_offset=0, end_lineno=4, end_col_offset=5))'
)
# issue10869: do not increment lineno of root twice
src = ast.parse('1 + 1', mode='eval')
self.assertEqual(ast.increment_lineno(src.body, n=3), src.body)
self.assertEqual(ast.dump(src, include_attributes=True),
'Expression(body=BinOp(left=Constant(value=1, lineno=4, col_offset=0, '
'end_lineno=4, end_col_offset=1), op=Add(), right=Constant(value=1, '
'lineno=4, col_offset=4, end_lineno=4, end_col_offset=5), lineno=4, '
'col_offset=0, end_lineno=4, end_col_offset=5))'
)
src = ast.Call(
func=ast.Name("test", ast.Load()), args=[], keywords=[], lineno=1
)
self.assertEqual(ast.increment_lineno(src).lineno, 2)
self.assertIsNone(ast.increment_lineno(src).end_lineno)
def test_iter_fields(self):
node = ast.parse('foo()', mode='eval')
d = dict(ast.iter_fields(node.body))
self.assertEqual(d.pop('func').id, 'foo')
self.assertEqual(d, {'keywords': [], 'args': []})
# TODO: RUSTPYTHON; redundant kind for Constant node
@unittest.expectedFailure
def test_iter_child_nodes(self):
node = ast.parse("spam(23, 42, eggs='leek')", mode='eval')
self.assertEqual(len(list(ast.iter_child_nodes(node.body))), 4)
iterator = ast.iter_child_nodes(node.body)
self.assertEqual(next(iterator).id, 'spam')
self.assertEqual(next(iterator).value, 23)
self.assertEqual(next(iterator).value, 42)
self.assertEqual(ast.dump(next(iterator)),
"keyword(arg='eggs', value=Constant(value='leek'))"
)
def test_get_docstring(self):
node = ast.parse('"""line one\n line two"""')
self.assertEqual(ast.get_docstring(node),
'line one\nline two')
node = ast.parse('class foo:\n """line one\n line two"""')
self.assertEqual(ast.get_docstring(node.body[0]),
'line one\nline two')
node = ast.parse('def foo():\n """line one\n line two"""')
self.assertEqual(ast.get_docstring(node.body[0]),
'line one\nline two')
node = ast.parse('async def foo():\n """spam\n ham"""')
self.assertEqual(ast.get_docstring(node.body[0]), 'spam\nham')
def test_get_docstring_none(self):
self.assertIsNone(ast.get_docstring(ast.parse('')))
node = ast.parse('x = "not docstring"')
self.assertIsNone(ast.get_docstring(node))
node = ast.parse('def foo():\n pass')
self.assertIsNone(ast.get_docstring(node))
node = ast.parse('class foo:\n pass')
self.assertIsNone(ast.get_docstring(node.body[0]))
node = ast.parse('class foo:\n x = "not docstring"')
self.assertIsNone(ast.get_docstring(node.body[0]))
node = ast.parse('class foo:\n def bar(self): pass')
self.assertIsNone(ast.get_docstring(node.body[0]))
node = ast.parse('def foo():\n pass')
self.assertIsNone(ast.get_docstring(node.body[0]))
node = ast.parse('def foo():\n x = "not docstring"')
self.assertIsNone(ast.get_docstring(node.body[0]))
node = ast.parse('async def foo():\n pass')
self.assertIsNone(ast.get_docstring(node.body[0]))
node = ast.parse('async def foo():\n x = "not docstring"')
self.assertIsNone(ast.get_docstring(node.body[0]))
def test_multi_line_docstring_col_offset_and_lineno_issue16806(self):
node = ast.parse(
'"""line one\nline two"""\n\n'
'def foo():\n """line one\n line two"""\n\n'
' def bar():\n """line one\n line two"""\n'
' """line one\n line two"""\n'
'"""line one\nline two"""\n\n'
)
self.assertEqual(node.body[0].col_offset, 0)
self.assertEqual(node.body[0].lineno, 1)
self.assertEqual(node.body[1].body[0].col_offset, 2)
self.assertEqual(node.body[1].body[0].lineno, 5)
self.assertEqual(node.body[1].body[1].body[0].col_offset, 4)
self.assertEqual(node.body[1].body[1].body[0].lineno, 9)
self.assertEqual(node.body[1].body[2].col_offset, 2)
self.assertEqual(node.body[1].body[2].lineno, 11)
self.assertEqual(node.body[2].col_offset, 0)
self.assertEqual(node.body[2].lineno, 13)
def test_elif_stmt_start_position(self):
node = ast.parse('if a:\n pass\nelif b:\n pass\n')
elif_stmt = node.body[0].orelse[0]
self.assertEqual(elif_stmt.lineno, 3)
self.assertEqual(elif_stmt.col_offset, 0)
def test_elif_stmt_start_position_with_else(self):
node = ast.parse('if a:\n pass\nelif b:\n pass\nelse:\n pass\n')
elif_stmt = node.body[0].orelse[0]
self.assertEqual(elif_stmt.lineno, 3)
self.assertEqual(elif_stmt.col_offset, 0)
def test_starred_expr_end_position_within_call(self):
node = ast.parse('f(*[0, 1])')
starred_expr = node.body[0].value.args[0]
self.assertEqual(starred_expr.end_lineno, 1)
self.assertEqual(starred_expr.end_col_offset, 9)
def test_literal_eval(self):
self.assertEqual(ast.literal_eval('[1, 2, 3]'), [1, 2, 3])
self.assertEqual(ast.literal_eval('{"foo": 42}'), {"foo": 42})
self.assertEqual(ast.literal_eval('(True, False, None)'), (True, False, None))
self.assertEqual(ast.literal_eval('{1, 2, 3}'), {1, 2, 3})
self.assertEqual(ast.literal_eval('b"hi"'), b"hi")
self.assertEqual(ast.literal_eval('set()'), set())
self.assertRaises(ValueError, ast.literal_eval, 'foo()')
self.assertEqual(ast.literal_eval('6'), 6)
self.assertEqual(ast.literal_eval('+6'), 6)
self.assertEqual(ast.literal_eval('-6'), -6)
self.assertEqual(ast.literal_eval('3.25'), 3.25)
self.assertEqual(ast.literal_eval('+3.25'), 3.25)
self.assertEqual(ast.literal_eval('-3.25'), -3.25)
self.assertEqual(repr(ast.literal_eval('-0.0')), '-0.0')
self.assertRaises(ValueError, ast.literal_eval, '++6')
self.assertRaises(ValueError, ast.literal_eval, '+True')
self.assertRaises(ValueError, ast.literal_eval, '2+3')
def test_literal_eval_complex(self):
# Issue #4907
self.assertEqual(ast.literal_eval('6j'), 6j)
self.assertEqual(ast.literal_eval('-6j'), -6j)
self.assertEqual(ast.literal_eval('6.75j'), 6.75j)
self.assertEqual(ast.literal_eval('-6.75j'), -6.75j)
self.assertEqual(ast.literal_eval('3+6j'), 3+6j)
self.assertEqual(ast.literal_eval('-3+6j'), -3+6j)
self.assertEqual(ast.literal_eval('3-6j'), 3-6j)
self.assertEqual(ast.literal_eval('-3-6j'), -3-6j)
self.assertEqual(ast.literal_eval('3.25+6.75j'), 3.25+6.75j)
self.assertEqual(ast.literal_eval('-3.25+6.75j'), -3.25+6.75j)
self.assertEqual(ast.literal_eval('3.25-6.75j'), 3.25-6.75j)
self.assertEqual(ast.literal_eval('-3.25-6.75j'), -3.25-6.75j)
self.assertEqual(ast.literal_eval('(3+6j)'), 3+6j)
self.assertRaises(ValueError, ast.literal_eval, '-6j+3')
self.assertRaises(ValueError, ast.literal_eval, '-6j+3j')
self.assertRaises(ValueError, ast.literal_eval, '3+-6j')
self.assertRaises(ValueError, ast.literal_eval, '3+(0+6j)')
self.assertRaises(ValueError, ast.literal_eval, '-(3+6j)')
def test_literal_eval_malformed_dict_nodes(self):
malformed = ast.Dict(keys=[ast.Constant(1), ast.Constant(2)], values=[ast.Constant(3)])
self.assertRaises(ValueError, ast.literal_eval, malformed)
malformed = ast.Dict(keys=[ast.Constant(1)], values=[ast.Constant(2), ast.Constant(3)])
self.assertRaises(ValueError, ast.literal_eval, malformed)
def test_literal_eval_trailing_ws(self):
self.assertEqual(ast.literal_eval(" -1"), -1)
self.assertEqual(ast.literal_eval("\t\t-1"), -1)
self.assertEqual(ast.literal_eval(" \t -1"), -1)
self.assertRaises(IndentationError, ast.literal_eval, "\n -1")
def test_literal_eval_malformed_lineno(self):
msg = r'malformed node or string on line 3:'
with self.assertRaisesRegex(ValueError, msg):
ast.literal_eval("{'a': 1,\n'b':2,\n'c':++3,\n'd':4}")
node = ast.UnaryOp(
ast.UAdd(), ast.UnaryOp(ast.UAdd(), ast.Constant(6)))
self.assertIsNone(getattr(node, 'lineno', None))
msg = r'malformed node or string:'
with self.assertRaisesRegex(ValueError, msg):
ast.literal_eval(node)
def test_literal_eval_syntax_errors(self):
with self.assertRaisesRegex(SyntaxError, "unexpected indent"):
ast.literal_eval(r'''
\
(\
\ ''')
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_bad_integer(self):
# issue13436: Bad error message with invalid numeric values
body = [ast.ImportFrom(module='time',
names=[ast.alias(name='sleep')],
level=None,
lineno=None, col_offset=None)]
mod = ast.Module(body, [])
with self.assertRaises(ValueError) as cm:
compile(mod, 'test', 'exec')
self.assertIn("invalid integer value: None", str(cm.exception))
def test_level_as_none(self):
body = [ast.ImportFrom(module='time',
names=[ast.alias(name='sleep',
lineno=0, col_offset=0)],
level=None,
lineno=0, col_offset=0)]
mod = ast.Module(body, [])
code = compile(mod, 'test', 'exec')
ns = {}
exec(code, ns)
self.assertIn('sleep', ns)
@unittest.skip("TODO: RUSTPYTHON; crash")
def test_recursion_direct(self):
e = ast.UnaryOp(op=ast.Not(), lineno=0, col_offset=0)
e.operand = e
with self.assertRaises(RecursionError):
with support.infinite_recursion():
compile(ast.Expression(e), "<test>", "eval")
@unittest.skip("TODO: RUSTPYTHON; crash")
def test_recursion_indirect(self):
e = ast.UnaryOp(op=ast.Not(), lineno=0, col_offset=0)
f = ast.UnaryOp(op=ast.Not(), lineno=0, col_offset=0)
e.operand = f
f.operand = e
with self.assertRaises(RecursionError):
with support.infinite_recursion():
compile(ast.Expression(e), "<test>", "eval")
class ASTValidatorTests(unittest.TestCase):
def mod(self, mod, msg=None, mode="exec", *, exc=ValueError):
mod.lineno = mod.col_offset = 0
ast.fix_missing_locations(mod)
if msg is None:
compile(mod, "<test>", mode)
else:
with self.assertRaises(exc) as cm:
compile(mod, "<test>", mode)
self.assertIn(msg, str(cm.exception))
def expr(self, node, msg=None, *, exc=ValueError):
mod = ast.Module([ast.Expr(node)], [])
self.mod(mod, msg, exc=exc)
def stmt(self, stmt, msg=None):
mod = ast.Module([stmt], [])
self.mod(mod, msg)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_module(self):
m = ast.Interactive([ast.Expr(ast.Name("x", ast.Store()))])
self.mod(m, "must have Load context", "single")
m = ast.Expression(ast.Name("x", ast.Store()))
self.mod(m, "must have Load context", "eval")
def _check_arguments(self, fac, check):
def arguments(args=None, posonlyargs=None, vararg=None,
kwonlyargs=None, kwarg=None,
defaults=None, kw_defaults=None):
if args is None:
args = []
if posonlyargs is None:
posonlyargs = []
if kwonlyargs is None:
kwonlyargs = []
if defaults is None:
defaults = []
if kw_defaults is None:
kw_defaults = []
args = ast.arguments(args, posonlyargs, vararg, kwonlyargs,
kw_defaults, kwarg, defaults)
return fac(args)
args = [ast.arg("x", ast.Name("x", ast.Store()))]
check(arguments(args=args), "must have Load context")
check(arguments(posonlyargs=args), "must have Load context")
check(arguments(kwonlyargs=args), "must have Load context")
check(arguments(defaults=[ast.Num(3)]),
"more positional defaults than args")
check(arguments(kw_defaults=[ast.Num(4)]),
"length of kwonlyargs is not the same as kw_defaults")
args = [ast.arg("x", ast.Name("x", ast.Load()))]
check(arguments(args=args, defaults=[ast.Name("x", ast.Store())]),
"must have Load context")
args = [ast.arg("a", ast.Name("x", ast.Load())),
ast.arg("b", ast.Name("y", ast.Load()))]
check(arguments(kwonlyargs=args,
kw_defaults=[None, ast.Name("x", ast.Store())]),
"must have Load context")
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_funcdef(self):
a = ast.arguments([], [], None, [], [], None, [])
f = ast.FunctionDef("x", a, [], [], None)
self.stmt(f, "empty body on FunctionDef")
f = ast.FunctionDef("x", a, [ast.Pass()], [ast.Name("x", ast.Store())],
None)
self.stmt(f, "must have Load context")
f = ast.FunctionDef("x", a, [ast.Pass()], [],
ast.Name("x", ast.Store()))
self.stmt(f, "must have Load context")
def fac(args):
return ast.FunctionDef("x", args, [ast.Pass()], [], None)
self._check_arguments(fac, self.stmt)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_classdef(self):
def cls(bases=None, keywords=None, body=None, decorator_list=None):
if bases is None:
bases = []
if keywords is None:
keywords = []
if body is None:
body = [ast.Pass()]
if decorator_list is None:
decorator_list = []
return ast.ClassDef("myclass", bases, keywords,
body, decorator_list)
self.stmt(cls(bases=[ast.Name("x", ast.Store())]),
"must have Load context")
self.stmt(cls(keywords=[ast.keyword("x", ast.Name("x", ast.Store()))]),
"must have Load context")
self.stmt(cls(body=[]), "empty body on ClassDef")
self.stmt(cls(body=[None]), "None disallowed")
self.stmt(cls(decorator_list=[ast.Name("x", ast.Store())]),
"must have Load context")
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_delete(self):
self.stmt(ast.Delete([]), "empty targets on Delete")
self.stmt(ast.Delete([None]), "None disallowed")
self.stmt(ast.Delete([ast.Name("x", ast.Load())]),
"must have Del context")
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_assign(self):
self.stmt(ast.Assign([], ast.Num(3)), "empty targets on Assign")
self.stmt(ast.Assign([None], ast.Num(3)), "None disallowed")
self.stmt(ast.Assign([ast.Name("x", ast.Load())], ast.Num(3)),
"must have Store context")
self.stmt(ast.Assign([ast.Name("x", ast.Store())],
ast.Name("y", ast.Store())),
"must have Load context")
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_augassign(self):
aug = ast.AugAssign(ast.Name("x", ast.Load()), ast.Add(),
ast.Name("y", ast.Load()))
self.stmt(aug, "must have Store context")
aug = ast.AugAssign(ast.Name("x", ast.Store()), ast.Add(),
ast.Name("y", ast.Store()))
self.stmt(aug, "must have Load context")
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_for(self):
x = ast.Name("x", ast.Store())
y = ast.Name("y", ast.Load())
p = ast.Pass()
self.stmt(ast.For(x, y, [], []), "empty body on For")
self.stmt(ast.For(ast.Name("x", ast.Load()), y, [p], []),
"must have Store context")
self.stmt(ast.For(x, ast.Name("y", ast.Store()), [p], []),
"must have Load context")
e = ast.Expr(ast.Name("x", ast.Store()))
self.stmt(ast.For(x, y, [e], []), "must have Load context")
self.stmt(ast.For(x, y, [p], [e]), "must have Load context")
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_while(self):
self.stmt(ast.While(ast.Num(3), [], []), "empty body on While")
self.stmt(ast.While(ast.Name("x", ast.Store()), [ast.Pass()], []),
"must have Load context")
self.stmt(ast.While(ast.Num(3), [ast.Pass()],
[ast.Expr(ast.Name("x", ast.Store()))]),
"must have Load context")
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_if(self):
self.stmt(ast.If(ast.Num(3), [], []), "empty body on If")
i = ast.If(ast.Name("x", ast.Store()), [ast.Pass()], [])
self.stmt(i, "must have Load context")
i = ast.If(ast.Num(3), [ast.Expr(ast.Name("x", ast.Store()))], [])
self.stmt(i, "must have Load context")
i = ast.If(ast.Num(3), [ast.Pass()],
[ast.Expr(ast.Name("x", ast.Store()))])
self.stmt(i, "must have Load context")
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_with(self):
p = ast.Pass()
self.stmt(ast.With([], [p]), "empty items on With")
i = ast.withitem(ast.Num(3), None)
self.stmt(ast.With([i], []), "empty body on With")
i = ast.withitem(ast.Name("x", ast.Store()), None)
self.stmt(ast.With([i], [p]), "must have Load context")
i = ast.withitem(ast.Num(3), ast.Name("x", ast.Load()))
self.stmt(ast.With([i], [p]), "must have Store context")
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_raise(self):
r = ast.Raise(None, ast.Num(3))
self.stmt(r, "Raise with cause but no exception")
r = ast.Raise(ast.Name("x", ast.Store()), None)
self.stmt(r, "must have Load context")
r = ast.Raise(ast.Num(4), ast.Name("x", ast.Store()))
self.stmt(r, "must have Load context")
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_try(self):
p = ast.Pass()
t = ast.Try([], [], [], [p])
self.stmt(t, "empty body on Try")
t = ast.Try([ast.Expr(ast.Name("x", ast.Store()))], [], [], [p])
self.stmt(t, "must have Load context")
t = ast.Try([p], [], [], [])
self.stmt(t, "Try has neither except handlers nor finalbody")
t = ast.Try([p], [], [p], [p])
self.stmt(t, "Try has orelse but no except handlers")
t = ast.Try([p], [ast.ExceptHandler(None, "x", [])], [], [])
self.stmt(t, "empty body on ExceptHandler")
e = [ast.ExceptHandler(ast.Name("x", ast.Store()), "y", [p])]
self.stmt(ast.Try([p], e, [], []), "must have Load context")
e = [ast.ExceptHandler(None, "x", [p])]
t = ast.Try([p], e, [ast.Expr(ast.Name("x", ast.Store()))], [p])
self.stmt(t, "must have Load context")
t = ast.Try([p], e, [p], [ast.Expr(ast.Name("x", ast.Store()))])
self.stmt(t, "must have Load context")
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_assert(self):
self.stmt(ast.Assert(ast.Name("x", ast.Store()), None),
"must have Load context")
assrt = ast.Assert(ast.Name("x", ast.Load()),
ast.Name("y", ast.Store()))
self.stmt(assrt, "must have Load context")
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_import(self):
self.stmt(ast.Import([]), "empty names on Import")
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_importfrom(self):
imp = ast.ImportFrom(None, [ast.alias("x", None)], -42)
self.stmt(imp, "Negative ImportFrom level")
self.stmt(ast.ImportFrom(None, [], 0), "empty names on ImportFrom")
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_global(self):
self.stmt(ast.Global([]), "empty names on Global")
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_nonlocal(self):
self.stmt(ast.Nonlocal([]), "empty names on Nonlocal")
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_expr(self):
e = ast.Expr(ast.Name("x", ast.Store()))
self.stmt(e, "must have Load context")
@unittest.skip("TODO: RUSTPYTHON, thread 'main' panicked at 'called `Option::unwrap()` on a `None` value'")
def test_boolop(self):
b = ast.BoolOp(ast.And(), [])
self.expr(b, "less than 2 values")
b = ast.BoolOp(ast.And(), [ast.Num(3)])
self.expr(b, "less than 2 values")
b = ast.BoolOp(ast.And(), [ast.Num(4), None])
self.expr(b, "None disallowed")
b = ast.BoolOp(ast.And(), [ast.Num(4), ast.Name("x", ast.Store())])
self.expr(b, "must have Load context")
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_unaryop(self):
u = ast.UnaryOp(ast.Not(), ast.Name("x", ast.Store()))
self.expr(u, "must have Load context")
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_lambda(self):
a = ast.arguments([], [], None, [], [], None, [])
self.expr(ast.Lambda(a, ast.Name("x", ast.Store())),
"must have Load context")
def fac(args):
return ast.Lambda(args, ast.Name("x", ast.Load()))
self._check_arguments(fac, self.expr)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_ifexp(self):
l = ast.Name("x", ast.Load())
s = ast.Name("y", ast.Store())
for args in (s, l, l), (l, s, l), (l, l, s):
self.expr(ast.IfExp(*args), "must have Load context")
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_dict(self):
d = ast.Dict([], [ast.Name("x", ast.Load())])
self.expr(d, "same number of keys as values")
d = ast.Dict([ast.Name("x", ast.Load())], [None])
self.expr(d, "None disallowed")
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_set(self):
self.expr(ast.Set([None]), "None disallowed")
s = ast.Set([ast.Name("x", ast.Store())])
self.expr(s, "must have Load context")
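    # Shared checks for comprehension nodes: `fac` builds a node from a list
    # of generators, and every malformed shape (no generators, wrong
    # expression contexts, None in the ifs list) must be rejected.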
def _check_comprehension(self, fac):
self.expr(fac([]), "comprehension with no generators")
g = ast.comprehension(ast.Name("x", ast.Load()),
ast.Name("x", ast.Load()), [], 0)
self.expr(fac([g]), "must have Store context")
g = ast.comprehension(ast.Name("x", ast.Store()),
ast.Name("x", ast.Store()), [], 0)
self.expr(fac([g]), "must have Load context")
x = ast.Name("x", ast.Store())
y = ast.Name("y", ast.Load())
g = ast.comprehension(x, y, [None], 0)
self.expr(fac([g]), "None disallowed")
g = ast.comprehension(x, y, [ast.Name("x", ast.Store())], 0)
self.expr(fac([g]), "must have Load context")
def _simple_comp(self, fac):
g = ast.comprehension(ast.Name("x", ast.Store()),
ast.Name("x", ast.Load()), [], 0)
self.expr(fac(ast.Name("x", ast.Store()), [g]),
"must have Load context")
def wrap(gens):
return fac(ast.Name("x", ast.Store()), gens)
self._check_comprehension(wrap)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_listcomp(self):
self._simple_comp(ast.ListComp)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_setcomp(self):
self._simple_comp(ast.SetComp)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_generatorexp(self):
self._simple_comp(ast.GeneratorExp)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_dictcomp(self):
g = ast.comprehension(ast.Name("y", ast.Store()),
ast.Name("p", ast.Load()), [], 0)
c = ast.DictComp(ast.Name("x", ast.Store()),
ast.Name("y", ast.Load()), [g])
self.expr(c, "must have Load context")
c = ast.DictComp(ast.Name("x", ast.Load()),
ast.Name("y", ast.Store()), [g])
self.expr(c, "must have Load context")
def factory(comps):
k = ast.Name("x", ast.Load())
v = ast.Name("y", ast.Load())
return ast.DictComp(k, v, comps)
self._check_comprehension(factory)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_yield(self):
self.expr(ast.Yield(ast.Name("x", ast.Store())), "must have Load")
self.expr(ast.YieldFrom(ast.Name("x", ast.Store())), "must have Load")
@unittest.skip("TODO: RUSTPYTHON, thread 'main' panicked at 'assertion failed: `(left == right)` left: `0`, right: `1`'")
def test_compare(self):
left = ast.Name("x", ast.Load())
comp = ast.Compare(left, [ast.In()], [])
self.expr(comp, "no comparators")
comp = ast.Compare(left, [ast.In()], [ast.Num(4), ast.Num(5)])
self.expr(comp, "different number of comparators and operands")
comp = ast.Compare(ast.Num("blah"), [ast.In()], [left])
self.expr(comp)
comp = ast.Compare(left, [ast.In()], [ast.Num("blah")])
self.expr(comp)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_call(self):
func = ast.Name("x", ast.Load())
args = [ast.Name("y", ast.Load())]
keywords = [ast.keyword("w", ast.Name("z", ast.Load()))]
call = ast.Call(ast.Name("x", ast.Store()), args, keywords)
self.expr(call, "must have Load context")
call = ast.Call(func, [None], keywords)
self.expr(call, "None disallowed")
bad_keywords = [ast.keyword("w", ast.Name("z", ast.Store()))]
call = ast.Call(func, args, bad_keywords)
self.expr(call, "must have Load context")
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_num(self):
class subint(int):
pass
class subfloat(float):
pass
class subcomplex(complex):
pass
for obj in "0", "hello":
self.expr(ast.Num(obj))
for obj in subint(), subfloat(), subcomplex():
self.expr(ast.Num(obj), "invalid type", exc=TypeError)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_attribute(self):
attr = ast.Attribute(ast.Name("x", ast.Store()), "y", ast.Load())
self.expr(attr, "must have Load context")
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_subscript(self):
sub = ast.Subscript(ast.Name("x", ast.Store()), ast.Num(3),
ast.Load())
self.expr(sub, "must have Load context")
x = ast.Name("x", ast.Load())
sub = ast.Subscript(x, ast.Name("y", ast.Store()),
ast.Load())
self.expr(sub, "must have Load context")
s = ast.Name("x", ast.Store())
for args in (s, None, None), (None, s, None), (None, None, s):
sl = ast.Slice(*args)
self.expr(ast.Subscript(x, sl, ast.Load()),
"must have Load context")
sl = ast.Tuple([], ast.Load())
self.expr(ast.Subscript(x, sl, ast.Load()))
sl = ast.Tuple([s], ast.Load())
self.expr(ast.Subscript(x, sl, ast.Load()), "must have Load context")
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_starred(self):
left = ast.List([ast.Starred(ast.Name("x", ast.Load()), ast.Store())],
ast.Store())
assign = ast.Assign([left], ast.Num(4))
self.stmt(assign, "must have Store context")
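    # Shared checks for sequence displays (List/Tuple): None elements and
    # Store-context names inside a Load-context literal are both invalid.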
def _sequence(self, fac):
self.expr(fac([None], ast.Load()), "None disallowed")
self.expr(fac([ast.Name("x", ast.Store())], ast.Load()),
"must have Load context")
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_list(self):
self._sequence(ast.List)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_tuple(self):
self._sequence(ast.Tuple)
def test_nameconstant(self):
self.expr(ast.NameConstant(4))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_stdlib_validates(self):
stdlib = os.path.dirname(ast.__file__)
tests = [fn for fn in os.listdir(stdlib) if fn.endswith(".py")]
tests.extend(["test/test_grammar.py", "test/test_unpack_ex.py"])
for module in tests:
with self.subTest(module):
fn = os.path.join(stdlib, module)
with open(fn, "r", encoding="utf-8") as fp:
source = fp.read()
mod = ast.parse(source, fn)
compile(mod, fn, "exec")
constant_1 = ast.Constant(1)
pattern_1 = ast.MatchValue(constant_1)
constant_x = ast.Constant('x')
pattern_x = ast.MatchValue(constant_x)
constant_true = ast.Constant(True)
pattern_true = ast.MatchSingleton(True)
name_carter = ast.Name('carter', ast.Load())
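    # Deliberately malformed match patterns; test_match_validation_pattern
    # below asserts that compiling each of them raises ValueError.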
_MATCH_PATTERNS = [
ast.MatchValue(
ast.Attribute(
ast.Attribute(
ast.Name('x', ast.Store()),
'y', ast.Load()
),
'z', ast.Load()
)
),
ast.MatchValue(
ast.Attribute(
ast.Attribute(
ast.Name('x', ast.Load()),
'y', ast.Store()
),
'z', ast.Load()
)
),
ast.MatchValue(
ast.Constant(...)
),
ast.MatchValue(
ast.Constant(True)
),
ast.MatchValue(
ast.Constant((1,2,3))
),
ast.MatchSingleton('string'),
ast.MatchSequence([
ast.MatchSingleton('string')
]),
ast.MatchSequence(
[
ast.MatchSequence(
[
ast.MatchSingleton('string')
]
)
]
),
ast.MatchMapping(
[constant_1, constant_true],
[pattern_x]
),
ast.MatchMapping(
[constant_true, constant_1],
[pattern_x, pattern_1],
rest='True'
),
ast.MatchMapping(
[constant_true, ast.Starred(ast.Name('lol', ast.Load()), ast.Load())],
[pattern_x, pattern_1],
rest='legit'
),
ast.MatchClass(
ast.Attribute(
ast.Attribute(
constant_x,
'y', ast.Load()),
'z', ast.Load()),
patterns=[], kwd_attrs=[], kwd_patterns=[]
),
ast.MatchClass(
name_carter,
patterns=[],
kwd_attrs=['True'],
kwd_patterns=[pattern_1]
),
ast.MatchClass(
name_carter,
patterns=[],
kwd_attrs=[],
kwd_patterns=[pattern_1]
),
ast.MatchClass(
name_carter,
patterns=[ast.MatchSingleton('string')],
kwd_attrs=[],
kwd_patterns=[]
),
ast.MatchClass(
name_carter,
patterns=[ast.MatchStar()],
kwd_attrs=[],
kwd_patterns=[]
),
ast.MatchClass(
name_carter,
patterns=[],
kwd_attrs=[],
kwd_patterns=[ast.MatchStar()]
),
ast.MatchSequence(
[
ast.MatchStar("True")
]
),
ast.MatchAs(
name='False'
),
ast.MatchOr(
[]
),
ast.MatchOr(
[pattern_1]
),
ast.MatchOr(
[pattern_1, pattern_x, ast.MatchSingleton('xxx')]
),
ast.MatchAs(name="_"),
ast.MatchStar(name="x"),
ast.MatchSequence([ast.MatchStar("_")]),
ast.MatchMapping([], [], rest="_"),
]
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_match_validation_pattern(self):
name_x = ast.Name('x', ast.Load())
for pattern in self._MATCH_PATTERNS:
with self.subTest(ast.dump(pattern, indent=4)):
node = ast.Match(
subject=name_x,
cases = [
ast.match_case(
pattern=pattern,
body = [ast.Pass()]
)
]
)
node = ast.fix_missing_locations(node)
module = ast.Module([node], [])
with self.assertRaises(ValueError):
compile(module, "<test>", "exec")
class ConstantTests(unittest.TestCase):
"""Tests on the ast.Constant node type."""
def compile_constant(self, value):
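        # Parse a dummy assignment, swap its right-hand side for an
        # ast.Constant carrying `value`, then compile and exec it so the
        # compiler (not the parser) decides whether `value` is acceptable.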
tree = ast.parse("x = 123")
node = tree.body[0].value
new_node = ast.Constant(value=value)
ast.copy_location(new_node, node)
tree.body[0].value = new_node
code = compile(tree, "<string>", "exec")
ns = {}
exec(code, ns)
return ns['x']
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_validation(self):
with self.assertRaises(TypeError) as cm:
self.compile_constant([1, 2, 3])
self.assertEqual(str(cm.exception),
"got an invalid type in Constant: list")
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_singletons(self):
for const in (None, False, True, Ellipsis, b'', frozenset()):
with self.subTest(const=const):
value = self.compile_constant(const)
self.assertIs(value, const)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_values(self):
nested_tuple = (1,)
nested_frozenset = frozenset({1})
for level in range(3):
nested_tuple = (nested_tuple, 2)
nested_frozenset = frozenset({nested_frozenset, 2})
values = (123, 123.0, 123j,
"unicode", b'bytes',
tuple("tuple"), frozenset("frozenset"),
nested_tuple, nested_frozenset)
for value in values:
with self.subTest(value=value):
result = self.compile_constant(value)
self.assertEqual(result, value)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_assign_to_constant(self):
tree = ast.parse("x = 1")
target = tree.body[0].targets[0]
new_target = ast.Constant(value=1)
ast.copy_location(new_target, target)
tree.body[0].targets[0] = new_target
with self.assertRaises(ValueError) as cm:
compile(tree, "string", "exec")
self.assertEqual(str(cm.exception),
"expression which can't be assigned "
"to in Store context")
def test_get_docstring(self):
tree = ast.parse("'docstring'\nx = 1")
self.assertEqual(ast.get_docstring(tree), 'docstring')
def get_load_const(self, tree):
# Compile to bytecode, disassemble and get parameter of LOAD_CONST
# instructions
co = compile(tree, '<string>', 'exec')
consts = []
for instr in dis.get_instructions(co):
if instr.opname == 'LOAD_CONST':
consts.append(instr.argval)
return consts
@support.cpython_only
def test_load_const(self):
consts = [None,
True, False,
124,
2.0,
3j,
"unicode",
b'bytes',
(1, 2, 3)]
code = '\n'.join(['x={!r}'.format(const) for const in consts])
code += '\nx = ...'
consts.extend((Ellipsis, None))
tree = ast.parse(code)
self.assertEqual(self.get_load_const(tree),
consts)
# Replace expression nodes with constants
for assign, const in zip(tree.body, consts):
assert isinstance(assign, ast.Assign), ast.dump(assign)
new_node = ast.Constant(value=const)
ast.copy_location(new_node, assign.value)
assign.value = new_node
self.assertEqual(self.get_load_const(tree),
consts)
def test_literal_eval(self):
tree = ast.parse("1 + 2")
binop = tree.body[0].value
new_left = ast.Constant(value=10)
ast.copy_location(new_left, binop.left)
binop.left = new_left
new_right = ast.Constant(value=20j)
ast.copy_location(new_right, binop.right)
binop.right = new_right
self.assertEqual(ast.literal_eval(binop), 10+20j)
def test_string_kind(self):
c = ast.parse('"x"', mode='eval').body
self.assertEqual(c.value, "x")
self.assertEqual(c.kind, None)
c = ast.parse('u"x"', mode='eval').body
self.assertEqual(c.value, "x")
self.assertEqual(c.kind, "u")
c = ast.parse('r"x"', mode='eval').body
self.assertEqual(c.value, "x")
self.assertEqual(c.kind, None)
c = ast.parse('b"x"', mode='eval').body
self.assertEqual(c.value, b"x")
self.assertEqual(c.kind, None)
class EndPositionTests(unittest.TestCase):
"""Tests for end position of AST nodes.
Testing end positions of nodes requires a bit of extra care
because of how LL parsers work.
"""
def _check_end_pos(self, ast_node, end_lineno, end_col_offset):
self.assertEqual(ast_node.end_lineno, end_lineno)
self.assertEqual(ast_node.end_col_offset, end_col_offset)
def _check_content(self, source, ast_node, content):
self.assertEqual(ast.get_source_segment(source, ast_node), content)
def _parse_value(self, s):
# Use duck-typing to support both single expression
# and a right hand side of an assignment statement.
return ast.parse(s).body[0].value
def test_lambda(self):
s = 'lambda x, *y: None'
lam = self._parse_value(s)
self._check_content(s, lam.body, 'None')
self._check_content(s, lam.args.args[0], 'x')
self._check_content(s, lam.args.vararg, 'y')
def test_func_def(self):
s = dedent('''
def func(x: int,
*args: str,
z: float = 0,
**kwargs: Any) -> bool:
return True
''').strip()
fdef = ast.parse(s).body[0]
self._check_end_pos(fdef, 5, 15)
self._check_content(s, fdef.body[0], 'return True')
self._check_content(s, fdef.args.args[0], 'x: int')
self._check_content(s, fdef.args.args[0].annotation, 'int')
self._check_content(s, fdef.args.kwarg, 'kwargs: Any')
self._check_content(s, fdef.args.kwarg.annotation, 'Any')
def test_call(self):
s = 'func(x, y=2, **kw)'
call = self._parse_value(s)
self._check_content(s, call.func, 'func')
self._check_content(s, call.keywords[0].value, '2')
self._check_content(s, call.keywords[1].value, 'kw')
def test_call_noargs(self):
s = 'x[0]()'
call = self._parse_value(s)
self._check_content(s, call.func, 'x[0]')
self._check_end_pos(call, 1, 6)
def test_class_def(self):
s = dedent('''
class C(A, B):
x: int = 0
''').strip()
cdef = ast.parse(s).body[0]
self._check_end_pos(cdef, 2, 14)
self._check_content(s, cdef.bases[1], 'B')
self._check_content(s, cdef.body[0], 'x: int = 0')
def test_class_kw(self):
s = 'class S(metaclass=abc.ABCMeta): pass'
cdef = ast.parse(s).body[0]
self._check_content(s, cdef.keywords[0].value, 'abc.ABCMeta')
def test_multi_line_str(self):
s = dedent('''
x = """Some multi-line text.
It goes on starting from same indent."""
''').strip()
assign = ast.parse(s).body[0]
self._check_end_pos(assign, 3, 40)
self._check_end_pos(assign.value, 3, 40)
def test_continued_str(self):
s = dedent('''
x = "first part" \\
"second part"
''').strip()
assign = ast.parse(s).body[0]
self._check_end_pos(assign, 2, 13)
self._check_end_pos(assign.value, 2, 13)
def test_suites(self):
# We intentionally put these into the same string to check
# that empty lines are not part of the suite.
s = dedent('''
while True:
pass
if one():
x = None
elif other():
y = None
else:
z = None
for x, y in stuff:
assert True
try:
raise RuntimeError
except TypeError as e:
pass
pass
''').strip()
mod = ast.parse(s)
while_loop = mod.body[0]
if_stmt = mod.body[1]
for_loop = mod.body[2]
try_stmt = mod.body[3]
pass_stmt = mod.body[4]
self._check_end_pos(while_loop, 2, 8)
self._check_end_pos(if_stmt, 9, 12)
self._check_end_pos(for_loop, 12, 15)
self._check_end_pos(try_stmt, 17, 8)
self._check_end_pos(pass_stmt, 19, 4)
self._check_content(s, while_loop.test, 'True')
self._check_content(s, if_stmt.body[0], 'x = None')
self._check_content(s, if_stmt.orelse[0].test, 'other()')
self._check_content(s, for_loop.target, 'x, y')
self._check_content(s, try_stmt.body[0], 'raise RuntimeError')
self._check_content(s, try_stmt.handlers[0].type, 'TypeError')
def test_fstring(self):
s = 'x = f"abc {x + y} abc"'
fstr = self._parse_value(s)
binop = fstr.values[1].value
self._check_content(s, binop, 'x + y')
def test_fstring_multi_line(self):
s = dedent('''
f"""Some multi-line text.
{
arg_one
+
arg_two
}
It goes on..."""
''').strip()
fstr = self._parse_value(s)
binop = fstr.values[1].value
self._check_end_pos(binop, 5, 7)
self._check_content(s, binop.left, 'arg_one')
self._check_content(s, binop.right, 'arg_two')
def test_import_from_multi_line(self):
s = dedent('''
from x.y.z import (
a, b, c as c
)
''').strip()
imp = ast.parse(s).body[0]
self._check_end_pos(imp, 3, 1)
self._check_end_pos(imp.names[2], 2, 16)
def test_slices(self):
s1 = 'f()[1, 2] [0]'
s2 = 'x[ a.b: c.d]'
sm = dedent('''
x[ a.b: f () ,
g () : c.d
]
''').strip()
i1, i2, im = map(self._parse_value, (s1, s2, sm))
self._check_content(s1, i1.value, 'f()[1, 2]')
self._check_content(s1, i1.value.slice, '1, 2')
self._check_content(s2, i2.slice.lower, 'a.b')
self._check_content(s2, i2.slice.upper, 'c.d')
self._check_content(sm, im.slice.elts[0].upper, 'f ()')
self._check_content(sm, im.slice.elts[1].lower, 'g ()')
self._check_end_pos(im, 3, 3)
def test_binop(self):
s = dedent('''
(1 * 2 + (3 ) +
4
)
''').strip()
binop = self._parse_value(s)
self._check_end_pos(binop, 2, 6)
self._check_content(s, binop.right, '4')
self._check_content(s, binop.left, '1 * 2 + (3 )')
self._check_content(s, binop.left.right, '3')
def test_boolop(self):
s = dedent('''
if (one_condition and
(other_condition or yet_another_one)):
pass
''').strip()
bop = ast.parse(s).body[0].test
self._check_end_pos(bop, 2, 44)
self._check_content(s, bop.values[1],
'other_condition or yet_another_one')
def test_tuples(self):
s1 = 'x = () ;'
s2 = 'x = 1 , ;'
s3 = 'x = (1 , 2 ) ;'
sm = dedent('''
x = (
a, b,
)
''').strip()
t1, t2, t3, tm = map(self._parse_value, (s1, s2, s3, sm))
self._check_content(s1, t1, '()')
self._check_content(s2, t2, '1 ,')
self._check_content(s3, t3, '(1 , 2 )')
self._check_end_pos(tm, 3, 1)
def test_attribute_spaces(self):
s = 'func(x. y .z)'
call = self._parse_value(s)
self._check_content(s, call, s)
self._check_content(s, call.args[0], 'x. y .z')
def test_redundant_parenthesis(self):
s = '( ( ( a + b ) ) )'
v = ast.parse(s).body[0].value
self.assertEqual(type(v).__name__, 'BinOp')
self._check_content(s, v, 'a + b')
s2 = 'await ' + s
v = ast.parse(s2).body[0].value.value
self.assertEqual(type(v).__name__, 'BinOp')
self._check_content(s2, v, 'a + b')
def test_trailers_with_redundant_parenthesis(self):
tests = (
('( ( ( a ) ) ) ( )', 'Call'),
('( ( ( a ) ) ) ( b )', 'Call'),
('( ( ( a ) ) ) [ b ]', 'Subscript'),
('( ( ( a ) ) ) . b', 'Attribute'),
)
for s, t in tests:
with self.subTest(s):
v = ast.parse(s).body[0].value
self.assertEqual(type(v).__name__, t)
self._check_content(s, v, s)
s2 = 'await ' + s
v = ast.parse(s2).body[0].value.value
self.assertEqual(type(v).__name__, t)
self._check_content(s2, v, s)
def test_displays(self):
s1 = '[{}, {1, }, {1, 2,} ]'
s2 = '{a: b, f (): g () ,}'
c1 = self._parse_value(s1)
c2 = self._parse_value(s2)
self._check_content(s1, c1.elts[0], '{}')
self._check_content(s1, c1.elts[1], '{1, }')
self._check_content(s1, c1.elts[2], '{1, 2,}')
self._check_content(s2, c2.keys[1], 'f ()')
self._check_content(s2, c2.values[1], 'g ()')
def test_comprehensions(self):
s = dedent('''
x = [{x for x, y in stuff
if cond.x} for stuff in things]
''').strip()
cmp = self._parse_value(s)
self._check_end_pos(cmp, 2, 37)
self._check_content(s, cmp.generators[0].iter, 'things')
self._check_content(s, cmp.elt.generators[0].iter, 'stuff')
self._check_content(s, cmp.elt.generators[0].ifs[0], 'cond.x')
self._check_content(s, cmp.elt.generators[0].target, 'x, y')
def test_yield_await(self):
s = dedent('''
async def f():
yield x
await y
''').strip()
fdef = ast.parse(s).body[0]
self._check_content(s, fdef.body[0].value, 'yield x')
self._check_content(s, fdef.body[1].value, 'await y')
def test_source_segment_multi(self):
s_orig = dedent('''
x = (
a, b,
) + ()
''').strip()
s_tuple = dedent('''
(
a, b,
)
''').strip()
binop = self._parse_value(s_orig)
self.assertEqual(ast.get_source_segment(s_orig, binop.left), s_tuple)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_source_segment_padded(self):
s_orig = dedent('''
class C:
def fun(self) -> None:
"ЖЖЖЖЖ"
''').strip()
s_method = ' def fun(self) -> None:\n' \
' "ЖЖЖЖЖ"'
cdef = ast.parse(s_orig).body[0]
self.assertEqual(ast.get_source_segment(s_orig, cdef.body[0], padded=True),
s_method)
def test_source_segment_endings(self):
s = 'v = 1\r\nw = 1\nx = 1\n\ry = 1\rz = 1\r\n'
v, w, x, y, z = ast.parse(s).body
self._check_content(s, v, 'v = 1')
self._check_content(s, w, 'w = 1')
self._check_content(s, x, 'x = 1')
self._check_content(s, y, 'y = 1')
self._check_content(s, z, 'z = 1')
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_source_segment_tabs(self):
s = dedent('''
class C:
\t\f def fun(self) -> None:
\t\f pass
''').strip()
s_method = ' \t\f def fun(self) -> None:\n' \
' \t\f pass'
cdef = ast.parse(s).body[0]
self.assertEqual(ast.get_source_segment(s, cdef.body[0], padded=True), s_method)
def test_source_segment_missing_info(self):
s = 'v = 1\r\nw = 1\nx = 1\n\ry = 1\r\n'
v, w, x, y = ast.parse(s).body
del v.lineno
del w.end_lineno
del x.col_offset
del y.end_col_offset
self.assertIsNone(ast.get_source_segment(s, v))
self.assertIsNone(ast.get_source_segment(s, w))
self.assertIsNone(ast.get_source_segment(s, x))
self.assertIsNone(ast.get_source_segment(s, y))
class NodeVisitorTests(unittest.TestCase):
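    # The deprecated visit_Num/Str/Bytes/NameConstant/Ellipsis hooks must
    # still be dispatched for Constant nodes, each with a DeprecationWarning.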
def test_old_constant_nodes(self):
class Visitor(ast.NodeVisitor):
def visit_Num(self, node):
log.append((node.lineno, 'Num', node.n))
def visit_Str(self, node):
log.append((node.lineno, 'Str', node.s))
def visit_Bytes(self, node):
log.append((node.lineno, 'Bytes', node.s))
def visit_NameConstant(self, node):
log.append((node.lineno, 'NameConstant', node.value))
def visit_Ellipsis(self, node):
log.append((node.lineno, 'Ellipsis', ...))
mod = ast.parse(dedent('''\
i = 42
f = 4.25
c = 4.25j
s = 'string'
b = b'bytes'
t = True
n = None
e = ...
'''))
visitor = Visitor()
log = []
with warnings.catch_warnings(record=True) as wlog:
warnings.filterwarnings('always', '', DeprecationWarning)
visitor.visit(mod)
self.assertEqual(log, [
(1, 'Num', 42),
(2, 'Num', 4.25),
(3, 'Num', 4.25j),
(4, 'Str', 'string'),
(5, 'Bytes', b'bytes'),
(6, 'NameConstant', True),
(7, 'NameConstant', None),
(8, 'Ellipsis', ...),
])
self.assertEqual([str(w.message) for w in wlog], [
'visit_Num is deprecated; add visit_Constant',
'visit_Num is deprecated; add visit_Constant',
'visit_Num is deprecated; add visit_Constant',
'visit_Str is deprecated; add visit_Constant',
'visit_Bytes is deprecated; add visit_Constant',
'visit_NameConstant is deprecated; add visit_Constant',
'visit_NameConstant is deprecated; add visit_Constant',
'visit_Ellipsis is deprecated; add visit_Constant',
])
@support.cpython_only
class ModuleStateTests(unittest.TestCase):
# bpo-41194, bpo-41261, bpo-41631: The _ast module uses a global state.
def check_ast_module(self):
# Check that the _ast module still works as expected
code = 'x + 1'
filename = '<string>'
mode = 'eval'
# Create _ast.AST subclasses instances
ast_tree = compile(code, filename, mode, flags=ast.PyCF_ONLY_AST)
# Call PyAST_Check()
code = compile(ast_tree, filename, mode)
self.assertIsInstance(code, types.CodeType)
def test_reload_module(self):
# bpo-41194: Importing the _ast module twice must not crash.
with support.swap_item(sys.modules, '_ast', None):
del sys.modules['_ast']
import _ast as ast1
del sys.modules['_ast']
import _ast as ast2
self.check_ast_module()
# Unloading the two _ast module instances must not crash.
del ast1
del ast2
support.gc_collect()
self.check_ast_module()
def test_sys_modules(self):
# bpo-41631: Test reproducing a Mercurial crash when PyAST_Check()
# imported the _ast module internally.
lazy_mod = object()
def my_import(name, *args, **kw):
sys.modules[name] = lazy_mod
return lazy_mod
with support.swap_item(sys.modules, '_ast', None):
del sys.modules['_ast']
with support.swap_attr(builtins, '__import__', my_import):
# Test that compile() does not import the _ast module
self.check_ast_module()
self.assertNotIn('_ast', sys.modules)
# Sanity check of the test itself
import _ast
self.assertIs(_ast, lazy_mod)
def test_subinterpreter(self):
# bpo-41631: Importing and using the _ast module in a subinterpreter
# must not crash.
code = dedent('''
import _ast
import ast
import gc
import sys
import types
# Create _ast.AST subclasses instances and call PyAST_Check()
ast_tree = compile('x+1', '<string>', 'eval',
flags=ast.PyCF_ONLY_AST)
code = compile(ast_tree, 'string', 'eval')
if not isinstance(code, types.CodeType):
raise AssertionError
# Unloading the _ast module must not crash.
del ast, _ast
del sys.modules['ast'], sys.modules['_ast']
gc.collect()
''')
res = support.run_in_subinterp(code)
self.assertEqual(res, 0)
def main():
if __name__ != '__main__':
return
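    # With -g, print freshly generated expected-result tables (the
    # *_results lists appended below) instead of running the tests.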
if sys.argv[1:] == ['-g']:
for statements, kind in ((exec_tests, "exec"), (single_tests, "single"),
(eval_tests, "eval")):
print(kind+"_results = [")
for statement in statements:
tree = ast.parse(statement, "?", kind)
print("%r," % (to_tuple(tree),))
print("]")
print("main()")
raise SystemExit
unittest.main()
#### EVERYTHING BELOW IS GENERATED BY python Lib/test/test_ast.py -g #####
exec_results = [
('Module', [('Expr', (1, 0, 1, 4), ('Constant', (1, 0, 1, 4), None, None))], []),
('Module', [('Expr', (1, 0, 1, 18), ('Constant', (1, 0, 1, 18), 'module docstring', None))], []),
('Module', [('FunctionDef', (1, 0, 1, 13), 'f', ('arguments', [], [], None, [], [], None, []), [('Pass', (1, 9, 1, 13))], [], None, None)], []),
('Module', [('FunctionDef', (1, 0, 1, 29), 'f', ('arguments', [], [], None, [], [], None, []), [('Expr', (1, 9, 1, 29), ('Constant', (1, 9, 1, 29), 'function docstring', None))], [], None, None)], []),
('Module', [('FunctionDef', (1, 0, 1, 14), 'f', ('arguments', [], [('arg', (1, 6, 1, 7), 'a', None, None)], None, [], [], None, []), [('Pass', (1, 10, 1, 14))], [], None, None)], []),
('Module', [('FunctionDef', (1, 0, 1, 16), 'f', ('arguments', [], [('arg', (1, 6, 1, 7), 'a', None, None)], None, [], [], None, [('Constant', (1, 8, 1, 9), 0, None)]), [('Pass', (1, 12, 1, 16))], [], None, None)], []),
('Module', [('FunctionDef', (1, 0, 1, 18), 'f', ('arguments', [], [], ('arg', (1, 7, 1, 11), 'args', None, None), [], [], None, []), [('Pass', (1, 14, 1, 18))], [], None, None)], []),
('Module', [('FunctionDef', (1, 0, 1, 21), 'f', ('arguments', [], [], None, [], [], ('arg', (1, 8, 1, 14), 'kwargs', None, None), []), [('Pass', (1, 17, 1, 21))], [], None, None)], []),
('Module', [('FunctionDef', (1, 0, 1, 71), 'f', ('arguments', [], [('arg', (1, 6, 1, 7), 'a', None, None), ('arg', (1, 9, 1, 10), 'b', None, None), ('arg', (1, 14, 1, 15), 'c', None, None), ('arg', (1, 22, 1, 23), 'd', None, None), ('arg', (1, 28, 1, 29), 'e', None, None)], ('arg', (1, 35, 1, 39), 'args', None, None), [('arg', (1, 41, 1, 42), 'f', None, None)], [('Constant', (1, 43, 1, 45), 42, None)], ('arg', (1, 49, 1, 55), 'kwargs', None, None), [('Constant', (1, 11, 1, 12), 1, None), ('Constant', (1, 16, 1, 20), None, None), ('List', (1, 24, 1, 26), [], ('Load',)), ('Dict', (1, 30, 1, 32), [], [])]), [('Expr', (1, 58, 1, 71), ('Constant', (1, 58, 1, 71), 'doc for f()', None))], [], None, None)], []),
('Module', [('ClassDef', (1, 0, 1, 12), 'C', [], [], [('Pass', (1, 8, 1, 12))], [])], []),
('Module', [('ClassDef', (1, 0, 1, 32), 'C', [], [], [('Expr', (1, 9, 1, 32), ('Constant', (1, 9, 1, 32), 'docstring for class C', None))], [])], []),
('Module', [('ClassDef', (1, 0, 1, 21), 'C', [('Name', (1, 8, 1, 14), 'object', ('Load',))], [], [('Pass', (1, 17, 1, 21))], [])], []),
('Module', [('FunctionDef', (1, 0, 1, 16), 'f', ('arguments', [], [], None, [], [], None, []), [('Return', (1, 8, 1, 16), ('Constant', (1, 15, 1, 16), 1, None))], [], None, None)], []),
('Module', [('Delete', (1, 0, 1, 5), [('Name', (1, 4, 1, 5), 'v', ('Del',))])], []),
('Module', [('Assign', (1, 0, 1, 5), [('Name', (1, 0, 1, 1), 'v', ('Store',))], ('Constant', (1, 4, 1, 5), 1, None), None)], []),
('Module', [('Assign', (1, 0, 1, 7), [('Tuple', (1, 0, 1, 3), [('Name', (1, 0, 1, 1), 'a', ('Store',)), ('Name', (1, 2, 1, 3), 'b', ('Store',))], ('Store',))], ('Name', (1, 6, 1, 7), 'c', ('Load',)), None)], []),
('Module', [('Assign', (1, 0, 1, 9), [('Tuple', (1, 0, 1, 5), [('Name', (1, 1, 1, 2), 'a', ('Store',)), ('Name', (1, 3, 1, 4), 'b', ('Store',))], ('Store',))], ('Name', (1, 8, 1, 9), 'c', ('Load',)), None)], []),
('Module', [('Assign', (1, 0, 1, 9), [('List', (1, 0, 1, 5), [('Name', (1, 1, 1, 2), 'a', ('Store',)), ('Name', (1, 3, 1, 4), 'b', ('Store',))], ('Store',))], ('Name', (1, 8, 1, 9), 'c', ('Load',)), None)], []),
('Module', [('AugAssign', (1, 0, 1, 6), ('Name', (1, 0, 1, 1), 'v', ('Store',)), ('Add',), ('Constant', (1, 5, 1, 6), 1, None))], []),
('Module', [('For', (1, 0, 1, 15), ('Name', (1, 4, 1, 5), 'v', ('Store',)), ('Name', (1, 9, 1, 10), 'v', ('Load',)), [('Pass', (1, 11, 1, 15))], [], None)], []),
('Module', [('While', (1, 0, 1, 12), ('Name', (1, 6, 1, 7), 'v', ('Load',)), [('Pass', (1, 8, 1, 12))], [])], []),
('Module', [('If', (1, 0, 1, 9), ('Name', (1, 3, 1, 4), 'v', ('Load',)), [('Pass', (1, 5, 1, 9))], [])], []),
('Module', [('If', (1, 0, 4, 6), ('Name', (1, 3, 1, 4), 'a', ('Load',)), [('Pass', (2, 2, 2, 6))], [('If', (3, 0, 4, 6), ('Name', (3, 5, 3, 6), 'b', ('Load',)), [('Pass', (4, 2, 4, 6))], [])])], []),
('Module', [('If', (1, 0, 6, 6), ('Name', (1, 3, 1, 4), 'a', ('Load',)), [('Pass', (2, 2, 2, 6))], [('If', (3, 0, 6, 6), ('Name', (3, 5, 3, 6), 'b', ('Load',)), [('Pass', (4, 2, 4, 6))], [('Pass', (6, 2, 6, 6))])])], []),
('Module', [('With', (1, 0, 1, 17), [('withitem', ('Name', (1, 5, 1, 6), 'x', ('Load',)), ('Name', (1, 10, 1, 11), 'y', ('Store',)))], [('Pass', (1, 13, 1, 17))], None)], []),
('Module', [('With', (1, 0, 1, 25), [('withitem', ('Name', (1, 5, 1, 6), 'x', ('Load',)), ('Name', (1, 10, 1, 11), 'y', ('Store',))), ('withitem', ('Name', (1, 13, 1, 14), 'z', ('Load',)), ('Name', (1, 18, 1, 19), 'q', ('Store',)))], [('Pass', (1, 21, 1, 25))], None)], []),
('Module', [('Raise', (1, 0, 1, 25), ('Call', (1, 6, 1, 25), ('Name', (1, 6, 1, 15), 'Exception', ('Load',)), [('Constant', (1, 16, 1, 24), 'string', None)], []), None)], []),
('Module', [('Try', (1, 0, 4, 6), [('Pass', (2, 2, 2, 6))], [('ExceptHandler', (3, 0, 4, 6), ('Name', (3, 7, 3, 16), 'Exception', ('Load',)), None, [('Pass', (4, 2, 4, 6))])], [], [])], []),
('Module', [('Try', (1, 0, 4, 6), [('Pass', (2, 2, 2, 6))], [], [], [('Pass', (4, 2, 4, 6))])], []),
('Module', [('Assert', (1, 0, 1, 8), ('Name', (1, 7, 1, 8), 'v', ('Load',)), None)], []),
('Module', [('Import', (1, 0, 1, 10), [('alias', (1, 7, 1, 10), 'sys', None)])], []),
('Module', [('ImportFrom', (1, 0, 1, 17), 'sys', [('alias', (1, 16, 1, 17), 'v', None)], 0)], []),
('Module', [('Global', (1, 0, 1, 8), ['v'])], []),
('Module', [('Expr', (1, 0, 1, 1), ('Constant', (1, 0, 1, 1), 1, None))], []),
('Module', [('Pass', (1, 0, 1, 4))], []),
('Module', [('For', (1, 0, 1, 16), ('Name', (1, 4, 1, 5), 'v', ('Store',)), ('Name', (1, 9, 1, 10), 'v', ('Load',)), [('Break', (1, 11, 1, 16))], [], None)], []),
('Module', [('For', (1, 0, 1, 19), ('Name', (1, 4, 1, 5), 'v', ('Store',)), ('Name', (1, 9, 1, 10), 'v', ('Load',)), [('Continue', (1, 11, 1, 19))], [], None)], []),
('Module', [('For', (1, 0, 1, 18), ('Tuple', (1, 4, 1, 7), [('Name', (1, 4, 1, 5), 'a', ('Store',)), ('Name', (1, 6, 1, 7), 'b', ('Store',))], ('Store',)), ('Name', (1, 11, 1, 12), 'c', ('Load',)), [('Pass', (1, 14, 1, 18))], [], None)], []),
('Module', [('For', (1, 0, 1, 20), ('Tuple', (1, 4, 1, 9), [('Name', (1, 5, 1, 6), 'a', ('Store',)), ('Name', (1, 7, 1, 8), 'b', ('Store',))], ('Store',)), ('Name', (1, 13, 1, 14), 'c', ('Load',)), [('Pass', (1, 16, 1, 20))], [], None)], []),
('Module', [('For', (1, 0, 1, 20), ('List', (1, 4, 1, 9), [('Name', (1, 5, 1, 6), 'a', ('Store',)), ('Name', (1, 7, 1, 8), 'b', ('Store',))], ('Store',)), ('Name', (1, 13, 1, 14), 'c', ('Load',)), [('Pass', (1, 16, 1, 20))], [], None)], []),
('Module', [('Expr', (1, 0, 11, 5), ('GeneratorExp', (1, 0, 11, 5), ('Tuple', (2, 4, 6, 5), [('Name', (3, 4, 3, 6), 'Aa', ('Load',)), ('Name', (5, 7, 5, 9), 'Bb', ('Load',))], ('Load',)), [('comprehension', ('Tuple', (8, 4, 10, 6), [('Name', (8, 4, 8, 6), 'Aa', ('Store',)), ('Name', (10, 4, 10, 6), 'Bb', ('Store',))], ('Store',)), ('Name', (10, 10, 10, 12), 'Cc', ('Load',)), [], 0)]))], []),
('Module', [('Expr', (1, 0, 1, 34), ('DictComp', (1, 0, 1, 34), ('Name', (1, 1, 1, 2), 'a', ('Load',)), ('Name', (1, 5, 1, 6), 'b', ('Load',)), [('comprehension', ('Name', (1, 11, 1, 12), 'w', ('Store',)), ('Name', (1, 16, 1, 17), 'x', ('Load',)), [], 0), ('comprehension', ('Name', (1, 22, 1, 23), 'm', ('Store',)), ('Name', (1, 27, 1, 28), 'p', ('Load',)), [('Name', (1, 32, 1, 33), 'g', ('Load',))], 0)]))], []),
('Module', [('Expr', (1, 0, 1, 20), ('DictComp', (1, 0, 1, 20), ('Name', (1, 1, 1, 2), 'a', ('Load',)), ('Name', (1, 5, 1, 6), 'b', ('Load',)), [('comprehension', ('Tuple', (1, 11, 1, 14), [('Name', (1, 11, 1, 12), 'v', ('Store',)), ('Name', (1, 13, 1, 14), 'w', ('Store',))], ('Store',)), ('Name', (1, 18, 1, 19), 'x', ('Load',)), [], 0)]))], []),
('Module', [('Expr', (1, 0, 1, 19), ('SetComp', (1, 0, 1, 19), ('Name', (1, 1, 1, 2), 'r', ('Load',)), [('comprehension', ('Name', (1, 7, 1, 8), 'l', ('Store',)), ('Name', (1, 12, 1, 13), 'x', ('Load',)), [('Name', (1, 17, 1, 18), 'g', ('Load',))], 0)]))], []),
('Module', [('Expr', (1, 0, 1, 16), ('SetComp', (1, 0, 1, 16), ('Name', (1, 1, 1, 2), 'r', ('Load',)), [('comprehension', ('Tuple', (1, 7, 1, 10), [('Name', (1, 7, 1, 8), 'l', ('Store',)), ('Name', (1, 9, 1, 10), 'm', ('Store',))], ('Store',)), ('Name', (1, 14, 1, 15), 'x', ('Load',)), [], 0)]))], []),
('Module', [('AsyncFunctionDef', (1, 0, 3, 18), 'f', ('arguments', [], [], None, [], [], None, []), [('Expr', (2, 1, 2, 17), ('Constant', (2, 1, 2, 17), 'async function', None)), ('Expr', (3, 1, 3, 18), ('Await', (3, 1, 3, 18), ('Call', (3, 7, 3, 18), ('Name', (3, 7, 3, 16), 'something', ('Load',)), [], [])))], [], None, None)], []),
('Module', [('AsyncFunctionDef', (1, 0, 3, 8), 'f', ('arguments', [], [], None, [], [], None, []), [('AsyncFor', (2, 1, 3, 8), ('Name', (2, 11, 2, 12), 'e', ('Store',)), ('Name', (2, 16, 2, 17), 'i', ('Load',)), [('Expr', (2, 19, 2, 20), ('Constant', (2, 19, 2, 20), 1, None))], [('Expr', (3, 7, 3, 8), ('Constant', (3, 7, 3, 8), 2, None))], None)], [], None, None)], []),
('Module', [('AsyncFunctionDef', (1, 0, 2, 21), 'f', ('arguments', [], [], None, [], [], None, []), [('AsyncWith', (2, 1, 2, 21), [('withitem', ('Name', (2, 12, 2, 13), 'a', ('Load',)), ('Name', (2, 17, 2, 18), 'b', ('Store',)))], [('Expr', (2, 20, 2, 21), ('Constant', (2, 20, 2, 21), 1, None))], None)], [], None, None)], []),
('Module', [('Expr', (1, 0, 1, 14), ('Dict', (1, 0, 1, 14), [None, ('Constant', (1, 10, 1, 11), 2, None)], [('Dict', (1, 3, 1, 8), [('Constant', (1, 4, 1, 5), 1, None)], [('Constant', (1, 6, 1, 7), 2, None)]), ('Constant', (1, 12, 1, 13), 3, None)]))], []),
('Module', [('Expr', (1, 0, 1, 12), ('Set', (1, 0, 1, 12), [('Starred', (1, 1, 1, 8), ('Set', (1, 2, 1, 8), [('Constant', (1, 3, 1, 4), 1, None), ('Constant', (1, 6, 1, 7), 2, None)]), ('Load',)), ('Constant', (1, 10, 1, 11), 3, None)]))], []),
('Module', [('AsyncFunctionDef', (1, 0, 2, 21), 'f', ('arguments', [], [], None, [], [], None, []), [('Expr', (2, 1, 2, 21), ('ListComp', (2, 1, 2, 21), ('Name', (2, 2, 2, 3), 'i', ('Load',)), [('comprehension', ('Name', (2, 14, 2, 15), 'b', ('Store',)), ('Name', (2, 19, 2, 20), 'c', ('Load',)), [], 1)]))], [], None, None)], []),
('Module', [('FunctionDef', (4, 0, 4, 13), 'f', ('arguments', [], [], None, [], [], None, []), [('Pass', (4, 9, 4, 13))], [('Name', (1, 1, 1, 6), 'deco1', ('Load',)), ('Call', (2, 1, 2, 8), ('Name', (2, 1, 2, 6), 'deco2', ('Load',)), [], []), ('Call', (3, 1, 3, 9), ('Name', (3, 1, 3, 6), 'deco3', ('Load',)), [('Constant', (3, 7, 3, 8), 1, None)], [])], None, None)], []),
('Module', [('AsyncFunctionDef', (4, 0, 4, 19), 'f', ('arguments', [], [], None, [], [], None, []), [('Pass', (4, 15, 4, 19))], [('Name', (1, 1, 1, 6), 'deco1', ('Load',)), ('Call', (2, 1, 2, 8), ('Name', (2, 1, 2, 6), 'deco2', ('Load',)), [], []), ('Call', (3, 1, 3, 9), ('Name', (3, 1, 3, 6), 'deco3', ('Load',)), [('Constant', (3, 7, 3, 8), 1, None)], [])], None, None)], []),
('Module', [('ClassDef', (4, 0, 4, 13), 'C', [], [], [('Pass', (4, 9, 4, 13))], [('Name', (1, 1, 1, 6), 'deco1', ('Load',)), ('Call', (2, 1, 2, 8), ('Name', (2, 1, 2, 6), 'deco2', ('Load',)), [], []), ('Call', (3, 1, 3, 9), ('Name', (3, 1, 3, 6), 'deco3', ('Load',)), [('Constant', (3, 7, 3, 8), 1, None)], [])])], []),
('Module', [('FunctionDef', (2, 0, 2, 13), 'f', ('arguments', [], [], None, [], [], None, []), [('Pass', (2, 9, 2, 13))], [('Call', (1, 1, 1, 19), ('Name', (1, 1, 1, 5), 'deco', ('Load',)), [('GeneratorExp', (1, 5, 1, 19), ('Name', (1, 6, 1, 7), 'a', ('Load',)), [('comprehension', ('Name', (1, 12, 1, 13), 'a', ('Store',)), ('Name', (1, 17, 1, 18), 'b', ('Load',)), [], 0)])], [])], None, None)], []),
('Module', [('FunctionDef', (2, 0, 2, 13), 'f', ('arguments', [], [], None, [], [], None, []), [('Pass', (2, 9, 2, 13))], [('Attribute', (1, 1, 1, 6), ('Attribute', (1, 1, 1, 4), ('Name', (1, 1, 1, 2), 'a', ('Load',)), 'b', ('Load',)), 'c', ('Load',))], None, None)], []),
('Module', [('Expr', (1, 0, 1, 8), ('NamedExpr', (1, 1, 1, 7), ('Name', (1, 1, 1, 2), 'a', ('Store',)), ('Constant', (1, 6, 1, 7), 1, None)))], []),
('Module', [('FunctionDef', (1, 0, 1, 18), 'f', ('arguments', [('arg', (1, 6, 1, 7), 'a', None, None)], [], None, [], [], None, []), [('Pass', (1, 14, 1, 18))], [], None, None)], []),
('Module', [('FunctionDef', (1, 0, 1, 26), 'f', ('arguments', [('arg', (1, 6, 1, 7), 'a', None, None)], [('arg', (1, 12, 1, 13), 'c', None, None), ('arg', (1, 15, 1, 16), 'd', None, None), ('arg', (1, 18, 1, 19), 'e', None, None)], None, [], [], None, []), [('Pass', (1, 22, 1, 26))], [], None, None)], []),
('Module', [('FunctionDef', (1, 0, 1, 29), 'f', ('arguments', [('arg', (1, 6, 1, 7), 'a', None, None)], [('arg', (1, 12, 1, 13), 'c', None, None)], None, [('arg', (1, 18, 1, 19), 'd', None, None), ('arg', (1, 21, 1, 22), 'e', None, None)], [None, None], None, []), [('Pass', (1, 25, 1, 29))], [], None, None)], []),
('Module', [('FunctionDef', (1, 0, 1, 39), 'f', ('arguments', [('arg', (1, 6, 1, 7), 'a', None, None)], [('arg', (1, 12, 1, 13), 'c', None, None)], None, [('arg', (1, 18, 1, 19), 'd', None, None), ('arg', (1, 21, 1, 22), 'e', None, None)], [None, None], ('arg', (1, 26, 1, 32), 'kwargs', None, None), []), [('Pass', (1, 35, 1, 39))], [], None, None)], []),
('Module', [('FunctionDef', (1, 0, 1, 20), 'f', ('arguments', [('arg', (1, 6, 1, 7), 'a', None, None)], [], None, [], [], None, [('Constant', (1, 8, 1, 9), 1, None)]), [('Pass', (1, 16, 1, 20))], [], None, None)], []),
('Module', [('FunctionDef', (1, 0, 1, 29), 'f', ('arguments', [('arg', (1, 6, 1, 7), 'a', None, None)], [('arg', (1, 14, 1, 15), 'b', None, None), ('arg', (1, 19, 1, 20), 'c', None, None)], None, [], [], None, [('Constant', (1, 8, 1, 9), 1, None), ('Constant', (1, 16, 1, 17), 2, None), ('Constant', (1, 21, 1, 22), 4, None)]), [('Pass', (1, 25, 1, 29))], [], None, None)], []),
('Module', [('FunctionDef', (1, 0, 1, 32), 'f', ('arguments', [('arg', (1, 6, 1, 7), 'a', None, None)], [('arg', (1, 14, 1, 15), 'b', None, None)], None, [('arg', (1, 22, 1, 23), 'c', None, None)], [('Constant', (1, 24, 1, 25), 4, None)], None, [('Constant', (1, 8, 1, 9), 1, None), ('Constant', (1, 16, 1, 17), 2, None)]), [('Pass', (1, 28, 1, 32))], [], None, None)], []),
('Module', [('FunctionDef', (1, 0, 1, 30), 'f', ('arguments', [('arg', (1, 6, 1, 7), 'a', None, None)], [('arg', (1, 14, 1, 15), 'b', None, None)], None, [('arg', (1, 22, 1, 23), 'c', None, None)], [None], None, [('Constant', (1, 8, 1, 9), 1, None), ('Constant', (1, 16, 1, 17), 2, None)]), [('Pass', (1, 26, 1, 30))], [], None, None)], []),
('Module', [('FunctionDef', (1, 0, 1, 42), 'f', ('arguments', [('arg', (1, 6, 1, 7), 'a', None, None)], [('arg', (1, 14, 1, 15), 'b', None, None)], None, [('arg', (1, 22, 1, 23), 'c', None, None)], [('Constant', (1, 24, 1, 25), 4, None)], ('arg', (1, 29, 1, 35), 'kwargs', None, None), [('Constant', (1, 8, 1, 9), 1, None), ('Constant', (1, 16, 1, 17), 2, None)]), [('Pass', (1, 38, 1, 42))], [], None, None)], []),
('Module', [('FunctionDef', (1, 0, 1, 40), 'f', ('arguments', [('arg', (1, 6, 1, 7), 'a', None, None)], [('arg', (1, 14, 1, 15), 'b', None, None)], None, [('arg', (1, 22, 1, 23), 'c', None, None)], [None], ('arg', (1, 27, 1, 33), 'kwargs', None, None), [('Constant', (1, 8, 1, 9), 1, None), ('Constant', (1, 16, 1, 17), 2, None)]), [('Pass', (1, 36, 1, 40))], [], None, None)], []),
]
single_results = [
('Interactive', [('Expr', (1, 0, 1, 3), ('BinOp', (1, 0, 1, 3), ('Constant', (1, 0, 1, 1), 1, None), ('Add',), ('Constant', (1, 2, 1, 3), 2, None)))]),
]
eval_results = [
('Expression', ('Constant', (1, 0, 1, 4), None, None)),
('Expression', ('BoolOp', (1, 0, 1, 7), ('And',), [('Name', (1, 0, 1, 1), 'a', ('Load',)), ('Name', (1, 6, 1, 7), 'b', ('Load',))])),
('Expression', ('BinOp', (1, 0, 1, 5), ('Name', (1, 0, 1, 1), 'a', ('Load',)), ('Add',), ('Name', (1, 4, 1, 5), 'b', ('Load',)))),
('Expression', ('UnaryOp', (1, 0, 1, 5), ('Not',), ('Name', (1, 4, 1, 5), 'v', ('Load',)))),
('Expression', ('Lambda', (1, 0, 1, 11), ('arguments', [], [], None, [], [], None, []), ('Constant', (1, 7, 1, 11), None, None))),
('Expression', ('Dict', (1, 0, 1, 7), [('Constant', (1, 2, 1, 3), 1, None)], [('Constant', (1, 4, 1, 5), 2, None)])),
('Expression', ('Dict', (1, 0, 1, 2), [], [])),
('Expression', ('Set', (1, 0, 1, 7), [('Constant', (1, 1, 1, 5), None, None)])),
('Expression', ('Dict', (1, 0, 5, 6), [('Constant', (2, 6, 2, 7), 1, None)], [('Constant', (4, 10, 4, 11), 2, None)])),
('Expression', ('ListComp', (1, 0, 1, 19), ('Name', (1, 1, 1, 2), 'a', ('Load',)), [('comprehension', ('Name', (1, 7, 1, 8), 'b', ('Store',)), ('Name', (1, 12, 1, 13), 'c', ('Load',)), [('Name', (1, 17, 1, 18), 'd', ('Load',))], 0)])),
('Expression', ('GeneratorExp', (1, 0, 1, 19), ('Name', (1, 1, 1, 2), 'a', ('Load',)), [('comprehension', ('Name', (1, 7, 1, 8), 'b', ('Store',)), ('Name', (1, 12, 1, 13), 'c', ('Load',)), [('Name', (1, 17, 1, 18), 'd', ('Load',))], 0)])),
('Expression', ('ListComp', (1, 0, 1, 20), ('Tuple', (1, 1, 1, 6), [('Name', (1, 2, 1, 3), 'a', ('Load',)), ('Name', (1, 4, 1, 5), 'b', ('Load',))], ('Load',)), [('comprehension', ('Tuple', (1, 11, 1, 14), [('Name', (1, 11, 1, 12), 'a', ('Store',)), ('Name', (1, 13, 1, 14), 'b', ('Store',))], ('Store',)), ('Name', (1, 18, 1, 19), 'c', ('Load',)), [], 0)])),
('Expression', ('ListComp', (1, 0, 1, 22), ('Tuple', (1, 1, 1, 6), [('Name', (1, 2, 1, 3), 'a', ('Load',)), ('Name', (1, 4, 1, 5), 'b', ('Load',))], ('Load',)), [('comprehension', ('Tuple', (1, 11, 1, 16), [('Name', (1, 12, 1, 13), 'a', ('Store',)), ('Name', (1, 14, 1, 15), 'b', ('Store',))], ('Store',)), ('Name', (1, 20, 1, 21), 'c', ('Load',)), [], 0)])),
('Expression', ('ListComp', (1, 0, 1, 22), ('Tuple', (1, 1, 1, 6), [('Name', (1, 2, 1, 3), 'a', ('Load',)), ('Name', (1, 4, 1, 5), 'b', ('Load',))], ('Load',)), [('comprehension', ('List', (1, 11, 1, 16), [('Name', (1, 12, 1, 13), 'a', ('Store',)), ('Name', (1, 14, 1, 15), 'b', ('Store',))], ('Store',)), ('Name', (1, 20, 1, 21), 'c', ('Load',)), [], 0)])),
('Expression', ('SetComp', (1, 0, 1, 20), ('Tuple', (1, 1, 1, 6), [('Name', (1, 2, 1, 3), 'a', ('Load',)), ('Name', (1, 4, 1, 5), 'b', ('Load',))], ('Load',)), [('comprehension', ('Tuple', (1, 11, 1, 14), [('Name', (1, 11, 1, 12), 'a', ('Store',)), ('Name', (1, 13, 1, 14), 'b', ('Store',))], ('Store',)), ('Name', (1, 18, 1, 19), 'c', ('Load',)), [], 0)])),
('Expression', ('SetComp', (1, 0, 1, 22), ('Tuple', (1, 1, 1, 6), [('Name', (1, 2, 1, 3), 'a', ('Load',)), ('Name', (1, 4, 1, 5), 'b', ('Load',))], ('Load',)), [('comprehension', ('Tuple', (1, 11, 1, 16), [('Name', (1, 12, 1, 13), 'a', ('Store',)), ('Name', (1, 14, 1, 15), 'b', ('Store',))], ('Store',)), ('Name', (1, 20, 1, 21), 'c', ('Load',)), [], 0)])),
('Expression', ('SetComp', (1, 0, 1, 22), ('Tuple', (1, 1, 1, 6), [('Name', (1, 2, 1, 3), 'a', ('Load',)), ('Name', (1, 4, 1, 5), 'b', ('Load',))], ('Load',)), [('comprehension', ('List', (1, 11, 1, 16), [('Name', (1, 12, 1, 13), 'a', ('Store',)), ('Name', (1, 14, 1, 15), 'b', ('Store',))], ('Store',)), ('Name', (1, 20, 1, 21), 'c', ('Load',)), [], 0)])),
('Expression', ('GeneratorExp', (1, 0, 1, 20), ('Tuple', (1, 1, 1, 6), [('Name', (1, 2, 1, 3), 'a', ('Load',)), ('Name', (1, 4, 1, 5), 'b', ('Load',))], ('Load',)), [('comprehension', ('Tuple', (1, 11, 1, 14), [('Name', (1, 11, 1, 12), 'a', ('Store',)), ('Name', (1, 13, 1, 14), 'b', ('Store',))], ('Store',)), ('Name', (1, 18, 1, 19), 'c', ('Load',)), [], 0)])),
('Expression', ('GeneratorExp', (1, 0, 1, 22), ('Tuple', (1, 1, 1, 6), [('Name', (1, 2, 1, 3), 'a', ('Load',)), ('Name', (1, 4, 1, 5), 'b', ('Load',))], ('Load',)), [('comprehension', ('Tuple', (1, 11, 1, 16), [('Name', (1, 12, 1, 13), 'a', ('Store',)), ('Name', (1, 14, 1, 15), 'b', ('Store',))], ('Store',)), ('Name', (1, 20, 1, 21), 'c', ('Load',)), [], 0)])),
('Expression', ('GeneratorExp', (1, 0, 1, 22), ('Tuple', (1, 1, 1, 6), [('Name', (1, 2, 1, 3), 'a', ('Load',)), ('Name', (1, 4, 1, 5), 'b', ('Load',))], ('Load',)), [('comprehension', ('List', (1, 11, 1, 16), [('Name', (1, 12, 1, 13), 'a', ('Store',)), ('Name', (1, 14, 1, 15), 'b', ('Store',))], ('Store',)), ('Name', (1, 20, 1, 21), 'c', ('Load',)), [], 0)])),
('Expression', ('Compare', (1, 0, 1, 9), ('Constant', (1, 0, 1, 1), 1, None), [('Lt',), ('Lt',)], [('Constant', (1, 4, 1, 5), 2, None), ('Constant', (1, 8, 1, 9), 3, None)])),
('Expression', ('Call', (1, 0, 1, 17), ('Name', (1, 0, 1, 1), 'f', ('Load',)), [('Constant', (1, 2, 1, 3), 1, None), ('Constant', (1, 4, 1, 5), 2, None), ('Starred', (1, 10, 1, 12), ('Name', (1, 11, 1, 12), 'd', ('Load',)), ('Load',))], [('keyword', (1, 6, 1, 9), 'c', ('Constant', (1, 8, 1, 9), 3, None)), ('keyword', (1, 13, 1, 16), None, ('Name', (1, 15, 1, 16), 'e', ('Load',)))])),
('Expression', ('Call', (1, 0, 1, 10), ('Name', (1, 0, 1, 1), 'f', ('Load',)), [('Starred', (1, 2, 1, 9), ('List', (1, 3, 1, 9), [('Constant', (1, 4, 1, 5), 0, None), ('Constant', (1, 7, 1, 8), 1, None)], ('Load',)), ('Load',))], [])),
('Expression', ('Call', (1, 0, 1, 15), ('Name', (1, 0, 1, 1), 'f', ('Load',)), [('GeneratorExp', (1, 1, 1, 15), ('Name', (1, 2, 1, 3), 'a', ('Load',)), [('comprehension', ('Name', (1, 8, 1, 9), 'a', ('Store',)), ('Name', (1, 13, 1, 14), 'b', ('Load',)), [], 0)])], [])),
('Expression', ('Constant', (1, 0, 1, 2), 10, None)),
('Expression', ('Constant', (1, 0, 1, 8), 'string', None)),
('Expression', ('Attribute', (1, 0, 1, 3), ('Name', (1, 0, 1, 1), 'a', ('Load',)), 'b', ('Load',))),
('Expression', ('Subscript', (1, 0, 1, 6), ('Name', (1, 0, 1, 1), 'a', ('Load',)), ('Slice', (1, 2, 1, 5), ('Name', (1, 2, 1, 3), 'b', ('Load',)), ('Name', (1, 4, 1, 5), 'c', ('Load',)), None), ('Load',))),
('Expression', ('Name', (1, 0, 1, 1), 'v', ('Load',))),
('Expression', ('List', (1, 0, 1, 7), [('Constant', (1, 1, 1, 2), 1, None), ('Constant', (1, 3, 1, 4), 2, None), ('Constant', (1, 5, 1, 6), 3, None)], ('Load',))),
('Expression', ('List', (1, 0, 1, 2), [], ('Load',))),
('Expression', ('Tuple', (1, 0, 1, 5), [('Constant', (1, 0, 1, 1), 1, None), ('Constant', (1, 2, 1, 3), 2, None), ('Constant', (1, 4, 1, 5), 3, None)], ('Load',))),
('Expression', ('Tuple', (1, 0, 1, 7), [('Constant', (1, 1, 1, 2), 1, None), ('Constant', (1, 3, 1, 4), 2, None), ('Constant', (1, 5, 1, 6), 3, None)], ('Load',))),
('Expression', ('Tuple', (1, 0, 1, 2), [], ('Load',))),
('Expression', ('Call', (1, 0, 1, 17), ('Attribute', (1, 0, 1, 7), ('Attribute', (1, 0, 1, 5), ('Attribute', (1, 0, 1, 3), ('Name', (1, 0, 1, 1), 'a', ('Load',)), 'b', ('Load',)), 'c', ('Load',)), 'd', ('Load',)), [('Subscript', (1, 8, 1, 16), ('Attribute', (1, 8, 1, 11), ('Name', (1, 8, 1, 9), 'a', ('Load',)), 'b', ('Load',)), ('Slice', (1, 12, 1, 15), ('Constant', (1, 12, 1, 13), 1, None), ('Constant', (1, 14, 1, 15), 2, None), None), ('Load',))], [])),
]
main()
|
385d72140937b4867c4b942410dc7bdd67a8cf41
|
a5cffc68c40887b34c298f98b3c684a84bbfe96e
|
/pyazhpc/arm.py
|
0af2411fe515dc3b03d7fbb1d90a5d51c80316f9
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
Azure/azurehpc
|
54e3a852b0ef739ec598cfd751929aa0b004d5ff
|
f9766c25d7084bbab463182dadf9ed48e58a09ba
|
refs/heads/master
| 2023-08-19T01:44:44.088508
| 2023-08-02T18:27:10
| 2023-08-02T18:27:10
| 196,273,513
| 112
| 71
|
MIT
| 2023-08-02T18:27:11
| 2019-07-10T20:52:18
|
Shell
|
UTF-8
|
Python
| false
| false
| 51,202
|
py
|
arm.py
|
import json
import sys
import uuid
import azlog
import azutil
log = azlog.getLogger(__name__)
class ArmTemplate:
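    """Incrementally assemble an ARM deployment template.

    Each _add_* method appends resources derived from the azhpc config
    dict; the accumulated parameters/variables/resources/outputs make up
    the template body.
    """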
def __init__(self):
self.parameters = {}
self.variables = {}
self.resources = []
self.outputs = {}
self.avsets = set()
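    # A minimal usage sketch (assuming `cfg` follows the azurehpc config
    # schema read below):
    #   tpl = ArmTemplate()
    #   tpl._add_network(cfg)
    #   print(json.dumps(tpl.resources, indent=2))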
def _add_network(self, cfg):
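        # Emit the networking resources described by cfg["vnet"]: the vnet
        # itself (only when it lives in this deployment's resource group),
        # peerings, an optional private DNS zone, route tables and an
        # optional VPN gateway.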
resource_group = cfg["resource_group"]
vnet_resource_group = cfg["vnet"].get("resource_group", resource_group)
if resource_group != vnet_resource_group:
log.debug(f"using an existing vnet in {vnet_resource_group}")
return
location = cfg["location"]
vnet_name = cfg["vnet"]["name"]
address_prefix = cfg["vnet"]["address_prefix"]
subnet_names = cfg["vnet"]["subnets"]
subnets = []
for subnet_name in subnet_names:
subnet_address_prefix = cfg["vnet"]["subnets"][subnet_name]
subnets.append({
"name": subnet_name,
"properties": {
"addressPrefix": subnet_address_prefix
}
})
gtags = cfg.get("global_tags", {})
res = {
"apiVersion": "2018-10-01",
"type": "Microsoft.Network/virtualNetworks",
"name": vnet_name,
"location": location,
"tags": gtags,
"properties": {
"addressSpace": {
"addressPrefixes": [
address_prefix
]
},
"subnets": subnets
}
}
self.resources.append(res)
for peer_name in cfg["vnet"].get("peer", {}).keys():
peer_resource_group = cfg["vnet"]["peer"][peer_name]["resource_group"]
peer_vnet_name = cfg["vnet"]["peer"][peer_name]["vnet_name"]
peer_allow_vnet_access = cfg["vnet"]["peer"][peer_name].get("peer_allow_vnet_access", True)
peer_allow_forwarded_traffic = cfg["vnet"]["peer"][peer_name].get("peer_allow_forwarded_traffic", True)
vnet_allow_vnet_access = cfg["vnet"]["peer"][peer_name].get("vnet_allow_vnet_access", True)
            vnet_allow_forwarded_traffic = cfg["vnet"]["peer"][peer_name].get("vnet_allow_forwarded_traffic", True)
if "gateway" in cfg["vnet"]["peer"][peer_name]:
peer_allow_gateway_transit = cfg["vnet"]["peer"][peer_name]["gateway"].get("peer_allow_gateway_transit", False)
peer_use_remote_gateways = cfg["vnet"]["peer"][peer_name]["gateway"].get("peer_use_remote_gateways", False)
vnet_allow_gateway_transit = cfg["vnet"]["peer"][peer_name]["gateway"].get("vnet_allow_gateway_transit", False)
vnet_use_remote_gateways = cfg["vnet"]["peer"][peer_name]["gateway"].get("vnet_use_remote_gateways", False)
else:
peer_allow_gateway_transit = False
peer_use_remote_gateways = False
vnet_allow_gateway_transit = False
vnet_use_remote_gateways = False
self.resources.append({
"type": "Microsoft.Network/virtualNetworks/virtualNetworkPeerings",
"apiVersion": "2021-08-01",
"name": f"{vnet_name}/{peer_name}-{peer_resource_group}",
"properties": {
"remoteVirtualNetwork": {
"id": f"[resourceId('{peer_resource_group}', 'Microsoft.Network/virtualNetworks', '{peer_vnet_name}')]"
},
"allowVirtualNetworkAccess": peer_allow_vnet_access,
"allowForwardedTraffic": peer_allow_forwarded_traffic,
"allowGatewayTransit": peer_allow_gateway_transit,
"useRemoteGateways": peer_use_remote_gateways
},
"dependsOn": [
f"Microsoft.Network/virtualNetworks/{vnet_name}"
]
})
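        # The reverse peering lives in the peer's resource group; an ARM
        # template can only create resources there through a nested
        # Microsoft.Resources/deployments, so wrap it in one.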
self.resources.append({
"type": "Microsoft.Resources/deployments",
"apiVersion": "2019-08-01",
"name": f"{peer_resource_group}peer",
"tags": gtags,
"resourceGroup": peer_resource_group,
"properties": {
"mode": "Incremental",
"template": {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {},
"variables": {},
"resources": [
{
"type": "Microsoft.Network/virtualNetworks/virtualNetworkPeerings",
"apiVersion": "2021-08-01",
"name": f"{peer_vnet_name}/{peer_name}-{resource_group}",
"properties": {
"remoteVirtualNetwork": {
"id": f"[resourceId('{resource_group}', 'Microsoft.Network/virtualNetworks', '{vnet_name}')]"
},
"allowVirtualNetworkAccess": vnet_allow_vnet_access,
"allowForwardedTraffic": vnet_allow_forwarded_traffic,
"allowGatewayTransit": vnet_allow_gateway_transit,
"useRemoteGateways": vnet_use_remote_gateways
}
}
],
"outputs": {}
},
"parameters": {}
},
"dependsOn": [
f"Microsoft.Network/virtualNetworks/{vnet_name}"
]
})
# private dns
dns_domain = cfg["vnet"].get("dns_domain", None)
if dns_domain:
log.info(f"add private dns ({dns_domain})")
self.resources.append({
"type": "Microsoft.Network/privateDnsZones",
"apiVersion": "2018-09-01",
"name": dns_domain,
"tags": gtags,
"location": "global",
"properties": {},
"resources": [{
"type": "Microsoft.Network/privateDnsZones/virtualNetworkLinks",
"apiVersion": "2018-09-01",
"name": f"[concat('{dns_domain}', '/{vnet_name}')]",
"location": "global",
"dependsOn": [
f"[resourceId('Microsoft.Network/privateDnsZones', '{dns_domain}')]"
],
"properties": {
"registrationEnabled": True,
"virtualNetwork": {
"id": f"[resourceId('Microsoft.Network/virtualNetworks', '{vnet_name}')]"
}
}
}]
})
# add route tables first (and keep track of mapping to subnet)
route_table_map = {}
for route_name in cfg["vnet"].get("routes", {}).keys():
route_address_prefix = cfg["vnet"]["routes"][route_name]["address_prefix"]
route_next_hop = cfg["vnet"]["routes"][route_name]["next_hop"]
route_subnet = cfg["vnet"]["routes"][route_name]["subnet"]
route_table_map[route_subnet] = route_name
self.resources.append({
"type": "Microsoft.Network/routeTables",
"apiVersion": "2019-11-01",
"name": route_name,
"tags": gtags,
"location": location,
"properties": {
"disableBgpRoutePropagation": False,
"routes": [
{
"name": route_name,
"properties": {
"addressPrefix": route_address_prefix,
"nextHopType": "VirtualAppliance",
"nextHopIpAddress": f"[reference('{route_next_hop}_nic').ipConfigurations[0].properties.privateIPAddress]"
}
}
]
},
"dependsOn": [
f"Microsoft.Network/networkInterfaces/{route_next_hop}_nic"
]
})
self.resources.append({
"type": "Microsoft.Network/routeTables/routes",
"apiVersion": "2019-11-01",
"name": f"{route_name}/{route_name}",
"dependsOn": [
f"[resourceId('Microsoft.Network/routeTables', '{route_name}')]"
],
"properties": {
"addressPrefix": route_address_prefix,
"nextHopType": "VirtualAppliance",
"nextHopIpAddress": f"[reference('{route_next_hop}_nic').ipConfigurations[0].properties.privateIPAddress]"
}
})
subnet_address_prefix = cfg["vnet"]["subnets"][route_subnet]
self.resources.append({
"type": "Microsoft.Network/virtualNetworks/subnets",
"apiVersion": "2019-11-01",
"name": f"{vnet_name}/{route_subnet}",
"dependsOn": [
f"[resourceId('Microsoft.Network/routeTables', '{route_name}')]"
],
"properties": {
"addressPrefix": subnet_address_prefix,
"routeTable": {
"id": f"[resourceId('Microsoft.Network/routeTables', '{route_name}')]"
}
}
})
# vpn gateway
vpn_gateway_name = cfg["vnet"].get("gateway",{}).get("name", None)
if vpn_gateway_name:
log.info(f"add vpn gateway ({vpn_gateway_name})")
rrg = cfg["resource_group"]
vnetname = cfg["vnet"]["name"]
subnet = cfg["vnet"]["gateway"].get("subnet")
aad_tenant = cfg["vnet"]["gateway"].get("aad_tenant")
aad_audience = cfg["vnet"]["gateway"].get("aad_audience")
aad_issuer = cfg["vnet"]["gateway"].get("aad_issuer")
nicdeps = []
pipname = vpn_gateway_name+"_pip"
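            # Reuse the DNS label if the public IP already has one so the
            # gateway keeps a stable FQDN across redeployments; otherwise
            # generate a short random suffix.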
dnsname = azutil.get_dns_label(rrg, pipname, True)
if dnsname:
log.debug(f"dns name: {dnsname} (using existing one)")
else:
dnsname = vpn_gateway_name+str(uuid.uuid4())[:6]
log.debug(f"dns name: {dnsname}")
nicdeps.append("Microsoft.Network/publicIpAddresses/"+pipname)
pipres = {
"type": "Microsoft.Network/publicIPAddresses",
"apiVersion": "2018-01-01",
"name": pipname,
"location": location,
"dependsOn": [],
"tags": gtags,
"properties": {
"dnsSettings": {
"domainNameLabel": dnsname
}
}
}
self.resources.append(pipres)
self.resources.append({
"type": "Microsoft.Network/virtualNetworkGateways",
"apiVersion": "2020-11-01",
"name": vpn_gateway_name,
"tags": gtags,
"location": location,
"properties": {
"enablePrivateIpAddress": False,
"ipConfigurations": [
{
"name": "default",
"properties": {
"privateIPAllocationMethod": "Dynamic",
"publicIPAddress": {
"id": "[resourceId('Microsoft.Network/publicIPAddresses', '{}')]".format(pipname)
},
"subnet": {
"id": "[resourceId('Microsoft.Network/virtualNetworks/subnets', '{}', '{}')]".format(vnetname, subnet)
}
}
}
],
"sku": {
"name": "VpnGw2",
"tier": "VpnGw2"
},
"gatewayType": "Vpn",
"vpnType": "RouteBased",
"enableBgp": False,
"activeActive": False,
"vpnClientConfiguration": {
"vpnClientAddressPool": {
"addressPrefixes": [
"172.0.0.0/24"
]
},
"vpnClientProtocols": [
"OpenVPN"
],
"vpnAuthenticationTypes": [
"AAD"
],
"vpnClientRootCertificates": [],
"vpnClientRevokedCertificates": [],
"radiusServers": [],
"vpnClientIpsecPolicies": [],
"aadTenant": aad_tenant,
"aadAudience": aad_audience,
"aadIssuer": aad_issuer
},
"bgpSettings": {
"asn": 65515,
"bgpPeeringAddress": "10.0.4.254",
"peerWeight": 0,
"bgpPeeringAddresses": [
{
"ipconfigurationId": "[concat(resourceId('Microsoft.Network/virtualNetworkGateways', '{}'), '/ipConfigurations/default')]".format(vpn_gateway_name),
"customBgpIpAddresses": []
}
]
},
"vpnGatewayGeneration": "Generation2"
},
"dependsOn": [
f"[resourceId('Microsoft.Network/publicIPAddresses', '{pipname}' )]",
f"Microsoft.Network/virtualNetworks/{vnetname}"
]
})
def _add_netapp(self, cfg, name, deploy_network):
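        """Emit the Azure NetApp Files account, capacity pools and volumes for
        the storage entry `name`, delegating the subnet to Microsoft.Netapp
        when the vnet is created by this deployment."""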
account = cfg["storage"][name]
loc = cfg["location"]
vnet = cfg["vnet"]["name"]
subnet = account["subnet"]
nicdeps = []
gtags = cfg.get("global_tags", {})
rg = cfg["resource_group"]
vnetrg = cfg["vnet"].get("resource_group", rg)
if (rg == vnetrg) and deploy_network:
log.debug("adding delegation to subnet")
            rvnet = next((x for x in self.resources if x["name"] == vnet), None)
            rsubnet = None
            if rvnet:
                rsubnet = next(
                    (x for x in rvnet["properties"]["subnets"] if x["name"] == subnet), None)
            if not rsubnet:
                log.error(
                    "subnet ({}) for netapp storage ({}) does not exist".format(subnet, name))
                sys.exit(1)
if "delegations" not in rsubnet["properties"]:
rsubnet["properties"]["delegations"] = []
rsubnet["properties"]["delegations"].append({
"properties": {
"serviceName": "Microsoft.Netapp/volumes"
},
"name": "netappdelegation"
})
nicdeps.append("Microsoft.Network/virtualNetworks/"+vnet)
subnetid = "[resourceId('Microsoft.Network/virtualNetworks/subnets', '{}', '{}')]".format(
vnet, subnet)
else:
subnetid = "[resourceId('{}', 'Microsoft.Network/virtualNetworks/subnets', '{}', '{}')]".format(
vnetrg, vnet, subnet)
addomain = account.get("joindomain", None)
props = {}
if addomain:
adip = azutil.get_vm_private_ip(rg, account["ad_server"])
adpassword = account["ad_password"]
adusername = account["ad_username"]
# TODO: previously we used ip address for the dns here
props["activeDirectories"] = [
{
"username": adusername,
"password": adpassword,
"domain": addomain,
"dns": adip,
"smbServerName": "anf"
}
]
self.resources.append({
"name": name,
"type": "Microsoft.NetApp/netAppAccounts",
"apiVersion": "2020-02-01",
"tags": gtags,
"location": loc,
"properties": props,
"dependsOn": nicdeps
})
for poolname in account.get("pools", {}).keys():
pool = account["pools"][poolname]
poolsize = pool["size"]
servicelevel = pool["service_level"]
self.resources.append({
"name": name+"/"+poolname,
"type": "Microsoft.NetApp/netAppAccounts/capacityPools",
"apiVersion": "2020-02-01",
"tags": gtags,
"location": loc,
"properties": {
"size": poolsize * 2**40,
"serviceLevel": servicelevel
},
"dependsOn": [
"[resourceId('Microsoft.NetApp/netAppAccounts', '{}')]".format(name)
],
})
for volname in pool.get("volumes", {}).keys():
vol = pool["volumes"][volname]
volsize = vol["size"]
voltype = vol.get("type", "nfs")
# TODO : Why is this unused ?
#volmount = vol["mount"]
netapp_volume = {
"name": name+"/"+poolname+"/"+volname,
"type": "Microsoft.NetApp/netAppAccounts/capacityPools/volumes",
"apiVersion": "2020-06-01",
"tags": gtags,
"location": loc,
"properties": {
"creationToken": volname,
"serviceLevel": servicelevel,
"usageThreshold": volsize * 2**40,
"subnetId": subnetid
},
"dependsOn": [
"[resourceId('Microsoft.NetApp/netAppAccounts/capacityPools', '{}', '{}')]".format(
name, poolname)
]
}
if voltype == "cifs":
netapp_volume["properties"]["protocolTypes"] = ["CIFS"]
netapp_volume["properties"]["kerberosEnabled"] = "false"
self.resources.append(netapp_volume)
def _add_storageaccount(self, cfg, name):
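        """Emit a StorageV2 storage account plus any blob containers listed
        under the storage entry `name`."""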
loc = cfg["location"]
gtags = cfg.get("global_tags", {})
res = {
"type": "Microsoft.Storage/storageAccounts",
"apiVersion": "2019-06-01",
"name": name,
"tags": gtags,
"location": loc,
"sku": {
"name": "Standard_LRS"
},
"kind": "StorageV2",
"properties": {
"accessTier": "Hot"
},
"resources": []
}
for container in cfg["storage"][name].get("containers", []):
res["resources"].append(
{
"type": "blobServices/containers",
"apiVersion": "2019-06-01",
"name": f"default/{container}",
"tags": gtags,
"dependsOn": [
name
]
}
)
self.resources.append(res)
def _add_proximity_group(self, cfg):
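        """Emit a proximity placement group when the config names one."""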
ppg = cfg.get("proximity_placement_group_name", None)
gtags = cfg.get("global_tags", {})
if ppg:
loc = cfg["location"]
self.resources.append({
"apiVersion": "2018-04-01",
"type": "Microsoft.Compute/proximityPlacementGroups",
"name": ppg,
"tags": gtags,
"location": loc
})
def __helper_arm_create_osprofile(self, rname, rtype, adminuser, adminpass, sshkey, customdata):
if rtype == "vm":
name = "computerName"
else:
name = "computerNamePrefix"
osprofile = {
name: rname,
"adminUsername": adminuser
}
if customdata:
if customdata.startswith("http"):
customdata = "#include\n" + customdata
osprofile["customData"] = "[base64('" + customdata + "')]"
if adminpass != "<no-password>":
osprofile["adminPassword"] = adminpass
else:
osprofile["linuxConfiguration"] = {
"disablePasswordAuthentication": True,
"ssh": {
"publicKeys": [
{
"keyData": sshkey,
"path": "/home/{}/.ssh/authorized_keys".format(adminuser)
}
]
}
}
return osprofile
def __helper_arm_create_datadisks(self, sizes, sku, cache):
datadisks = []
for i, d in enumerate(sizes):
if d < 4096:
cacheoption = cache
else:
cacheoption = "None"
# To support deploying images that contains datadisks
createOption = "Empty"
            if d == 0:
                d = ""
                createOption = "FromImage"
if sku == "UltraSSD_LRS":
cacheoption = "None"
datadisks.append({
"caching": cacheoption,
"managedDisk": {
"storageAccountType": sku
},
"createOption": createOption,
"lun": i,
"diskSizeGB": d
})
return datadisks
def __helper_arm_create_image_reference(self, refstr):
if ":" in refstr:
return {
"publisher": refstr.split(":")[0],
"offer": refstr.split(":")[1],
"sku": refstr.split(":")[2],
"version": refstr.split(":")[3]
}
else:
return {
"id": refstr
}
def __helper_arm_create_plan(self, refstr):
if ":" in refstr:
return {
"publisher": refstr.split(":")[0],
"product": refstr.split(":")[1],
"name": refstr.split(":")[2]
}
else:
return { }
    def __helper_arm_add_zones(self, res, zones):
        strzones = []
        if isinstance(zones, list):
            strzones = [str(z) for z in zones]
        elif zones is not None:
            strzones.append(str(zones))
        if strzones:
            res["zones"] = strzones
def _add_vm(self, cfg, r, vnet_in_deployment):
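        """Emit a virtual machine (or several numbered instances) for resource
        `r`, together with its NIC and, when a public IP is requested, a
        public IP address and network security group."""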
res = cfg["resources"][r]
rsshport = cfg.get("ssh_port", 22)
rtype = res["type"]
rsize = res["vm_type"]
rimage = res["image"]
ros = rimage.split(':')
rinstances = res.get("instances", 1)
rpip = res.get("public_ip", False)
rdns = res.get("dns_name", None)
rnsgallow = res.get("nsg_allow", None)
rnsgsourceip = res.get("nsg_source_ip", None)
rppg = res.get("proximity_placement_group", False)
rppgname = cfg.get("proximity_placement_group_name", None)
raz = res.get("availability_zones", None)
rsubnet = res["subnet"]
ran = res.get("accelerated_networking", False)
rlowpri = res.get("low_priority", False)
rephemeralosdisk = res.get("ephemeral_os_disk", False)
rephemeralosdiskplacement = res.get("ephemeral_os_disk_placement", "CacheDisk")
rosdisksize = res.get("os_disk_size", None)
rosstoragesku = res.get("os_storage_sku", "Premium_LRS")
rdatadisks = res.get("data_disks", [])
rstoragesku = res.get("storage_sku", "Premium_LRS")
rstoragecache = res.get("storage_cache", "ReadWrite")
gtags = cfg.get("global_tags", {})
rtags = res.get("resource_tags", {}).copy()
rtags.update(gtags)
rmanagedidentity = res.get("managed_identity", None)
loc = cfg["location"]
ravset = res.get("availability_set")
customdata = res.get("custom_data", None)
adminuser = cfg["admin_user"]
rrg = cfg["resource_group"]
vnetname = cfg["vnet"]["name"]
vnetrg = cfg["vnet"].get("resource_group", rrg)
if vnet_in_deployment:
rsubnetid = "[resourceId('Microsoft.Network/virtualNetworks/subnets', '{}', '{}')]".format(
vnetname, rsubnet)
else:
rsubnetid = "[resourceId('{}', 'Microsoft.Network/virtualNetworks/subnets', '{}', '{}')]".format(
vnetrg, vnetname, rsubnet)
rpassword = res.get("password", "<no-password>")
with open(adminuser+"_id_rsa.pub") as f:
sshkey = f.read().strip()
        if rsshport != 22:
            if customdata:
                log.error("Cannot specify custom data with a non-standard SSH port for VMs with a public IP.")
                sys.exit(1)
            customdata = f"#!/bin/bash\nsed -i \"s/^#Port 22/Port 22\\nPort {rsshport}/\" /etc/ssh/sshd_config\nyum install -y policycoreutils-python-utils\nsemanage port -a -t ssh_port_t -p tcp {rsshport}\nsystemctl restart sshd\n"
if ravset and ravset not in self.avsets:
arm_avset = {
"name": ravset,
"type": "Microsoft.Compute/availabilitySets",
"apiVersion": "2018-10-01",
"location": loc,
"tags": gtags,
"sku": {
"name": "Aligned"
},
"properties": {
"platformUpdateDomainCount": 1,
"platformFaultDomainCount": 1
}
}
if rppg:
arm_avset["properties"]["proximityPlacementGroup"] = {
"id": f"[resourceId('Microsoft.Compute/proximityPlacementGroups','{rppgname}')]"
}
arm_avset["dependsOn"] = [
f"Microsoft.Compute/proximityPlacementGroups/{rppgname}"
]
self.resources.append(arm_avset)
self.avsets.add(ravset)
rorig = r
for instance in range(1, rinstances+1):
if rinstances > 1:
r = "{}{:04}".format(rorig, instance)
nicdeps = []
if vnet_in_deployment:
nicdeps.append("Microsoft.Network/virtualNetworks/"+vnetname)
if rpip:
pipname = r+"_pip"
if rdns:
dnsname = rdns
else:
dnsname = azutil.get_dns_label(rrg, pipname, True)
if dnsname:
log.debug(f"dns name: {dnsname} (using existing one)")
else:
dnsname = r+str(uuid.uuid4())[:6]
log.debug(f"dns name: {dnsname}")
nsgname = r+"_nsg"
nicdeps.append("Microsoft.Network/publicIpAddresses/"+pipname)
nicdeps.append(
"Microsoft.Network/networkSecurityGroups/"+nsgname)
pipres = {
"type": "Microsoft.Network/publicIPAddresses",
"apiVersion": "2018-01-01",
"name": pipname,
"location": loc,
"dependsOn": [],
"tags": gtags,
"properties": {
"dnsSettings": {
"domainNameLabel": dnsname
}
}
}
self.__helper_arm_add_zones(pipres, raz)
self.resources.append(pipres)
nsg_security_rules = {
"rdp": {
"name": "default-allow-rdp",
"properties": {
"protocol": "Tcp",
"sourcePortRange": "*",
"destinationPortRange": "3389",
"sourceAddressPrefixes": [ "0.0.0.0/0" ],
"destinationAddressPrefix": "*",
"access": "Allow",
"priority": 1000,
"direction": "Inbound"
}
},
"ssh": {
"name": "default-allow-ssh",
"properties": {
"protocol": "Tcp",
"sourcePortRange": "*",
"destinationPortRange": str(rsshport),
"sourceAddressPrefixes": [ "0.0.0.0/0" ],
"destinationAddressPrefix": "*",
"access": "Allow",
"priority": 1010,
"direction": "Inbound"
}
},
"http": {
"name": "default-allow-http",
"properties": {
"protocol": "Tcp",
"sourcePortRange": "*",
"destinationPortRange": "80",
"sourceAddressPrefixes": [ "0.0.0.0/0" ],
"destinationAddressPrefix": "*",
"access": "Allow",
"priority": 1020,
"direction": "Inbound"
}
},
"https": {
"name": "default-allow-https",
"properties": {
"protocol": "Tcp",
"sourcePortRange": "*",
"destinationPortRange": "443",
"sourceAddressPrefixes": [ "0.0.0.0/0" ],
"destinationAddressPrefix": "*",
"access": "Allow",
"priority": 1030,
"direction": "Inbound"
}
},
"grafana": {
"name": "default-allow-grafana",
"properties": {
"protocol": "Tcp",
"sourcePortRange": "*",
"destinationPortRange": "3000",
"sourceAddressPrefixes": [ "0.0.0.0/0" ],
"destinationAddressPrefix": "*",
"access": "Allow",
"priority": 1040,
"direction": "Inbound"
}
},
"zcentral": {
"name": "default-allow-zcentral",
"properties": {
"protocol": "Tcp",
"sourcePortRange": "*",
"destinationPortRange": "42966",
"sourceAddressPrefixes": [ "0.0.0.0/0" ],
"destinationAddressPrefix": "*",
"access": "Allow",
"priority": 1050,
"direction": "Inbound"
}
},
"vnc": {
"name": "default-allow-vnc",
"properties": {
"protocol": "Tcp",
"sourcePortRange": "*",
"destinationPortRange": "4060",
"sourceAddressPrefixes": [ "0.0.0.0/0" ],
"destinationAddressPrefix": "*",
"access": "Allow",
"priority": 1060,
"direction": "Inbound"
}
}
}
if rnsgallow:
nsgrules = [ nsg_security_rules[service] for service in rnsgallow ]
else:
if ros[0] == "MicrosoftWindowsServer" or ros[0] == "MicrosoftWindowsDesktop":
nsgrules = [ nsg_security_rules["rdp"] ]
else:
nsgrules = [ nsg_security_rules["ssh"] ]
                if rnsgsourceip:
                    if isinstance(rnsgsourceip, str):
                        log.warning("Converting deprecated string syntax for 'nsg_source_ip' to list")
                        rnsgsourceip = ["0.0.0.0/0"] if "*" in rnsgsourceip else [rnsgsourceip]
                    for rule in nsgrules:
                        rule["properties"]["sourceAddressPrefixes"] = rnsgsourceip
self.resources.append({
"type": "Microsoft.Network/networkSecurityGroups",
"apiVersion": "2017-06-01",
"name": nsgname,
"location": loc,
"dependsOn": [],
"tags": gtags,
"properties": {
"securityRules": nsgrules
}
})
nicname = r+"_nic"
ipconfigname = r+"_ipconfig"
nicprops = {
"ipConfigurations": [
{
"name": ipconfigname,
"properties": {
"privateIPAllocationMethod": "Dynamic",
"subnet": {
"id": rsubnetid
}
}
}
],
"enableAcceleratedNetworking": ran
}
if rpip:
nicprops["ipConfigurations"][0]["properties"]["publicIPAddress"] = {
"id": "[resourceId('Microsoft.Network/publicIPAddresses', '{}')]".format(pipname)
}
nicprops["networkSecurityGroup"] = {
"id": "[resourceId('Microsoft.Network/networkSecurityGroups', '{}')]".format(nsgname)
}
self.resources.append({
"type": "Microsoft.Network/networkInterfaces",
"apiVersion": "2016-09-01",
"name": nicname,
"location": loc,
"dependsOn": nicdeps,
"tags": gtags,
"properties": nicprops
})
osprofile = self.__helper_arm_create_osprofile(
r, rtype, adminuser, rpassword, sshkey, customdata)
datadisks = self.__helper_arm_create_datadisks(
rdatadisks, rstoragesku, rstoragecache)
imageref = self.__helper_arm_create_image_reference(rimage)
deps = [f"Microsoft.Network/networkInterfaces/{nicname}"]
if rppg:
deps.append(
f"Microsoft.Compute/proximityPlacementGroups/{rppgname}")
if ravset:
deps.append(f"Microsoft.Compute/availabilitySets/{ravset}")
# Add support for cyclecloud/azhpc plan
plan = ""
if ros[0] == "azurecyclecloud" or ros[0] == "azhpc" or ros[0] == "almalinux" or ros[0] == "erockyenterprisesoftwarefoundationinc1653071250513":
plan = self.__helper_arm_create_plan(rimage)
            if rephemeralosdisk:
osdisk = {
"diffDiskSettings": {
"option": "Local",
"placement": rephemeralosdiskplacement
},
"caching": "ReadOnly",
"createOption": "FromImage"
}
else:
osdisk = {
"name": f"{r}_osdisk",
"createOption": "fromImage",
"caching": "ReadWrite",
"managedDisk": {
"storageAccountType": rosstoragesku
}
}
vmres = {
"type": "Microsoft.Compute/virtualMachines",
"apiVersion": "2020-06-01",
"name": r,
"location": loc,
"plan": plan,
"dependsOn": deps,
"tags": rtags,
"properties": {
"hardwareProfile": {
"vmSize": rsize
},
"networkProfile": {
"networkInterfaces": [
{
"id": "[resourceId('Microsoft.Network/networkInterfaces', '{}')]".format(nicname)
}
]
},
"storageProfile": {
"osDisk": osdisk,
"imageReference": imageref,
"dataDisks": datadisks
},
"osProfile": osprofile
}
}
if rlowpri:
vmres["properties"]["priority"] = "Spot"
vmres["properties"]["evictionPolicy"] = "Deallocate"
if rppg:
vmres["properties"]["proximityPlacementGroup"] = {
"id": f"[resourceId('Microsoft.Compute/proximityPlacementGroups','{rppgname}')]"
}
if rstoragesku == "UltraSSD_LRS":
vmres["properties"]["additionalCapabilities"] = {
"ultraSSDEnabled": True}
if ravset:
vmres["properties"]["availabilitySet"] = {
"id": f"[resourceId('Microsoft.Compute/availabilitySets','{ravset}')]"
}
if rosdisksize:
vmres["properties"]["storageProfile"]["osDisk"]["diskSizeGb"] = rosdisksize
if rmanagedidentity is not None:
vmres["identity"] = {
"type": "SystemAssigned"
}
role_lookup = {
"reader": "[resourceId('Microsoft.Authorization/roleDefinitions', 'acdd72a7-3385-48ef-bd42-f606fba81ae7')]",
"contributor": "[resourceId('Microsoft.Authorization/roleDefinitions', 'b24988ac-6180-42a0-ab88-20f7382dd24c')]",
"owner": "[resourceId('Microsoft.Authorization/roleDefinitions', '8e3af657-a8ff-443c-a75c-2fe8c4bcb635')]"
}
role = rmanagedidentity.get("role", "reader")
if role not in role_lookup:
log.error(
f"{role} is an invalid role for a managed identity (options are: {', '.join(role_lookup.keys())})")
sys.exit(1)
                scope_lookup = {
                    "resource_group": "[resourceGroup().id]",
                    # a role assignment scope must be a full resource id
                    "subscription": "[subscription().id]"
                }
scope = rmanagedidentity.get("scope", "resource_group")
if scope not in scope_lookup:
log.error(
f"{scope} is an invalid scope for a managed identity (options are: {', '.join(scope_lookup.keys())})")
sys.exit(1)
self.resources.append({
"apiVersion": "2017-09-01",
"type": "Microsoft.Authorization/roleAssignments",
"name": f"[guid(subscription().subscriptionId, resourceGroup().id, '{r}')]",
"properties": {
"roleDefinitionId": role_lookup[role],
"principalId": f"[reference('{r}', '2017-12-01', 'Full').identity.principalId]",
"scope": scope_lookup[scope]
},
"dependsOn": [
f"[resourceId('Microsoft.Compute/virtualMachines/', '{r}')]"
]
})
self.__helper_arm_add_zones(vmres, raz)
self.resources.append(vmres)
def _add_vmss(self, cfg, r, vnet_in_deployment):
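        """Emit a virtual machine scale set for resource `r`, mirroring the
        per-VM options (spot priority, placement group, managed identity,
        data disks) in the scale-set profile."""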
res = cfg["resources"][r]
rtype = res["type"]
rsize = res["vm_type"]
rimage = res["image"]
ros = rimage.split(':')
rinstances = res.get("instances")
rppg = res.get("proximity_placement_group", False)
rppgname = cfg.get("proximity_placement_group_name", None)
raz = res.get("availability_zones", None)
rfaultdomaincount = res.get("fault_domain_count", 1)
rsingleplacementgroup = res.get("single_placement_group", True)
roverprovision = res.get("overprovision", True)
rsubnet = res["subnet"]
ran = res.get("accelerated_networking", False)
rlowpri = res.get("low_priority", False)
rephemeralosdisk = res.get("ephemeral_os_disk", False)
rephemeralosdiskplacement = res.get("ephemeral_os_disk_placement", "CacheDisk")
rosdisksize = res.get("os_disk_size", None)
rosstoragesku = res.get("os_storage_sku", "Premium_LRS")
rdatadisks = res.get("data_disks", [])
rstoragesku = res.get("storage_sku", "Premium_LRS")
rstoragecache = res.get("storage_cache", "ReadWrite")
customdata = res.get("custom_data", None)
loc = cfg["location"]
adminuser = cfg["admin_user"]
rrg = cfg["resource_group"]
gtags = cfg.get("global_tags", {})
rtags = res.get("resource_tags", {}).copy()
rtags.update(gtags)
rmanagedidentity = res.get("managed_identity", None)
vnetname = cfg["vnet"]["name"]
vnetrg = cfg["vnet"].get("resource_group", rrg)
if vnet_in_deployment:
rsubnetid = "[resourceId('Microsoft.Network/virtualNetworks/subnets', '{}', '{}')]".format(
vnetname, rsubnet)
else:
rsubnetid = "[resourceId('{}', 'Microsoft.Network/virtualNetworks/subnets', '{}', '{}')]".format(
vnetrg, vnetname, rsubnet)
rpassword = res.get("password", "<no-password>")
with open(adminuser+"_id_rsa.pub") as f:
sshkey = f.read().strip()
deps = []
if vnet_in_deployment:
deps.append("Microsoft.Network/virtualNetworks/"+vnetname)
if rppg:
deps.append("Microsoft.Compute/proximityPlacementGroups/"+rppgname)
osprofile = self.__helper_arm_create_osprofile(
r, rtype, adminuser, rpassword, sshkey, customdata)
datadisks = self.__helper_arm_create_datadisks(
rdatadisks, rstoragesku, rstoragecache)
imageref = self.__helper_arm_create_image_reference(rimage)
# Add support for cyclecloud/azurehpc plan
plan = ""
if ros[0] == "azurecyclecloud" or ros[0] == "azhpc" or ros[0] == "almalinux" or ros[0] == "erockyenterprisesoftwarefoundationinc1653071250513":
plan = self.__helper_arm_create_plan(rimage)
        if rephemeralosdisk:
osdisk = {
"diffDiskSettings": {
"option": "Local",
"placement": rephemeralosdiskplacement
},
"caching": "ReadOnly",
"createOption": "FromImage"
}
else:
osdisk = {
"createOption": "FromImage",
"caching": "ReadWrite",
"managedDisk": {
"storageAccountType": rosstoragesku
}
}
nicname = r+"_nic"
ipconfigname = r+"_ipconfig"
vmssres = {
"type": "Microsoft.Compute/virtualMachineScaleSets",
"apiVersion": "2020-06-01",
"name": r,
"location": loc,
"plan": plan,
"dependsOn": deps,
"tags": rtags,
"sku": {
"name": rsize,
"capacity": rinstances
},
"properties": {
"overprovision": roverprovision,
"upgradePolicy": {
"mode": "manual"
},
"virtualMachineProfile": {
"storageProfile": {
"osDisk": osdisk,
"dataDisks": datadisks,
"imageReference": imageref
},
"osProfile": osprofile,
"networkProfile": {
"networkInterfaceConfigurations": [
{
"name": nicname,
"properties": {
"primary": "true",
"ipConfigurations": [
{
"name": ipconfigname,
"properties": {
"subnet": {
"id": rsubnetid
}
}
}
],
"enableAcceleratedNetworking": ran
}
}
]
}
},
"singlePlacementGroup": rsingleplacementgroup
}
}
if rfaultdomaincount:
vmssres["properties"]["platformFaultDomainCount"] = rfaultdomaincount
if rppg:
vmssres["properties"]["proximityPlacementGroup"] = {
"id": "[resourceId('Microsoft.Compute/proximityPlacementGroups','{}')]".format(rppgname)
}
if rstoragesku == "UltraSSD_LRS":
vmssres["properties"]["additionalCapabilities"] = {
"ultraSSDEnabled": True
}
if rlowpri:
vmssres["properties"]["virtualMachineProfile"]["priority"] = "Spot"
vmssres["properties"]["virtualMachineProfile"]["evictionPolicy"] = "Delete"
if rosdisksize:
vmssres["properties"]["virtualMachineProfile"]["storageProfile"]["osDisk"]["diskSizeGb"] = rosdisksize
if rmanagedidentity is not None:
vmssres["identity"] = {
"type": "SystemAssigned"
}
role_lookup = {
"reader": "[resourceId('Microsoft.Authorization/roleDefinitions', 'acdd72a7-3385-48ef-bd42-f606fba81ae7')]",
"contributor": "[resourceId('Microsoft.Authorization/roleDefinitions', 'b24988ac-6180-42a0-ab88-20f7382dd24c')]",
"owner": "[resourceId('Microsoft.Authorization/roleDefinitions', '8e3af657-a8ff-443c-a75c-2fe8c4bcb635')]"
}
role = rmanagedidentity.get("role", "reader")
if role not in role_lookup:
log.error(
f"{role} is an invalid role for a managed identity (options are: {', '.join(role_lookup.keys())})")
sys.exit(1)
        scope_lookup = {
            "resource_group": "[resourceGroup().id]",
            # a role assignment scope must be a full resource id
            "subscription": "[subscription().id]"
        }
scope = rmanagedidentity.get("scope", "resource_group")
if scope not in scope_lookup:
log.error(
f"{scope} is an invalid scope for a managed identity (options are: {', '.join(scope_lookup.keys())})")
sys.exit(1)
self.resources.append({
"apiVersion": "2017-09-01",
"type": "Microsoft.Authorization/roleAssignments",
"name": f"[guid(subscription().subscriptionId, resourceGroup().id, '{r}')]",
"tags": gtags,
"properties": {
"roleDefinitionId": role_lookup[role],
"principalId": f"[reference('{r}', '2017-12-01', 'Full').identity.principalId]",
"scope": scope_lookup[scope]
},
"dependsOn": [
f"[resourceId('Microsoft.Compute/virtualMachineScaleSets/', '{r}')]"
]
})
self.__helper_arm_add_zones(vmssres, raz)
self.resources.append(vmssres)
def read_resources(self, cfg, vnet_in_deployment):
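        """Walk the `resources` section and dispatch to the vm/vmss builders;
        slurm_partition entries are deliberately skipped here."""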
resources = cfg.get("resources", {})
for r in resources.keys():
rtype = cfg["resources"][r]["type"]
if rtype == "vm":
self._add_vm(cfg, r, vnet_in_deployment)
elif rtype == "vmss":
self._add_vmss(cfg, r, vnet_in_deployment)
elif rtype == "slurm_partition":
pass
else:
log.error(
"unrecognised resource type ({}) for {}".format(rtype, r))
def has_resources(self):
return len(self.resources) > 0
def read(self, cfg, deploy_network):
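        """Populate the template from the config: network (when requested),
        proximity placement group, compute resources and storage."""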
rg = cfg["resource_group"]
vnetrg = cfg["vnet"].get("resource_group", rg)
        vnet_in_deployment = (rg == vnetrg) and deploy_network
if deploy_network:
self._add_network(cfg)
self._add_proximity_group(cfg)
self.read_resources(cfg, vnet_in_deployment)
storage = cfg.get("storage", {})
for s in storage.keys():
stype = cfg["storage"][s]["type"]
if stype == "anf":
self._add_netapp(cfg, s, deploy_network)
elif stype == "storageaccount":
self._add_storageaccount(cfg, s)
else:
log.error(
"unrecognised storage type ({}) for {}".format(stype, s))
def to_json(self):
return json.dumps({
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": self.parameters,
"variables": self.variables,
"resources": self.resources,
"outputs": self.outputs
}, indent=4)
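
# A minimal end-to-end sketch (illustrative only; the enclosing class's name is
# not visible in this excerpt, so "ArmTemplate" below is a placeholder, and
# `cfg` stands for the parsed config dict the builders above consume):
#
#     tpl = ArmTemplate()
#     tpl.read(cfg, deploy_network=True)
#     if tpl.has_resources():
#         with open("deploy.json", "w") as f:
#             f.write(tpl.to_json())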
|
6c01989a92ca794a161a2dd8ce091934fab4f9de
|
ec85250addb7357dfe7bb3e0680d53fc7b0fd8fb
|
/python_modules/dagster/dagster/_utils/temp_file.py
|
90f7dbf762301fc595620babeee3c0f43af85083
|
[
"Apache-2.0"
] |
permissive
|
dagster-io/dagster
|
6adb5deee8bcf3ea1866a6a64f2ed81e1db5e73a
|
fe21995e0402878437a828c6a4244025eac8c43b
|
refs/heads/master
| 2023-09-05T20:46:08.203794
| 2023-09-05T19:54:52
| 2023-09-05T19:54:52
| 131,619,646
| 8,565
| 1,154
|
Apache-2.0
| 2023-09-14T21:57:37
| 2018-04-30T16:30:04
|
Python
|
UTF-8
|
Python
| false
| false
| 1,820
|
py
|
temp_file.py
|
import itertools
import os
import shutil
import tempfile
from contextlib import contextmanager
import dagster._check as check
from dagster._core.storage.file_manager import LocalFileHandle
def _unlink_swallow_errors(path):
check.str_param(path, "path")
try:
os.unlink(path)
except Exception:
pass
@contextmanager
def get_temp_file_handle_with_data(data):
with get_temp_file_name_with_data(data) as temp_file:
yield LocalFileHandle(temp_file)
@contextmanager
def get_temp_file_name_with_data(data):
with get_temp_file_name() as temp_file:
with open(temp_file, "wb") as ff:
ff.write(data)
yield temp_file
@contextmanager
def get_temp_file_handle():
with get_temp_file_name() as temp_file:
yield LocalFileHandle(temp_file)
@contextmanager
def get_temp_file_name():
handle, temp_file_name = tempfile.mkstemp()
os.close(handle) # just need the name - avoid leaking the file descriptor
try:
yield temp_file_name
finally:
_unlink_swallow_errors(temp_file_name)
@contextmanager
def get_temp_file_names(number):
check.int_param(number, "number")
    temp_file_names = []
for _ in itertools.repeat(None, number):
handle, temp_file_name = tempfile.mkstemp()
        os.close(handle)  # just need the name - avoid leaking the file descriptor
temp_file_names.append(temp_file_name)
try:
yield tuple(temp_file_names)
finally:
for temp_file_name in temp_file_names:
_unlink_swallow_errors(temp_file_name)
@contextmanager
def get_temp_dir(in_directory=None):
temp_dir = None
try:
temp_dir = tempfile.mkdtemp(dir=in_directory)
yield temp_dir
finally:
if temp_dir:
shutil.rmtree(temp_dir)
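
# A minimal usage sketch (illustrative only, not part of the original module):
#
#     with get_temp_file_name_with_data(b"hello") as path:
#         with open(path, "rb") as f:
#             assert f.read() == b"hello"
#     # the file has been unlinked once the context manager exits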
|
5921e25677ec6a72b3a5bf465dc01c595e7f0d3a
|
609c2895c89a142e91939ae3109f7353789f3de7
|
/test/tests/test_revault.py
|
5958ef29c9157e44f14cd2f9be0340b1bf89e497
|
[
"MIT"
] |
permissive
|
cryptoadvance/specter-diy
|
a668883597f9b2beaf739f4cb9dff9ec753f4914
|
58e91aef6464c63779fa31e1fef4b69126937700
|
refs/heads/master
| 2023-08-17T03:58:21.682752
| 2023-08-13T19:13:19
| 2023-08-13T19:13:30
| 205,435,476
| 391
| 79
|
MIT
| 2023-08-16T19:40:36
| 2019-08-30T18:19:49
|
Python
|
UTF-8
|
Python
| false
| false
| 7,962
|
py
|
test_revault.py
|
from unittest import TestCase
from .util import get_keystore, get_wallets_app, clear_testdir, check_sigs
from embit.psbt import PSBT
from embit.psbtview import PSBTView
from apps.wallets.wallet import Wallet, WalletError
from io import BytesIO
WALLETS = [
"cpfp_descriptor&wsh(multi(1,xpub661MyMwAqRbcG9F1VX1rYev3ZsFRQ8XdJYk7rYLiaCfLt3WjVQ5o12SeJHPQEUCC8NmTWFmyejXrp3GWTFQNrXHL7iUXcoRo79VEnmKGYhc/*,xpub6CZFHPW1GiB8YgV7zGpeQDB6mMHZYPQyUaHrM1nMvKMgLxwok4xCtnzjuxQ3p1LHJUkz5i1Y7bRy5fmGrdg8UBVb39XdXNtWWd2wTsNd7T9/*))",
"deposit_descriptor&wsh(multi(2,xpub661MyMwAqRbcEgaXJWejvvXw2PHvr6ZF5ybqahrCnptM1kTNirjVETa6Ba4oTJz3Uppghzow6pQaGzyPaUVeb7vSufEYRUVMyNKX9PyRxWC/*,xpub6DEzq5DNPx2rPiZJ7wvFhxRKUKDoV1GwjFmFdaxFfbsw9HsHyxc9usoRUMxqJaMrwoXh4apahsGEnjAS4cVCBDgqsx5Groww22AdHbgxVDg/*))",
"unvault_descriptor&wsh(andor(multi(1,xpub661MyMwAqRbcG9F1VX1rYev3ZsFRQ8XdJYk7rYLiaCfLt3WjVQ5o12SeJHPQEUCC8NmTWFmyejXrp3GWTFQNrXHL7iUXcoRo79VEnmKGYhc/*,xpub6CZFHPW1GiB8YgV7zGpeQDB6mMHZYPQyUaHrM1nMvKMgLxwok4xCtnzjuxQ3p1LHJUkz5i1Y7bRy5fmGrdg8UBVb39XdXNtWWd2wTsNd7T9/*),and_v(v:multi(2,02abe475b199ec3d62fa576faee16a334fdb86ffb26dce75becebaaedf328ac3fe,030f64b922aee2fd597f104bc6cb3b670f1ca2c6c49b1071a1a6c010575d94fe5a),older(3)),thresh(2,pkh(xpub661MyMwAqRbcEgaXJWejvvXw2PHvr6ZF5ybqahrCnptM1kTNirjVETa6Ba4oTJz3Uppghzow6pQaGzyPaUVeb7vSufEYRUVMyNKX9PyRxWC/*),a:pkh(xpub6DEzq5DNPx2rPiZJ7wvFhxRKUKDoV1GwjFmFdaxFfbsw9HsHyxc9usoRUMxqJaMrwoXh4apahsGEnjAS4cVCBDgqsx5Groww22AdHbgxVDg/*))))#0v6kshkd",
# unvault_descriptor without cosigners and using xpub
"unvault_descriptor_without_cosigs&wsh(andor(thresh(1,pk(xpub661MyMwAqRbcFozCJEKQbfoEScGGAeV8Mx6RGwwYmFgeKyJTAfyye2L7uuCmNaZycG9DK3jWB9Gvpre94LTu7J3CheiQitNJjDF9RMtT5Kt/*)),older(10),thresh(2,pkh(xpub661MyMwAqRbcEyoQHjRmcCXrEw8YVZxecB1fmm17Dqs9muXBAQUrLGk9JGWBBoqrB9VnMdzN5u2D8dVdLnViTxWSbRmb58xNEX3GARisZBx/*),a:pkh(xpub661MyMwAqRbcGB7fwdofjHV6FsR2fTxC8tkb96eT453UaCaxKTgLUNHW2sjShw4eez7afHnwF2F8G4VfaeeD5Kg3TXrKtMKtXJvUK3Jh3uE/*))))",
]
MNEMONICS = [
"glue possible carpet youth pepper damp capital wrist wage weird fame drastic story vehicle same",
"toddler vanish target people solar lens midnight great ability state imitate spot discover swamp park",
"rapid veteran belt horse evidence wine rabbit price protect foam summer excuse",
]
PSBTS = [
# emergency tx
( # unsigned, signed tx
"cHNidP8BAF4CAAAAAWxdMAXc2LtoRZ5LZIS5xzBiCcMfDRxDuqljOjadSu6ZAQAAAAD9////AXCWmjsAAAAAIgAgy7Co1PHzwoce0hHQR5RHMS72lSZudTF3bYrNgqLbkDYAAAAAAAEBKwDKmjsAAAAAIgAgslrG3QMtjTKpRrHl3ptzoveYxLV1H4x72eg19U2FZPUBAwSBAAAAAQVHUiEDVCsD4yYHkfSMBmB7mvLabNgt+VS7H3cnZivvwQCZ0H8hAlgt7b9E9GVk5djNsGdTbWDr40zR0YAc/1G7+desKJtDUq4iBgNUKwPjJgeR9IwGYHua8tps2C35VLsfdydmK+/BAJnQfwgMIvXFAAAAAAAA",
"cHNidP8BAF4CAAAAAWxdMAXc2LtoRZ5LZIS5xzBiCcMfDRxDuqljOjadSu6ZAQAAAAD9////AXCWmjsAAAAAIgAgy7Co1PHzwoce0hHQR5RHMS72lSZudTF3bYrNgqLbkDYAAAAAACICA1QrA+MmB5H0jAZge5ry2mzYLflUux93J2Yr78EAmdB/RzBEAiBZe+5519IIyaStMzEwhtADhy1zcJGjcvJDMc4czrmg3QIgf9i/JJZR7jYatii8Q/K67N/Nr7PvifdMFIwD5bZ0M7qBAAA=",
[0], # mnemonics that can sign, signed by the first one in the list
["deposit_descriptor"], # signing wallets
),
# emergency_unvault_tx
(
"cHNidP8BAF4CAAAAAfBtTnKT2ciyQFiNsM+ndFiUsOhgSQbIuIm3hdZMU7a7AAAAAAD9////AYr9mTsAAAAAIgAgy7Co1PHzwoce0hHQR5RHMS72lSZudTF3bYrNgqLbkDYAAAAAAAEBK7hCmjsAAAAAIgAgb2phQajLZoTd4qwrA+t5RfiT0EoT+9+8gnKxE7f2ALIBAwSBAAAAAQXKUSEDQtzABCWiGw3zomghjr/+JYlRWPapkMkFz1bYAPyEX6EhAgKTOrEDfq0KpKeFjG1J1nBeH7O8X2awCRive58A7NUmUq5kdqkUTeFfs5G8hINHgr2lEXGqC1pqmhuIrGt2qRRyqV8ir5obrrhS+alScvjCHZjyZIisbJNSh2dSIQKr5HWxmew9YvpXb67hajNP24b/sm3Odb7Ouq7fMorD/iEDD2S5Iq7i/Vl/EEvGyztnDxyixsSbEHGhpsAQV12U/lpSr1OyaCIGA1QrA+MmB5H0jAZge5ry2mzYLflUux93J2Yr78EAmdB/CAwi9cUAAAAAAAA=",
"cHNidP8BAF4CAAAAAfBtTnKT2ciyQFiNsM+ndFiUsOhgSQbIuIm3hdZMU7a7AAAAAAD9////AYr9mTsAAAAAIgAgy7Co1PHzwoce0hHQR5RHMS72lSZudTF3bYrNgqLbkDYAAAAAACICA1QrA+MmB5H0jAZge5ry2mzYLflUux93J2Yr78EAmdB/RzBEAiBZALTbnvGA+0JaUF2XSbeZbRbgWJHmJCUonqwPQL5a5QIgPc7ASzdlYR+fQ31xZPzvsuw76UGepmmJI2Q5E9gyIKqBAAA=",
[0, 1],
["unvault_descriptor"],
),
# cancel_tx
(
"cHNidP8BAF4CAAAAAfBtTnKT2ciyQFiNsM+ndFiUsOhgSQbIuIm3hdZMU7a7AAAAAAD9////AYr9mTsAAAAAIgAgslrG3QMtjTKpRrHl3ptzoveYxLV1H4x72eg19U2FZPUAAAAAAAEBK7hCmjsAAAAAIgAgb2phQajLZoTd4qwrA+t5RfiT0EoT+9+8gnKxE7f2ALIBAwSBAAAAAQXKUSEDQtzABCWiGw3zomghjr/+JYlRWPapkMkFz1bYAPyEX6EhAgKTOrEDfq0KpKeFjG1J1nBeH7O8X2awCRive58A7NUmUq5kdqkUTeFfs5G8hINHgr2lEXGqC1pqmhuIrGt2qRRyqV8ir5obrrhS+alScvjCHZjyZIisbJNSh2dSIQKr5HWxmew9YvpXb67hajNP24b/sm3Odb7Ouq7fMorD/iEDD2S5Iq7i/Vl/EEvGyztnDxyixsSbEHGhpsAQV12U/lpSr1OyaCIGA1QrA+MmB5H0jAZge5ry2mzYLflUux93J2Yr78EAmdB/CAwi9cUAAAAAAAA=",
"cHNidP8BAF4CAAAAAfBtTnKT2ciyQFiNsM+ndFiUsOhgSQbIuIm3hdZMU7a7AAAAAAD9////AYr9mTsAAAAAIgAgslrG3QMtjTKpRrHl3ptzoveYxLV1H4x72eg19U2FZPUAAAAAACICA1QrA+MmB5H0jAZge5ry2mzYLflUux93J2Yr78EAmdB/RzBEAiA8rmMXMTlf9+cMZmv4CZZ85Mmrx+XKLczcclVylJlhcwIgWDVSyFW7mXRgbAzh3i7HENutcSa+ZlC7eB/hRX88EkeBAAA=",
[0, 1],
["unvault_descriptor"],
),
# cancel_tx with unvault_descriptor_without_cosigs
(
"cHNidP8BAF4CAAAAAeJnSwIQ9BWdLKX8LrkM0VoRlchm6pqjFOpBpLFL2fxOAAAAAAD9////AbRW9wcAAAAAIgAgJMk4bvxMit7yF7KTHK7Rz0iR9JdwUmrVcD1ok7SRAs4AAAAAAAEBK1iS9wcAAAAAIgAgI88K3dsiL1U8Vmg3JLhg6gUnFCXGvTkK+781DS0uewYBAwSBAAAAAQVhIQKm242c21PacXWunfRWSZwk7VablpGPi7Vg8DQ4M9cPCaxRh2R2qRQnDL27yUjb1W8opiZchrMRU7rMU4isa3apFJWb6zI1jM0Z+5Pp/SVULQyNmi98iKxsk1KHZ1qyaCIGAqbbjZzbU9pxda6d9FZJnCTtVpuWkY+LtWDwNDgz1w8JCH4IUSgAAAAAIgYCvsxevQZJyDaSLOinK1TPnb3J195WFj3PcGNos7Wt4wkI5wT/vgAAAAAiBgNhT1Lm/4dL7XHNresMp77aznG0Dz6ARZgsgiL88uRE+wiKZPKpAAAAAAAiAgK+zF69BknINpIs6KcrVM+dvcnX3lYWPc9wY2izta3jCQjnBP++AAAAACICA2FPUub/h0vtcc2t6wynvtrOcbQPPoBFmCyCIvzy5ET7CIpk8qkAAAAAAA==",
"cHNidP8BAF4CAAAAAeJnSwIQ9BWdLKX8LrkM0VoRlchm6pqjFOpBpLFL2fxOAAAAAAD9////AbRW9wcAAAAAIgAgJMk4bvxMit7yF7KTHK7Rz0iR9JdwUmrVcD1ok7SRAs4AAAAAACICA2FPUub/h0vtcc2t6wynvtrOcbQPPoBFmCyCIvzy5ET7RzBEAiBXdewAkgELBOv1Dp76QoxzuCzXGEzrs/J+r6a8AQ9PWgIgZRfICcrHVKsfqWgVIzIAa60smwdqUe5kfL0QmCpDCtKBAAA=",
[2],
["unvault_descriptor_without_cosigs"],
),
]
class RevaultTest(TestCase):
def test_revault_sign(self):
"""Basic signing of the PSBT"""
for i, mnemonic in enumerate(MNEMONICS):
clear_testdir()
ks = get_keystore(mnemonic=mnemonic, password="")
wapp = get_wallets_app(ks, 'main')
# add wallets
for wdesc in WALLETS:
w = wapp.manager.parse_wallet(wdesc)
wapp.manager.add_wallet(w)
for j, (unsigned, signed, mnemonic_idx, wnames) in enumerate(PSBTS):
psbt = PSBT.from_string(unsigned)
s = BytesIO(psbt.to_string().encode())
# check it can sign b64-psbt
self.assertTrue(wapp.can_process(s))
# check it can sign raw psbt
s = BytesIO(psbt.serialize())
self.assertTrue(wapp.can_process(s))
fout = BytesIO()
wallets, meta = wapp.manager.preprocess_psbt(s, fout)
# check that we detected wallet and a non-standard sighash
self.assertEqual([w.name for w in wallets], wnames)
self.assertEqual([inp.get("label").replace(" (watch-only)", "") for inp in meta["inputs"]], wnames)
self.assertEqual(meta["inputs"][0].get("sighash"), "ALL | ANYONECANPAY")
fout.seek(0)
psbtv = PSBTView.view(fout)
b = BytesIO()
if i not in mnemonic_idx:
with self.assertRaises(WalletError):
sig_count = wapp.manager.sign_psbtview(psbtv, b, wallets, None)
elif i == mnemonic_idx[0]:
sig_count = wapp.manager.sign_psbtview(psbtv, b, wallets, None)
self.assertTrue(check_sigs(PSBT.parse(b.getvalue()), PSBT.from_base64(signed)))
|
3feb41e2986004b14ffa6a5f1fb7d169e06dd535
|
a66149460eda1d5343ee8e94401d91f3f2270015
|
/ans/ans.py
|
f456cf6052e644632fdd09cb01a700cd5df8cfb5
|
[
"MIT"
] |
permissive
|
bjlkeng/sandbox
|
59ae04a984f9a6a01caac78141250c9388e40574
|
3dac79472df6e895d6eb57f4fdbdaf2032ec0cf6
|
refs/heads/master
| 2023-09-01T03:06:34.845840
| 2023-08-18T01:11:43
| 2023-08-18T01:11:43
| 89,071,009
| 190
| 83
|
MIT
| 2023-08-18T01:13:22
| 2017-04-22T13:17:10
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 4,713
|
py
|
ans.py
|
import numpy as np
from math import ceil, floor
from decimal import Decimal
DEBUG=False
def generate_string(size, alphabet, prob=None):
assert len(alphabet) > 0
if prob is None:
prob = [1 / len(alphabet)] * len(alphabet)
assert len(alphabet) == len(prob)
assert sum(prob) == 1
return list(np.random.choice(alphabet, size, p=prob))
def code_uabs(msg, alphabet, prob):
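    '''Encode a binary message with uABS (uniform asymmetric binary system).
    alphabet/prob are reordered in place so alphabet[0] is the less probable
    symbol; each symbol then updates the integer state via the C(x, s) maps.'''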
assert len(alphabet) == 2
assert len(prob) == len(alphabet)
assert sum(prob) == 1.0
if prob[0] > prob[1]:
prob[0], prob[1] = prob[1], prob[0]
alphabet[0], alphabet[1] = alphabet[1], alphabet[0]
p = prob[0]
code = 1
for x in msg:
index = alphabet.index(x)
# s == 1 is lower prob character
s = 1 if index == 0 else 0
if s == 0:
# C(x,0) = new_x
code = ceil(Decimal(code + 1) / (Decimal(1) - Decimal(p))) - 1
else:
# C(x,1) = new_x
code = floor(Decimal(code) / Decimal(p))
return code
def decode_uabs(code, alphabet, prob):
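    '''Decode a uABS-coded integer back into the original symbol list,
    applying the same in-place alphabet/prob reordering as code_uabs.'''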
assert len(alphabet) == 2
assert len(prob) == len(alphabet)
assert sum(prob) == 1.0
if prob[0] > prob[1]:
prob[0], prob[1] = prob[1], prob[0]
alphabet[0], alphabet[1] = alphabet[1], alphabet[0]
msg = []
p = prob[0]
while code > 1:
# 0 if fract(x*p) < 1-p, else 1
s = ceil(Decimal(code + 1) * Decimal(p)) - ceil(Decimal(code) * Decimal(p))
if s == 0:
# D(x) = (new_x, 0)
code = code - ceil(Decimal(code) * Decimal(p))
else:
# D(x) = (new_x, 1)
code = ceil(Decimal(code) * Decimal(p))
msg = [alphabet[1-s]] + msg
return msg
def code_rans(msg, alphabet, freqs, quant_bits=12, renorm_bits=16):
'''
msg - list of strings where each character should be in alphabet
alphabet - list of strings representing symbols in alphabet
freqs - List of integers f[s] s.t. p_s ~= f[s] / 2^quant_bits
quant_bits - exponent of 2^N (quantizing factor)
renorm_bits - n-bit renormalization
'''
assert len(freqs) == len(alphabet)
assert all([type(f) == int for f in freqs])
assert all([f > 0 for f in freqs])
assert sum(freqs) == 1 << quant_bits
assert quant_bits <= renorm_bits
cdf = []
for i in range(len(freqs) + 1):
cdf.append(sum(freqs[:i]))
assert len(cdf) == len(freqs) + 1
codes = []
code = (1 << renorm_bits) - 1
for x in msg:
if DEBUG:
pcode = code
index = alphabet.index(x)
# Renormalization - if we would push past 2**renorm_bits, then renorm
new_code = ((floor(code / freqs[index]) << quant_bits)
+ (code % freqs[index])
+ cdf[index])
if new_code > ((1 << (2 * renorm_bits)) - 1):
if DEBUG:
                print('renorm')
codes.append(code & ((1 << renorm_bits) - 1))
code = code >> renorm_bits
# rANS
code = ((floor(code / freqs[index]) << quant_bits)
+ (code % freqs[index])
+ cdf[index])
if DEBUG:
            print(pcode, ' -> ', code)
codes.append(code)
return codes
def decode_rans(codes, alphabet, freqs, quant_bits=12, renorm_bits=16):
'''
codes - coded message
alphabet - list of strings representing symbols in alphabet
freqs - List of integers f[s] s.t. p_s ~= f[s] / 2^quant_bits
quant_bits - exponent of 2^N (quantizing factor)
renorm_bits - n-bit renormalization
'''
assert len(freqs) == len(alphabet)
assert all([type(f) == int for f in freqs])
assert sum(freqs) == (1 << quant_bits)
assert len(codes) >= 1
assert all(c < (1 << renorm_bits) for c in codes[:-1])
assert codes[-1] < (1 << (2*renorm_bits))
codes = codes.copy()
cdf = []
for i in range(len(freqs) + 1):
cdf.append(sum(freqs[:i]))
assert len(cdf) == len(freqs) + 1
msg = []
mask = (1 << quant_bits) - 1
code = codes.pop()
while code >= (1 << renorm_bits):
pcode = code
s = code & mask
index = np.argmax(np.array(cdf) > s) - 1
code = (freqs[index] * (code >> quant_bits)
+ (code & mask)
- cdf[index])
msg = [alphabet[index]] + msg
if (code < (1 << renorm_bits)) and codes:
if DEBUG:
                print('renorm')
assert codes[-1] < (1 << renorm_bits)
code = (code << renorm_bits) + codes.pop()
if DEBUG:
            print(pcode, ' -> ', code)
assert not codes, codes
return msg
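

if __name__ == "__main__":
    # Round-trip smoke test (added for illustration; not part of the original
    # module). uABS works on Decimals, so raise the context precision well
    # above the number of digits the integer state can reach; rANS is pure
    # integer arithmetic and needs no such care.
    from decimal import getcontext
    getcontext().prec = 200

    alphabet = ['a', 'b']
    prob = [0.25, 0.75]

    msg = generate_string(64, alphabet, prob=list(prob))
    code = code_uabs(msg, list(alphabet), list(prob))
    assert decode_uabs(code, list(alphabet), list(prob)) == list(msg)

    # rANS with a 12-bit quantized table: p(a) = 1024/4096, p(b) = 3072/4096
    msg = generate_string(500, alphabet, prob=list(prob))
    freqs = [1024, 3072]
    codes = code_rans(msg, alphabet, freqs, quant_bits=12, renorm_bits=16)
    assert decode_rans(codes, alphabet, freqs, quant_bits=12, renorm_bits=16) == list(msg)
    print("uABS and rANS round trips OK")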
|
cdc2c50aef8986f097558e367432a8f1927a9150
|
4da761358a18508cc35ab8c6d10d304c9affc7a8
|
/test_kernel_name.py
|
c65015e0971a5722f19b950d32e1e943360d0bf2
|
[
"BSD-3-Clause"
] |
permissive
|
jupyter-widgets/tutorial
|
c19dc1405676e3b44c1f1a4b48bad115d1da8330
|
c85413821b3d15eb278d514604f5e699dd5b4993
|
refs/heads/main
| 2023-08-21T00:44:01.968636
| 2022-07-12T15:21:35
| 2022-07-12T15:21:35
| 101,101,870
| 392
| 171
|
BSD-3-Clause
| 2022-07-12T16:03:14
| 2017-08-22T19:54:26
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 387
|
py
|
test_kernel_name.py
|
from pathlib import Path
from tools.kernel_names import get_kernel_name
TUTORIAL_KERNEL_NAME = 'widget-tutorial'
NOTEBOOK_DIRECTORY = Path(__file__).parent / 'notebooks'
def test_kernel_name():
notebooks = NOTEBOOK_DIRECTORY.glob('**/*.ipynb')
for notebook in notebooks:
kernel_name = get_kernel_name(str(notebook))
assert kernel_name == TUTORIAL_KERNEL_NAME
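
# For reference, a minimal get_kernel_name could read the kernelspec out of the
# notebook JSON. This is a sketch only -- the real helper lives in
# tools/kernel_names.py and may differ:
#
#     import json
#     def get_kernel_name(notebook_path):
#         with open(notebook_path) as f:
#             return json.load(f)["metadata"]["kernelspec"]["name"]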
|
bebf26db5649817c9838ecaec116192c08813eb8
|
cb43c98e13ad13d21cf977fe19181f80c87a6e28
|
/audiofield/fields.py
|
fa2858382d32a985690e2304464d0165ce7d4ed7
|
[
"MIT"
] |
permissive
|
areski/django-audiofield
|
1003afe0d0ecf5bbbe8c041aff5af6ab951c3a08
|
80334c95a3444c133a4452df990eba20ab0f4789
|
refs/heads/develop
| 2023-02-25T02:08:54.617215
| 2023-02-20T08:03:23
| 2023-02-20T08:03:23
| 2,941,495
| 149
| 58
|
NOASSERTION
| 2023-02-20T08:03:24
| 2011-12-08T16:53:48
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 12,291
|
py
|
fields.py
|
#
# django-audiofield License
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2014 Star2Billing S.L.
#
# The Initial Developer of the Original Code is
# Arezqui Belaid <info@star2billing.com>
#
from django.db.models.fields.files import FileField
from django.db.models import signals
from django.conf import settings
from django.core.files.storage import FileSystemStorage
from django.utils.translation import gettext_lazy as _
from django import forms
from celery.utils.log import get_task_logger
from audiofield.middleware import threadlocals
from audiofield.tasks import audio_convert_task
import os
import subprocess
import shutil
from random import choice, seed
seed()
logger = get_task_logger(__name__)
CONVERT_TYPE_CHK = {0: 'org', 1: 'mp3', 2: 'wav', 3: 'ogg'}
def random_string(char_length=5, digit_length=10):
chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
digit = "1234567890"
pass_str_char = ''.join([choice(chars) for i in range(char_length)])
pass_str_digit = ''.join([choice(digit) for i in range(digit_length)])
return pass_str_char + '-' + pass_str_digit
class StdAudioField:
'''
Instances of this class will be used to access data of the converted
audio files
'''
def __init__(self, name):
self.name = name
self.storage = FileSystemStorage()
def path(self):
return self.storage.path(self.name)
def url(self):
return self.storage.url(self.name)
def size(self):
return self.storage.size(self.name)
class AudioField(FileField):
'''
Django field that behaves as FileField, with some extra features like:
- Audio Player
- Delete specific file
- Convert to specific format
'''
size = None
uuid = None
filename_prefix = 'audio-file-'
def __init__(self, *args, **kwargs):
"""Get allowed file extension type (ex. mp3, wav)"""
ext_whitelist = kwargs.pop("ext_whitelist", tuple())
self.ext_whitelist = [i.lower() for i in ext_whitelist]
super(AudioField, self).__init__(*args, **kwargs)
def clean(self, data, initial=None):
"""Audio field validation for file extension"""
data = super(AudioField, self).clean(data, initial)
request = threadlocals.get_current_request()
filename = data.name
ext = os.path.splitext(filename)[1]
ext = ext.lower()
if ext not in self.ext_whitelist:
error_msg = _("not allowed filetype!")
logger.error(error_msg)
raise forms.ValidationError(error_msg)
        convert_to = int(request.POST["convert_type"]) if request else 0
        ext = ext.split('.')[1]
        audio_type = CONVERT_TYPE_CHK[convert_to]
error_msg = _("not allowed : file format conversion is not allowed for same audio type (except Wav)")
        if convert_to and ext == audio_type and ext != 'wav':
            error_msg += ' %s format !!' % ext
            log.error(error_msg)
            raise forms.ValidationError(error_msg)
        return data
def _get_converted_filename(self, filename):
# Not used
'''Returns the audio converted name associated to the standard audio filename
* Example: /var/www/myproject/media/audio/picture_1.wav
will return /var/www/myproject/media/audio/picture_1.converted.wav
'''
splitted_filename = list(os.path.splitext(filename))
splitted_filename.insert(1, '.converted')
logger.debug('converted file name')
return ''.join(splitted_filename)
def _convert_audio(self, filename, instance=None, ext=None):
'''Convert uploaded audio file to selected format'''
request = threadlocals.get_current_request()
convert_type = 0
channel_no = 0
freq_value = 0
nbchannels = 1
remix = ''
if 'convert_type' in request.POST:
convert_type = int(request.POST["convert_type"])
if 'channel_type' in request.POST:
channel_no = int(request.POST["channel_type"])
if 'freq_type' in request.POST:
freq_value = int(request.POST["freq_type"])
logger.info("convert audio : %s->%s" % (str(ext), CONVERT_TYPE_CHK[convert_type]))
splitted_filename = list(os.path.splitext(filename))[0] # converted filename without ext
filename_temp = filename[:-4] + '_temp'
# Find the number of channels
if os.path.isfile(filename):
command = "soxi -c %s" % filename
response = subprocess.Popen(command.split(' '), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(output, error) = response.communicate()
            nbchannels = int(output)
        # prepare Sox parameters for channel conversion
        conv_channel = "-e signed-integer -c %s" % str(channel_no) if channel_no > 0 else ''
        # prepare Sox parameters for frequency conversion
        conv_freq = "-r %s" % str(freq_value) if freq_value > 0 else ''
if nbchannels == 2:
# sox input.wav output.wav `remix -` performs a mix-down of all input channels to mono.
remix = 'remix -'
# 1) MP3 TO WAV
if ext == 'mp3' and CONVERT_TYPE_CHK[convert_type] == 'wav':
logger.debug("convert MP3 to WAV - channel %s freq: %s" % (str(channel_no), str(freq_value)))
conv = "sox %s %s %s %s.wav %s" % (filename, conv_freq, conv_channel, splitted_filename, remix)
            conv = conv.replace('  ', ' ')
result = audio_convert_task.delay(conv)
logger.debug("Sox command :> %s" % conv)
# 2) MP3 TO OGG
if ext == 'mp3' and CONVERT_TYPE_CHK[convert_type] == 'ogg':
logger.debug('MP3 to OGG')
conv = "dir2ogg -q 4 %s" % (filename)
result = audio_convert_task.delay(conv)
# 3) WAV TO MP3
if ext == 'wav' and CONVERT_TYPE_CHK[convert_type] == 'mp3':
logger.debug('WAV to MP3')
conv = "sox %s %s.mp3 %s" % (filename, splitted_filename, remix)
result = audio_convert_task.delay(conv)
logger.debug("Sox command :> %s" % conv)
        # 4) WAV TO WAV
if ext == 'wav' and CONVERT_TYPE_CHK[convert_type] == 'wav':
# if nbchannels == 2:
# remix = 'remix 1,2i'
filename_temp = filename_temp + '.wav'
conv = "sox %s %s %s %s.wav %s" % (filename_temp, conv_freq, conv_channel, splitted_filename, remix)
            conv = conv.replace('  ', ' ')
# cmd = 'sox /usr/share/newfies/../newfies/usermedia/upload/audiofiles/audio-file-XFPQN-6216731785_temp.wav -r 8000 -e signed-integer -c 1 /usr/share/newfies/../newfies/usermedia/upload/audiofiles/audio-file-XFPQN-6216731785.wav'
# create a temp copy of the file
shutil.copy2(filename, filename_temp)
result = audio_convert_task.delay(conv)
logger.debug("result :> %s" % str(result))
        # 5) WAV TO OGG
if ext == 'wav' and CONVERT_TYPE_CHK[convert_type] == 'ogg':
logger.debug('WAV to OGG')
conv = "sox %s %s.ogg %s" % (filename, splitted_filename, remix)
result = audio_convert_task.delay(conv)
        # 6) OGG TO MP3
if ext == 'ogg' and CONVERT_TYPE_CHK[convert_type] == 'mp3':
logger.debug('OGG to MP3')
conv = "sox %s %s.mp3%s" % (filename, splitted_filename, remix)
result = audio_convert_task.delay(conv)
        # 7) OGG TO WAV
if ext == 'ogg' and CONVERT_TYPE_CHK[convert_type] == 'wav':
logger.debug('OGG to WAV')
# conv = "sox %s %s.wav" % (filename, splitted_filename)
conv = "avconv -i %s -map_metadata 0:s:0 %s.wav" % (filename, splitted_filename)
result = audio_convert_task.delay(conv)
def _rename_audio(self, instance=None, **kwargs):
'''Rename uploaded audio file & calls methods to convert audio file format if
convert_to is selected'''
if getattr(instance, self.name):
filename = getattr(instance, self.name).path
# Get the extension and limit to 3 chars
ext = os.path.splitext(filename)[1].lower()[:4]
# Get new file name and make sure it's unique
dst = self.generate_filename(instance, '%s%s%s' % (self.filename_prefix, self.uuid, ext))
dst_fullpath = os.path.join(settings.MEDIA_ROOT, dst)
# Same file should not exits
if not os.path.isfile(dst_fullpath):
if os.path.abspath(filename) != os.path.abspath(dst_fullpath):
os.rename(filename, dst_fullpath)
self._convert_audio(dst_fullpath, instance, ext[1:4])
request = threadlocals.get_current_request()
convert_type = int(request.POST["convert_type"])
# 0 => Keep original
if convert_type > 0:
# Delete original audio file
if os.path.exists(dst_fullpath):
# Check for no .. and no *
# DISABLED Delete file
"""
if dst_fullpath.find('../../') == -1 and dst_fullpath.find('*') == -1:
os.remove(dst_fullpath)
"""
ext = '.' + CONVERT_TYPE_CHK[convert_type]
dst = self.generate_filename(instance, '%s%s%s' %
(self.filename_prefix, self.uuid, ext))
setattr(instance, self.attname, dst)
instance.save()
else:
error_msg = ("file already exists!")
logger.error(error_msg)
def _set_audio_converted(self, instance=None, **kwargs):
'''Creates a "audio_field" object as attribute of the FileField instance
audio_field attribute will be of the same class of original file, so
"path", "url", "name"... properties can be used'''
if getattr(instance, self.name):
filename = self.generate_filename(instance, os.path.basename(getattr(instance, self.name).path))
audio_field = StdAudioField(filename)
setattr(getattr(instance, self.name), 'audio_converted', audio_field)
def formfield(self, **kwargs):
'''Specify form field and widget to be used on the forms'''
from audiofield.widgets import AdminAudioFileWidget
from audiofield.forms import AudioFormField
kwargs['widget'] = AdminAudioFileWidget
kwargs['form_class'] = AudioFormField
return super(AudioField, self).formfield(**kwargs)
def save_form_data(self, instance, data):
'''Overwrite save_form_data to delete audio files if "delete" checkbox
is selected'''
if data == '__deleted__':
filename = getattr(instance, self.name).path
if os.path.exists(filename):
# Check for no .. and no *
if filename.find('../../') == -1 and filename.find('*') == -1:
os.remove(filename)
setattr(instance, self.name, None)
else:
self.uuid = random_string(5, 10)
super(AudioField, self).save_form_data(instance, data)
def contribute_to_class(self, cls, name):
'''Call methods for generating all operations on specified signals'''
super(AudioField, self).contribute_to_class(cls, name)
signals.post_save.connect(self._rename_audio, sender=cls)
signals.post_init.connect(self._set_audio_converted, sender=cls)
try:
from south.modelsinspector import add_introspection_rules
add_introspection_rules([(
[AudioField],
[],
{
"ext_whitelist": ["ext_whitelist", {}],
},
),
], ["^audiofield\.fields\.AudioField"])
except ImportError:
# South is not enabled
pass
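
# Hypothetical model usage (illustrative only; the model, field name and
# whitelist below are assumptions, not taken from the original source):
#
#     class Track(models.Model):
#         audio_file = AudioField(upload_to='upload/audiofiles',
#                                 ext_whitelist=('.mp3', '.wav', '.ogg'),
#                                 blank=True)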
|
0dad0d2585191a17332cec484c3a0fa2cdc24bf9
|
aa2ae30a88361b4b80ffa28c4d8a54600bbee542
|
/Chapter17/lib/common.py
|
2135d9a6ed293ec2f4bbd4b38be17ae326296537
|
[
"MIT"
] |
permissive
|
PacktPublishing/Deep-Reinforcement-Learning-Hands-On-Second-Edition
|
6728fadb38076f6243da3d98b1cf18faf6b287af
|
d5a421d63c6d3ebbdfa54537fa5ce485bc2b9220
|
refs/heads/master
| 2023-07-05T23:08:32.621622
| 2022-01-17T12:18:54
| 2022-01-17T12:18:54
| 195,020,985
| 963
| 491
|
MIT
| 2023-03-25T01:00:07
| 2019-07-03T09:21:47
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,051
|
py
|
common.py
|
import numpy as np
import torch
import ptan
def unpack_batch_a2c(batch, net, last_val_gamma, device="cpu"):
"""
Convert batch into training tensors
:param batch:
:param net:
:return: states variable, actions tensor, reference values variable
"""
states = []
actions = []
rewards = []
not_done_idx = []
last_states = []
for idx, exp in enumerate(batch):
states.append(exp.state)
actions.append(exp.action)
rewards.append(exp.reward)
if exp.last_state is not None:
not_done_idx.append(idx)
last_states.append(exp.last_state)
states_v = ptan.agent.float32_preprocessor(states).to(device)
actions_v = torch.FloatTensor(actions).to(device)
# handle rewards
rewards_np = np.array(rewards, dtype=np.float32)
if not_done_idx:
last_states_v = ptan.agent.float32_preprocessor(last_states).to(device)
last_vals_v = net(last_states_v)[2]
last_vals_np = last_vals_v.data.cpu().numpy()[:, 0]
rewards_np[not_done_idx] += last_val_gamma * last_vals_np
ref_vals_v = torch.FloatTensor(rewards_np).to(device)
return states_v, actions_v, ref_vals_v
def unpack_batch_ddqn(batch, device="cpu"):
states, actions, rewards, dones, last_states = [], [], [], [], []
for exp in batch:
states.append(exp.state)
actions.append(exp.action)
rewards.append(exp.reward)
dones.append(exp.last_state is None)
if exp.last_state is None:
last_states.append(exp.state)
else:
last_states.append(exp.last_state)
states_v = ptan.agent.float32_preprocessor(states).to(device)
actions_v = ptan.agent.float32_preprocessor(actions).to(device)
rewards_v = ptan.agent.float32_preprocessor(rewards).to(device)
last_states_v = ptan.agent.float32_preprocessor(last_states).to(device)
dones_t = torch.BoolTensor(dones).to(device)
return states_v, actions_v, rewards_v, dones_t, last_states_v
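
# Minimal smoke-test sketch (illustrative; it assumes only that experience
# entries expose .state/.action/.reward/.last_state, which is all the
# functions above rely on):
#
#     from collections import namedtuple
#     Exp = namedtuple("Exp", "state action reward last_state")
#     batch = [Exp(np.zeros(3), np.zeros(1), 1.0, None),
#              Exp(np.ones(3), np.ones(1), 0.5, np.ones(3))]
#     states_v, actions_v, rewards_v, dones_t, last_states_v = unpack_batch_ddqn(batch)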
|
79d1312bea50dd72ce0ebb4535608a570fa17c19
|
631fa563ce427d7851ec7a1839532fd06d94ed45
|
/pymodules/elf_parser.py
|
5b5fa72056f23097065d44562ba5e39346fa58b7
|
[
"Apache-2.0"
] |
permissive
|
cojocar/bin2llvm
|
72d6e5821834becf8e04208673c0f52bb586711e
|
56f20d39b4dd0ece862067158220aabeb0121426
|
refs/heads/master
| 2020-12-30T15:41:51.225129
| 2018-06-05T12:43:23
| 2018-06-05T12:43:29
| 91,162,880
| 144
| 17
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,597
|
py
|
elf_parser.py
|
#
# Copyright 2017 The bin2llvm Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###############################################################################
from elftools.elf.elffile import ELFFile
from elftools.elf.enums import ENUM_P_TYPE
import subprocess
import os
import json
import logging
logging.basicConfig()
log = logging.getLogger("translator")
def save_chunk(dst_path, src_path, offset, size):
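    """Copy `size` bytes starting at `offset` from src_path into dst_path,
    zero-padding when the source file holds fewer bytes than requested."""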
log.debug("[Translator] Saving chunk to %s [off=%08x,size=%08x]" % (dst_path, offset, size))
dst = open(dst_path, 'wb')
with open(src_path, 'rb') as f:
f.seek(offset)
data = f.read(size)
# add padding
if len(data) < size:
            data = data + b'\x00' * (size - len(data))
dst.write(data[:size])
dst.close()
def do_elf(path_to_elf, out_dir='/tmp'):
"""This function reads information from elf file and translates it to
appropiate config structure.
"""
ret = {}
elf = ELFFile(open(path_to_elf, 'rb'))
if 'ARM' != elf.get_machine_arch():
raise Exception("Architecture not supported")
ret['architecture'] = 'arm'
ret['cpu_model'] = 'arm926'
ret['endianness'] = 'little' if elf.little_endian else 'big'
ret['entry_address'] = [elf.header.e_entry]
thumb_targets = []
targets = []
for sec in elf.iter_sections():
if sec.name.startswith(b'.symtab'):
log.info("[Translator] binary contains symbols! Using those instead of the single entry")
ret['entry_address'] = []
# nm can run on any type of elf binary
            p = subprocess.Popen(['nm', path_to_elf], stdout=subprocess.PIPE,
                                 universal_newlines=True)
out, _ = p.communicate()
for l in out.split('\n'):
try:
addr, t, name = l.split(' ')
except:
continue
if (t == 't' or t == 'T') and not name.startswith('$'):
targets.append(int(addr, 16))
# call readelf -s for getting the thumb bit
# somehow, the $a and $t are not always generated?
            p = subprocess.Popen(['readelf', '-s', path_to_elf],
                                 stdout=subprocess.PIPE, universal_newlines=True)
out, _ = p.communicate()
for l in out.split('\n'):
try:
_, addr, _, t, _, _, _, name = l.split()
except:
#print("QQ: %s: %d" % (l, len(l.split(' '))))
#print(str(l.split(None)))
continue
if t != 'FUNC':
continue
jumpPC = int(addr, 16)
if jumpPC & 1 == 0x1:
thumb_targets.append(jumpPC & -2)
segments = []
cnt = 0
mapped_targets = []
mapped_thumb_targets = []
for i in range(elf.num_segments()):
seg = elf.get_segment(i)
if seg.header.p_type != 'PT_LOAD':
continue
#print(dir(seg.header))
assert(seg.header.p_paddr == seg.header.p_vaddr)
padding = seg.header.p_paddr % 4096
#assert(seg.header.p_paddr % 4096 == 0)
new_section = {}
s = max(seg.header.p_memsz, seg.header.p_filesz)
# round up to 4k
if s % 4096 != 0:
s = 4096*int((s+4096)/4096)
s += padding
# round up to 4k
if s % 4096 != 0:
s = 4096*int((s+4096)/4096)
# build segment info
segm_name = 'seg-'+str(cnt)+'.bin'
segm_file = os.path.join(out_dir, segm_name)
offset = seg.header.p_offset-padding
assert(offset >= 0)
segm_desc = {}
segm_desc['file'] = segm_file
segm_desc['size'] = s
segm_desc['address'] = seg.header.p_paddr - padding
segm_desc['name'] = segm_name
# save chunk
save_chunk(segm_file, path_to_elf, offset, s)
cnt += 1
segments.append(segm_desc)
log.debug("[Translator] loaded %s%08x@%08x" % \
(seg.header.p_type, seg.header.p_paddr, \
seg.header.p_offset))
def inside_segment(e):
return e >= segm_desc['address'] and \
e < (segm_desc['address'] + \
segm_desc['size'])
# filter data
# map() is lazy on Python 3, so use extend() to actually collect the hits
mapped_targets.extend(filter(inside_segment, targets))
mapped_thumb_targets.extend(filter(inside_segment, thumb_targets))
ret['segments'] = segments
# unique
mapped_thumb_targets = sorted(list(set(mapped_thumb_targets)))
mapped_targets = sorted(list(set(mapped_targets)))
log.debug("[Translator] elf: %d entries and %d thumb bits" % \
(len(mapped_targets), len(mapped_thumb_targets)))
if len(mapped_thumb_targets) > 0:
fout = os.path.join(out_dir, "is-thumb-initial.json")
with open(fout, 'wt') as f:
f.write(json.dumps(mapped_thumb_targets))
else:
fout = None
ret['entry_address'].extend(mapped_targets)
return ret, fout
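if __name__ == '__main__':
    # Usage sketch (editor's note, illustrative; the firmware path is an
    # assumption): do_elf() writes one seg-N.bin per PT_LOAD segment into
    # out_dir and returns the config dict plus the path of the thumb-bit
    # JSON file (or None when no thumb entry points were found).
    cfg, thumb_json = do_elf('/tmp/firmware.elf', out_dir='/tmp')
    print(json.dumps(cfg, indent=2))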
|
6b21ecbe09fa9a6ebbf95a6d8f2f1ac699d9f8ff
|
d50ec43131be668368200315d1d9d307071d5385
|
/keanu-python/nd4j/nd4j/__init__.py
|
5813c29617761e551579fa97c7aa7d0d2082731d
|
[
"MIT"
] |
permissive
|
improbable-research/keanu
|
605e4dc6a2f90f095c2c1ec91fa1222ae8d04530
|
99de10a15e0d4b33d323093a5cc2dd10b31c9954
|
refs/heads/develop
| 2023-04-14T01:17:29.130975
| 2021-09-21T10:24:48
| 2021-09-21T10:24:48
| 128,393,918
| 155
| 47
|
MIT
| 2023-04-12T00:18:07
| 2018-04-06T12:48:36
|
Java
|
UTF-8
|
Python
| false
| false
| 183
|
py
|
__init__.py
|
from pkg_resources import resource_filename
from .__version__ import __version__
def get_classpath():
classpath = resource_filename(__name__, 'classpath')
return classpath
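if __name__ == '__main__':
    # Usage sketch (editor's note): print the bundled classpath directory;
    # how it is consumed (e.g. handed to a JVM launcher) is an assumption.
    print(get_classpath())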
|
2b80471a8c09594f787ffae42a156219816363b3
|
ea401c3e792a50364fe11f7cea0f35f99e8f4bde
|
/hackathon/HePing/SWCGAN/train.py
|
f92516cedd69722779e44dc44ac24f52adefcd0a
|
[
"MIT"
] |
permissive
|
Vaa3D/vaa3d_tools
|
edb696aa3b9b59acaf83d6d27c6ae0a14bf75fe9
|
e6974d5223ae70474efaa85e1253f5df1814fae8
|
refs/heads/master
| 2023-08-03T06:12:01.013752
| 2023-08-02T07:26:01
| 2023-08-02T07:26:01
| 50,527,925
| 107
| 86
|
MIT
| 2023-05-22T23:43:48
| 2016-01-27T18:19:17
|
C++
|
UTF-8
|
Python
| false
| false
| 12,365
|
py
|
train.py
|
import modelDCGAN1 as models
from keras.optimizers import RMSprop, Adagrad, Adam,SGD
import numpy as np
from neuron_dataset import *
import numpy.linalg as LA
import plot
import pickle
import h5py
import os  # needed by save_model_weights (os.path.join, os.mkdir)
from datetime import datetime
def save_swc(X_locations, X_parent, path, epoch, batch):
"""
Save a generated neuron morphology to an SWC file.
:param X_locations:
:param X_parent:
:return:
"""
locations = np.squeeze(X_locations) # remove one dimension
# print(locations)
parent = np.squeeze(X_parent).argmax(axis=1) + 1 # argmax:Returns the indices of the maximum values along an axis.
#print("parent : ",parent)
full = np.zeros([parent.shape[0]+1, parent.shape[0]+1])
full[range(1, parent.shape[0]+1), parent - 1] = 1 # adjacency matrix: row i marks the parent of node i
#print(full)
full = LA.pinv(np.eye(parent.shape[0]+1) - full) # (I - A)^-1 accumulates each node's path back to the soma
# print(full)
locations = np.dot(full, np.append(np.zeros([1, 3]), locations, axis=0)) # prepend the soma at the origin, then turn per-node offsets into absolute coordinates
# print("locations: ",locations)
M = np.zeros([parent.shape[0]+1, 7])
M[:, 0] = np.arange(1, parent.shape[0]+2)
M[0, 1] = 1
M[1:, 1] = 3 # type
M[:, 2:5] = locations
M[:, 5] = 1.0
M[1:, 6] = parent
M[0, 6] = -1 # soma
path = ('%s/epoch%s_batch%s.swc' % (path, epoch, batch))
print(path, "\n")
# write swc
f = open(path, 'w')
for i in range(M.shape[0]):
f.write("{0} {1} {2} {3} {4} {5} {6}\n".format(int(M[i][0]), int(M[i][1]), np.float16(M[i][2]), np.float16(M[i][3]), np.float16(M[i][4]), M[i][5], int(M[i][6])))
f.close()
def save_model_weights(g_model, m_model, d_model,
level, epoch, batch, list_d_loss, model_path_root):
"""
Save model weights.
Parameters
----------
g_model: keras model object
geometry generator model
m_model: keras model object
morphology generator model
d_model: keras model object
discriminator model
level: int
level in the hierarchy
epoch: int
epoch #
batch: int
mini-batch #
list_d_loss: list
list of discriminator loss trace
model_path_root: str
path where model files should be saved
"""
model_path = ('%s/level%s' % (model_path_root, level))
if not os.path.exists(model_path):
os.mkdir(model_path) # create directory
print(model_path)
print("\n")
g_file = os.path.join(model_path, '%s_epoch_%s_batch_%s.h5' % (g_model.name, epoch, batch))
print(g_file)
g_model.save_weights(g_file, overwrite=True)
m_file = os.path.join(model_path, '%s_epoch_%s_batch_%s.h5' %
(m_model.name, epoch, batch))
print(m_file)
m_model.save_weights(m_file, overwrite=True)
d_file = os.path.join(model_path, '%s_epoch_%s_batch_%s.h5' %
(d_model.name, epoch, batch))
print(d_file)
d_model.save_weights(d_file, overwrite=True)
d_loss_file = os.path.join(model_path, '%s_epoch_%s_batch_%s.h5' %
('DiscLoss', epoch, batch))
print(d_loss_file)
pickle.dump(list_d_loss, open(d_loss_file, "wb"))
def train_model(input_dim=100, n_nodes=256, batch_size=16, n_epochs=100, d_iters=5, g_iters=5, verbose=True):
geom_model = list()
morph_model = list()
disc_model = list()
gan_model = list()
# Discriminator
d_model = models.discriminator(n_nodes=n_nodes,
batch_size=batch_size)
# Generators and GANs
g_model, m_model = \
models.generator(n_nodes=n_nodes,
noise_dim=input_dim,
batch_size=batch_size,
)
stacked_model = \
models.discriminator_on_generator(g_model,
m_model,
d_model,
input_dim=input_dim)
# Collect all models into a list
disc_model.append(d_model)
geom_model.append(g_model)
morph_model.append(m_model)
gan_model.append(stacked_model)
# ###############
# Optimizers
# ###############
optim_d = Adagrad() # RMSprop(lr=lr_discriminator)
optim_g = Adagrad() # RMSprop(lr=lr_generator)
# ##############
# Train
# ##############
# ---------------
# Compile models
# ---------------
g_model.compile(loss='mse', optimizer=optim_g)
m_model.compile(loss='mse', optimizer=optim_g)
d_model.trainable = False
stacked_model.compile(loss='binary_crossentropy',
optimizer=optim_g)
d_model.trainable = True
d_model.compile(loss='binary_crossentropy',
optimizer=optim_d)
if verbose:
print(20*'--')
print("start loop epoch\n")
BASE_DIR = os.path.dirname(os.path.abspath(__file__)) # get this file full path and remove filename
ROOT_DIR = BASE_DIR
DATA_PATH = os.path.join(ROOT_DIR, 'data/neuron_data1')
f = open('./data/log', 'w')
for e in range(n_epochs):
g_iters = 0
if verbose:
print("")
print("Epoch #{0}".format(e))
print("")
TRAIN_DATASET = neuron_dataset(root=DATA_PATH, batch_size=batch_size, npoints=n_nodes, num_channels=3)
batch_data = {}
cur_batch_data_geo = np.zeros([batch_size, n_nodes, 3])
cur_batch_data_morp = np.zeros([batch_size, n_nodes, n_nodes])
batch_idx = 0
while TRAIN_DATASET.has_next_batch():
print(str(datetime.now()))
print("epoch:", e, " batch:", batch_idx, "\n")
f.writelines(str(datetime.now())+"\n")
batch_data['geometry'], batch_data['morphology'] = TRAIN_DATASET.next_batch(augment=False) # ----------------- not augment now----------------
# bsize = batch_data['geometry'].shape[0]
cur_batch_data_geo[0:batch_size, :, :] = batch_data['geometry']
cur_batch_data_morp[0:batch_size, :, :] = batch_data['morphology']
# print(cur_batch_data_geo.shape, cur_batch_data_morp.shape)
list_d_loss = list()
list_g_loss = list()
# ----------------------------
# Step 1: Train discriminator
# ----------------------------
print("Step 1: Train discriminator\n")
for d_iter in range(d_iters):
print("discriminator pretraining {0}".format(d_iter))
X_parent_real = cur_batch_data_morp
X_locations_real = cur_batch_data_geo
noise_code = np.random.rand(batch_size, 1, input_dim) # Random values in a given shape
print("start predict")
X_locations_gen = g_model.predict(noise_code) # (batch_size,n_nodes,3) then delete soma location,soma(0,0,0)
X_parent_gen = m_model.predict(noise_code) # (batch_size,n_nodes,n_nodes)
# parent = np.squeeze(X_parent_gen).argmax(axis=2) # batch_size, n_nodes
# print(parent)
# X_parent_gen_proc = np.zeros((batch_size, n_nodes, n_nodes))
# X_parent_gen_proc[:, :, parent[:, :]] = 1
# print("generate parent")
# print(X_parent_gen_proc)
#X_parent_gen[:, 0:1, :] = np.zeros(shape=(batch_size, 1, n_nodes))
# print(X_parent_gen.shape)
y_real = np.ones((X_locations_real.shape[0], 1, 1))
y_gen = np.zeros((X_locations_gen.shape[0], 1, 1))
cutting = int(batch_size / 2)
X_locations_real_first_half = np.append(X_locations_real[:cutting, :, :],
X_locations_gen[:cutting, :, :],
axis=0)
X_parent_real_first_half = np.append(X_parent_real[:cutting, :, :],
X_parent_gen[:cutting, :, :],
axis=0)
y_real_first_half = np.append(y_real[:cutting, :, :],
y_gen[:cutting, :, :],
axis=0)
X_locations_real_second_half = np.append(X_locations_real[cutting:, :, :],
X_locations_gen[cutting:, :, :],
axis=0)
X_parent_real_second_half = np.append(X_parent_real[cutting:, :, :],
X_parent_gen[cutting:, :, :],  # bug fix: pair real parents with generated ones, matching the y labels below
axis=0)
y_real_second_half = np.append(y_real[cutting:, :, :],
y_gen[cutting:, :, :],
axis=0)
disc_loss = d_model.train_on_batch([X_locations_real_first_half,X_parent_real_first_half], y_real_first_half)
#disc_loss = d_model.train_on_batch([X_locations_real, X_parent_real], y_real)
list_d_loss.append(disc_loss)
print(disc_loss)
disc_loss = d_model.train_on_batch([X_locations_real_second_half, X_parent_real_second_half], y_real_second_half)
#disc_loss = d_model.train_on_batch([X_locations_gen, X_parent_gen], y_gen)
list_d_loss.append(disc_loss)
print(disc_loss)
if verbose:
print("After{0} iterations".format(d_iters))
print("Discriminator loss = {0}".format(disc_loss))
f.writelines('epoch:{0}, batch:{1}, After {2} iterations\n'.format(e,batch_idx,d_iters))
f.writelines('discriminator loss: {0}\n'.format(disc_loss))
# -------------------------------
# step 2: train generators alternately
# ---------------------------------
# Freeze the discriminator
print("step2 : train generations alternately\n")
d_model.trainable = False
noise_input = np.random.rand(batch_size, 1, input_dim)
# print(noise_code)
gen_loss = \
stacked_model.train_on_batch([noise_input],
y_real)
list_g_loss.append(gen_loss)
if verbose:
print("")
print(" Generator_Loss: {0}".format(gen_loss))
f.writelines('epoch:{0}, batch:{1}, generator loss: {2}\n'.format(e,batch_idx,gen_loss))
f.flush()
# Unfreeze the discriminator
d_model.trainable = True
g_iters += 1
batch_idx += 1
# Save model weights (few times per epoch)
if batch_idx % 5 == 0:
if verbose:
print("level #{0} Epoch #{1} Batch #{2}".format(1, e, batch_idx))
save_swc(X_locations_gen[0, 1:, :], X_parent_gen[0, 1:, :], path='D:/gen_vir_experiment_code/swcgan_weight/generate_data', epoch=e, batch=batch_idx)
print("plot discriminator loss")
# plot.plot_loss_trace(list_d_loss, "discriminator loss")
# print("plot generator loss")
# plot.plot_loss_trace(list_g_loss, "generate loss")
#plot.plot_adjacency(X_parent_real[0:1, :, :], X_parent_gen[0:1, :, :], "parent real gen")
print("display loss trace\n")
save_model_weights(g_model, m_model, d_model, 0, e, batch_idx, list_d_loss, model_path_root= 'D:/gen_vir_experiment_code/swcgan_weight')
# save model
geom_model = g_model
morph_model = m_model
disc_model = d_model
gan_model = stacked_model
f.close()
return geom_model, \
morph_model, \
disc_model, \
gan_model
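if __name__ == '__main__':
    # Usage sketch (editor's note, illustrative): the arguments repeat the
    # defaults above; note that the data and checkpoint paths are
    # hard-coded inside train_model().
    g, m, d, gan = train_model(input_dim=100, n_nodes=256, batch_size=16,
                               n_epochs=100, d_iters=5, g_iters=5)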
|
6e4630e98f3266e1320c84946288386909f54792
|
c6b13016ff1c07fd612e27a4cf33dc4cba4146d5
|
/test/test_target.py
|
781eb0cfdef3f259c65d7ce42b02f06f72d1a202
|
[
"MIT"
] |
permissive
|
m3dev/gokart
|
64bd6c40197b002587d013ac9e2b61315451812a
|
f5a368304f9cb8db8d392f15f0f9c0ac956cd999
|
refs/heads/master
| 2023-08-04T09:10:53.567582
| 2023-08-02T20:38:11
| 2023-08-02T20:38:11
| 162,871,731
| 308
| 59
|
MIT
| 2023-08-02T20:38:13
| 2018-12-23T07:40:27
|
Python
|
UTF-8
|
Python
| false
| false
| 8,964
|
py
|
test_target.py
|
import io
import os
import shutil
import unittest
from datetime import datetime
from unittest.mock import patch
import boto3
import numpy as np
import pandas as pd
from matplotlib import pyplot
from moto import mock_s3
from gokart.file_processor import _ChunkedLargeFileReader
from gokart.target import make_model_target, make_target
def _get_temporary_directory():
return os.path.abspath(os.path.join(os.path.dirname(__name__), 'temporary'))
class LocalTargetTest(unittest.TestCase):
def tearDown(self):
shutil.rmtree(_get_temporary_directory(), ignore_errors=True)
def test_save_and_load_pickle_file(self):
obj = 1
file_path = os.path.join(_get_temporary_directory(), 'test.pkl')
target = make_target(file_path=file_path, unique_id=None)
target.dump(obj)
with unittest.mock.patch('gokart.file_processor._ChunkedLargeFileReader', wraps=_ChunkedLargeFileReader) as monkey:
loaded = target.load()
monkey.assert_called()
self.assertEqual(loaded, obj)
def test_save_and_load_text_file(self):
obj = 1
file_path = os.path.join(_get_temporary_directory(), 'test.txt')
target = make_target(file_path=file_path, unique_id=None)
target.dump(obj)
loaded = target.load()
self.assertEqual(loaded, [str(obj)], msg='should save an object as List[str].')
def test_save_and_load_gzip(self):
obj = 1
file_path = os.path.join(_get_temporary_directory(), 'test.gz')
target = make_target(file_path=file_path, unique_id=None)
target.dump(obj)
loaded = target.load()
self.assertEqual(loaded, [str(obj)], msg='should save an object as List[str].')
def test_save_and_load_npz(self):
obj = np.ones(shape=10, dtype=np.float32)
file_path = os.path.join(_get_temporary_directory(), 'test.npz')
target = make_target(file_path=file_path, unique_id=None)
target.dump(obj)
loaded = target.load()
np.testing.assert_almost_equal(obj, loaded)
def test_save_and_load_figure(self):
figure_binary = io.BytesIO()
pd.DataFrame(dict(x=range(10), y=range(10))).plot.scatter(x='x', y='y')
pyplot.savefig(figure_binary)
figure_binary.seek(0)
file_path = os.path.join(_get_temporary_directory(), 'test.png')
target = make_target(file_path=file_path, unique_id=None)
target.dump(figure_binary.read())
loaded = target.load()
self.assertGreater(len(loaded), 1000) # any binary
def test_save_and_load_csv(self):
obj = pd.DataFrame(dict(a=[1, 2], b=[3, 4]))
file_path = os.path.join(_get_temporary_directory(), 'test.csv')
target = make_target(file_path=file_path, unique_id=None)
target.dump(obj)
loaded = target.load()
pd.testing.assert_frame_equal(loaded, obj)
def test_save_and_load_tsv(self):
obj = pd.DataFrame(dict(a=[1, 2], b=[3, 4]))
file_path = os.path.join(_get_temporary_directory(), 'test.tsv')
target = make_target(file_path=file_path, unique_id=None)
target.dump(obj)
loaded = target.load()
pd.testing.assert_frame_equal(loaded, obj)
def test_save_and_load_parquet(self):
obj = pd.DataFrame(dict(a=[1, 2], b=[3, 4]))
file_path = os.path.join(_get_temporary_directory(), 'test.parquet')
target = make_target(file_path=file_path, unique_id=None)
target.dump(obj)
loaded = target.load()
pd.testing.assert_frame_equal(loaded, obj)
def test_save_and_load_feather(self):
obj = pd.DataFrame(dict(a=[1, 2], b=[3, 4]), index=pd.Index([33, 44], name='object_index'))
file_path = os.path.join(_get_temporary_directory(), 'test.feather')
target = make_target(file_path=file_path, unique_id=None)
target.dump(obj)
loaded = target.load()
pd.testing.assert_frame_equal(loaded, obj)
def test_save_and_load_feather_without_store_index_in_feather(self):
obj = pd.DataFrame(dict(a=[1, 2], b=[3, 4]), index=pd.Index([33, 44], name='object_index')).reset_index()
file_path = os.path.join(_get_temporary_directory(), 'test.feather')
target = make_target(file_path=file_path, unique_id=None, store_index_in_feather=False)
target.dump(obj)
loaded = target.load()
pd.testing.assert_frame_equal(loaded, obj)
def test_last_modified_time(self):
obj = pd.DataFrame(dict(a=[1, 2], b=[3, 4]))
file_path = os.path.join(_get_temporary_directory(), 'test.csv')
target = make_target(file_path=file_path, unique_id=None)
target.dump(obj)
t = target.last_modification_time()
self.assertIsInstance(t, datetime)
def test_last_modified_time_without_file(self):
file_path = os.path.join(_get_temporary_directory(), 'test.csv')
target = make_target(file_path=file_path, unique_id=None)
with self.assertRaises(FileNotFoundError):
target.last_modification_time()
def test_save_pandas_series(self):
obj = pd.Series(data=[1, 2], name='column_name')
file_path = os.path.join(_get_temporary_directory(), 'test.csv')
target = make_target(file_path=file_path, unique_id=None)
target.dump(obj)
loaded = target.load()
pd.testing.assert_series_equal(loaded['column_name'], obj)
def test_dump_with_lock(self):
with patch('gokart.target.wrap_with_dump_lock') as wrap_with_lock_mock:
obj = 1
file_path = os.path.join(_get_temporary_directory(), 'test.pkl')
target = make_target(file_path=file_path, unique_id=None)
target.dump(obj, lock_at_dump=True)
wrap_with_lock_mock.assert_called_once()
def test_dump_without_lock(self):
with patch('gokart.target.wrap_with_dump_lock') as wrap_with_lock_mock:
obj = 1
file_path = os.path.join(_get_temporary_directory(), 'test.pkl')
target = make_target(file_path=file_path, unique_id=None)
target.dump(obj, lock_at_dump=False)
wrap_with_lock_mock.assert_not_called()
class S3TargetTest(unittest.TestCase):
@mock_s3
def test_save_on_s3(self):
conn = boto3.resource('s3', region_name='us-east-1')
conn.create_bucket(Bucket='test')
obj = 1
file_path = os.path.join('s3://test/', 'test.pkl')
target = make_target(file_path=file_path, unique_id=None)
target.dump(obj)
loaded = target.load()
self.assertEqual(loaded, obj)
@mock_s3
def test_last_modified_time(self):
conn = boto3.resource('s3', region_name='us-east-1')
conn.create_bucket(Bucket='test')
obj = 1
file_path = os.path.join('s3://test/', 'test.pkl')
target = make_target(file_path=file_path, unique_id=None)
target.dump(obj)
t = target.last_modification_time()
self.assertIsInstance(t, datetime)
@mock_s3
def test_last_modified_time_without_file(self):
conn = boto3.resource('s3', region_name='us-east-1')
conn.create_bucket(Bucket='test')
file_path = os.path.join('s3://test/', 'test.pkl')
target = make_target(file_path=file_path, unique_id=None)
with self.assertRaises(FileNotFoundError):
target.last_modification_time()
class ModelTargetTest(unittest.TestCase):
def tearDown(self):
shutil.rmtree(_get_temporary_directory(), ignore_errors=True)
@staticmethod
def _save_function(obj, path):
make_target(file_path=path).dump(obj)
@staticmethod
def _load_function(path):
return make_target(file_path=path).load()
def test_model_target_on_local(self):
obj = 1
file_path = os.path.join(_get_temporary_directory(), 'test.zip')
target = make_model_target(file_path=file_path,
temporary_directory=_get_temporary_directory(),
save_function=self._save_function,
load_function=self._load_function)
target.dump(obj)
loaded = target.load()
self.assertEqual(loaded, obj)
@mock_s3
def test_model_target_on_s3(self):
conn = boto3.resource('s3', region_name='us-east-1')
conn.create_bucket(Bucket='test')
obj = 1
file_path = os.path.join('s3://test/', 'test.zip')
target = make_model_target(file_path=file_path,
temporary_directory=_get_temporary_directory(),
save_function=self._save_function,
load_function=self._load_function)
target.dump(obj)
loaded = target.load()
self.assertEqual(loaded, obj)
if __name__ == '__main__':
unittest.main()
|
7bca36f256566b9578533163c9cf60e00d67ca20
|
ec5d1ad8418dd62039e1dd8d6d2129ed3d7504de
|
/bridge/python/text_message_sender.py
|
70533c5cd79b15cb47cdd49f3b79004a43a366e4
|
[] |
no_license
|
yusufyilmazfr/tasarim-desenleri-turkce-kaynak
|
88feba7369fd4f2609f9dfe27d314f87a5214a7b
|
f666e998247d683a9f734f8c8802ab38c7da6915
|
refs/heads/master
| 2023-09-01T11:29:07.908507
| 2023-07-31T07:08:29
| 2023-07-31T07:08:29
| 244,465,123
| 3,298
| 448
| null | 2023-08-20T10:37:03
| 2020-03-02T20:10:59
|
HTML
|
UTF-8
|
Python
| false
| false
| 180
|
py
|
text_message_sender.py
|
from message_sender import MessageSender
class TextMessageCenter(MessageSender):
def send_message(self, message):
print("TextMessageSender: Sending text message...")
|
864cec53de526e0447e08c95ce792d522950efe3
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/DPGAnalysis/SiStripTools/python/seedmultiplicitymonitor_newtracking_cfi.py
|
1d453b1ae14a03c1a57ea850831c8eb262b6410d
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 1,926
|
py
|
seedmultiplicitymonitor_newtracking_cfi.py
|
import FWCore.ParameterSet.Config as cms
seedmultiplicitymonitor = cms.EDAnalyzer('SeedMultiplicityAnalyzer',
TTRHBuilder = cms.string('WithTrackAngle'),
seedCollections = cms.VPSet(cms.PSet(src=cms.InputTag("initialStepSeeds")),
cms.PSet(src=cms.InputTag("lowPtTripletStepSeeds")),
cms.PSet(src=cms.InputTag("pixelPairStepSeeds"),
maxValue=cms.untracked.double(500000),nBins=cms.untracked.uint32(2000)),
cms.PSet(src=cms.InputTag("detachedTripletStepSeeds")),
cms.PSet(src=cms.InputTag("mixedTripletStepSeedsA")),
cms.PSet(src=cms.InputTag("mixedTripletStepSeedsB")),
cms.PSet(src=cms.InputTag("mixedTripletStepSeeds"),
maxValue=cms.untracked.double(200000),nBins=cms.untracked.uint32(2000)),
cms.PSet(src=cms.InputTag("pixelLessStepSeeds"),
maxValue=cms.untracked.double(200000),nBins=cms.untracked.uint32(2000)),
cms.PSet(src=cms.InputTag("tobTecStepSeeds"))
),
multiplicityCorrelations = cms.VPSet()
)
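# Usage sketch (editor's note, illustrative; assumes a standard cmsRun
# configuration object named `process`):
#
#     process.load("DPGAnalysis.SiStripTools.seedmultiplicitymonitor_newtracking_cfi")
#     process.p = cms.Path(process.seedmultiplicitymonitor)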
|
23ecc0f87e2898c0f95b46ec9c1849048f883b43
|
6946f9a3e9d57b00ea275b2303ced0dedcdba1d4
|
/qf_lib/plotting/charts/candlestick_chart.py
|
36672a7b0057b8523b03a8fffd41b51af8c7f0b7
|
[
"Apache-2.0"
] |
permissive
|
quarkfin/qf-lib
|
8eaf76e3db385295ff8845b3250ba64a6fcfc7a6
|
f707e51bc2ff45f6e46dcdd24d59d83ce7dc4f94
|
refs/heads/master
| 2023-08-31T17:41:57.213680
| 2023-08-29T10:01:49
| 2023-08-29T10:01:49
| 202,696,503
| 379
| 51
|
Apache-2.0
| 2023-09-05T06:11:35
| 2019-08-16T09:10:20
|
Python
|
UTF-8
|
Python
| false
| false
| 6,159
|
py
|
candlestick_chart.py
|
# Copyright 2016-present CERN – European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple, List
import matplotlib as mpl
from pandas import date_range
from qf_lib.common.enums.price_field import PriceField
from qf_lib.containers.dataframe.prices_dataframe import PricesDataFrame
from qf_lib.containers.series.prices_series import PricesSeries
from qf_lib.containers.series.qf_series import QFSeries
from qf_lib.plotting.charts.chart import Chart
class CandlestickChart(Chart):
"""
Plots a Candlestick chart for a prices data frame. The data frame should contain at least the following columns:
- PriceField.Open
- PriceField.Close
- PriceField.High
- PriceField.Low
"""
def __init__(self, data: PricesDataFrame, title: str):
super().__init__()
assert all(price_field in data.columns for price_field
in [PriceField.Open, PriceField.Close, PriceField.High, PriceField.Low]), \
"In order to plot the Candlestick Chart the data frame requires open, high, low and close prices"
self.data = data
self.title = title
self.label_size = 5
mpl.rc('ytick', labelsize=self.label_size)
self.color_green = '#3CB371'
self.color_red = '#FA8072'
def plot(self, figsize: Tuple[float, float] = None) -> None:
self._setup_axes_if_necessary(figsize)
ax = self.axes
full_data = self.get_full_data_range(self.data)
data_len = len(full_data)
self.axes.set_xlim(0, data_len)
if data_len > 1500:
# plot line chart instead as there are too many bars
data = PricesSeries(full_data.reset_index(drop=True)[PriceField.Close])
self.axes.plot(data, linewidth=0.5)
else:
for index, data in enumerate(full_data.iterrows()):
_, price_values = data
self._plot_candlestick(price_values, index)
# Format the x axis to show up to 10 x tick labels
every_nth_tick = max(1, data_len // 10)  # guard against a zero step when there are fewer than 10 bars
ax.set_xticks(range(0, data_len, every_nth_tick))
ticks_to_be_plotted = full_data.index[::every_nth_tick]
ax.tick_params(axis='both', which='major', labelsize=self.label_size)
time_len = full_data.index[-1] - full_data.index[0]
if time_len.days > 10:
ticks_to_be_plotted = ticks_to_be_plotted.strftime('%Y-%m-%d')
ax.set_xticklabels(ticks_to_be_plotted)
else:
ax.set_xticklabels(ticks_to_be_plotted, rotation=7)
# Format the y axis
formatter = mpl.ticker.FormatStrFormatter('%.2f')
ax.yaxis.set_major_formatter(formatter)
ax.set_title(self.title)
self._apply_decorators()
def get_full_data_range(self, container):
full_range = date_range(self.data.index[0], self.data.index[-1],
freq=self.data.get_frequency()[PriceField.Close].to_pandas_freq())
full_data = container.reindex(full_range)
return full_data
def add_highlight(self, highlight_series: QFSeries):
"""
Add a background highlight to the plot. The highlights are based on the values in the highlight_series.
Parameters
------------
highlight_series: QFSeries
Series containing values used to highlight the background. In case of values > 0 the background colour
is set to green and in case of values < 0 the background colour is set to red. For all indices for which
the values are equal to 0 the original background color is preserved.
"""
full_series = self.get_full_data_range(highlight_series)
full_series = full_series.ffill()
self._setup_axes_if_necessary()
previous_value = 0
start_index = 0
margin = 0.5
for index, current_value in enumerate(full_series):
# add red or green highlights of exposures
if previous_value != current_value:
if previous_value == 1:
self.axes.axvspan(start_index - margin, index - margin, facecolor=self.color_green, alpha=0.3)
elif previous_value == -1:
self.axes.axvspan(start_index - margin, index - margin, facecolor=self.color_red, alpha=0.3)
start_index = index
previous_value = current_value
# plot last highlight at the end if needed
if previous_value == 1:
self.axes.axvspan(start_index - margin, len(full_series), facecolor=self.color_green, alpha=0.3)
elif previous_value == -1:
self.axes.axvspan(start_index - margin, len(full_series), facecolor=self.color_red, alpha=0.3)
def _plot_candlestick(self, data, index):
""" Create a green rectangle in case of a rising price or a red one in case of a falling price. """
color = self.color_green if data[PriceField.Close] > data[PriceField.Open] else self.color_red
self.axes.plot([index, index], [data[PriceField.Low], data[PriceField.High]],
linewidth=0.2, color='black', zorder=2)
rectangle = mpl.patches.Rectangle((index - 0.35, data[PriceField.Open]), 0.7,
(data[PriceField.Close] - data[PriceField.Open]),
facecolor=color, edgecolor='black', linewidth=0.3, zorder=3)
self.axes.add_patch(rectangle)
def apply_data_element_decorators(self, data_element_decorators: List["DataElementDecorator"]):
pass
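# Usage sketch (editor's note, illustrative; `prices` is an assumed
# PricesDataFrame with Open/High/Low/Close columns on a datetime index,
# `exposure` an assumed QFSeries of +1/-1/0 values):
#
#     chart = CandlestickChart(prices, title="Example instrument")
#     chart.add_highlight(exposure)
#     chart.plot(figsize=(8, 4))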
|
db3d71f87411984c6e5f9d1e27a2421a1dfc2063
|
150a7b11cb531f8bc2a045aefcf2ebe1d151efa3
|
/ocs_ci/utility/gcp.py
|
3c093376169a1f22326a6ae56ecee7d6390e0d3d
|
[
"MIT"
] |
permissive
|
red-hat-storage/ocs-ci
|
c7ac414e1b86552da0439223dfa9bca39977f31a
|
5e9e504957403148e413326f65c3769bf9d8eb39
|
refs/heads/master
| 2023-08-17T16:19:51.154403
| 2023-08-17T13:27:12
| 2023-08-17T13:27:12
| 179,558,938
| 146
| 210
|
MIT
| 2023-09-14T16:38:44
| 2019-04-04T19:00:02
|
Python
|
UTF-8
|
Python
| false
| false
| 3,497
|
py
|
gcp.py
|
# -*- coding: utf8 -*-
"""
Module for interactions with OCP/OCS Cluster on Google Cloud platform level.
It uses the libcloud_ module as much as possible; where that is not
feasible, modules from the `Google Cloud python libraries`_ can be used
as well, though this has not been necessary so far.
.. _libcloud: https://libcloud.readthedocs.io/en/latest/compute/drivers/gce.html
.. _`Google Cloud python libraries`: https://cloud.google.com/python/docs/reference
"""
import json
import logging
import os
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from ocs_ci.framework import config
logger = logging.getLogger(name=__file__)
# default location of files with necessary GCP cluster details
SERVICE_ACCOUNT_KEY_FILEPATH = os.path.expanduser("~/.gcp/osServiceAccount.json")
"""str: absolute filepath of json file with service account key
This is json key file of ``sg-serv-account`` service account, which has full
admin rights in given GCP project. The same key file is used by openshift
installer during OCP installation to create all cluster resources from virtual
machines to hostnames. Modules from ocs-ci are using the same key to get full
cluster access as well.
For more details, see `GCP documentation on ServiceAccountKey resource
<https://cloud.google.com/iam/docs/reference/rest/v1/projects.serviceAccounts.keys>`_
"""
def load_service_account_key_dict(filepath=SERVICE_ACCOUNT_KEY_FILEPATH):
"""
Load GCP Service Account key from osServiceAccount.json file and parse it
into a dictionary.
Args:
filepath (str): path of the osServiceAccount.json file
Returns:
dictionary with the service account details
"""
with open(filepath, "r") as sa_file:
sa_dict = json.load(sa_file)
logger.debug(
"fetching GCP service account (for client %s) from %s file",
sa_dict.get("client_email"),
filepath,
)
return sa_dict
class GoogleCloudUtil:
"""
Utility wrapper class for a Google Cloud OCP cluster. The design of this
class follows the similar AWS and Azure utility classes.
"""
_compute_driver = None
_service_account = None
def __init__(self, region_name=None):
"""
Constructor for GCP cluster util class.
Args:
region_name (str): Name of GCP region (such as 'europe-west1'), if
not specified, the value is loaded from ocs-ci config file.
"""
self._region_name = region_name or config.ENV_DATA["region"]
@property
def service_account(self):
"""
Dictionary with GCP service account details, which contains
authentication keys and cluster details loaded from *Service Account
Key file*.
"""
if not self._service_account:
self._service_account = load_service_account_key_dict()
return self._service_account
@property
def compute_driver(self):
"""
Compute Driver instance for GCP.
"""
if self._compute_driver is not None:
return self._compute_driver
service_account_username = self.service_account["client_email"]
project_id = self.service_account["project_id"]
Driver = get_driver(Provider.GCE)
self._compute_driver = Driver(
service_account_username,
SERVICE_ACCOUNT_KEY_FILEPATH,
project=project_id,
datacenter=self._region_name,
)
return self._compute_driver
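if __name__ == '__main__':
    # Usage sketch (editor's note, illustrative): the region value is an
    # assumption and valid credentials must exist at
    # SERVICE_ACCOUNT_KEY_FILEPATH.
    gcp = GoogleCloudUtil(region_name='europe-west1')
    for node in gcp.compute_driver.list_nodes():
        print(node.name)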
|
7de0bb5dfb3e0f3297765ae2b4a3b733cfcc969b
|
d5dcc40a62ddb018447cafaa2e7862c2bceed48a
|
/snapshottest/module.py
|
6d7db10cc13e233d115375ad48a58c5975f26305
|
[
"MIT"
] |
permissive
|
syrusakbary/snapshottest
|
fcbc5f9e6eb0c7200980ca145d6052af8f19d728
|
770b8f14cd965d923a0183a0e531e9ec0ba20192
|
refs/heads/master
| 2023-08-26T02:04:47.810032
| 2023-07-21T01:38:33
| 2023-07-21T01:38:33
| 87,759,082
| 520
| 99
|
MIT
| 2023-07-21T01:38:34
| 2017-04-10T02:45:52
|
Python
|
UTF-8
|
Python
| false
| false
| 8,462
|
py
|
module.py
|
import codecs
import errno
import os
import imp
from collections import defaultdict
import logging
from .snapshot import Snapshot
from .formatter import Formatter
from .error import SnapshotNotFound
logger = logging.getLogger(__name__)
def _escape_quotes(text):
return text.replace("'", "\\'")
class SnapshotModule(object):
_snapshot_modules = {}
def __init__(self, module, filepath):
self._original_snapshot = None
self._snapshots = None
self.module = module
self.filepath = filepath
self.imports = defaultdict(set)
self.visited_snapshots = set()
self.new_snapshots = set()
self.failed_snapshots = set()
self.imports["snapshottest"].add("Snapshot")
def load_snapshots(self):
try:
source = imp.load_source(self.module, self.filepath)
# except FileNotFoundError: # Python 3
except (IOError, OSError) as err:
if err.errno == errno.ENOENT:
return Snapshot()
else:
raise
else:
assert isinstance(source.snapshots, Snapshot)
return source.snapshots
def visit(self, snapshot_name):
self.visited_snapshots.add(snapshot_name)
def delete_unvisited(self):
for unvisited in self.unvisited_snapshots:
del self.snapshots[unvisited]
@property
def unvisited_snapshots(self):
return set(self.snapshots.keys()) - self.visited_snapshots
@classmethod
def total_unvisited_snapshots(cls):
unvisited_snapshots = 0
unvisited_modules = 0
for module in cls.get_modules():
unvisited_snapshot_len = len(module.unvisited_snapshots)
unvisited_snapshots += unvisited_snapshot_len
unvisited_modules += min(unvisited_snapshot_len, 1)
return unvisited_snapshots, unvisited_modules
@classmethod
def get_modules(cls):
return SnapshotModule._snapshot_modules.values()
@classmethod
def stats_for_module(cls, getter):
count_snapshots = 0
count_modules = 0
for module in SnapshotModule._snapshot_modules.values():
length = getter(module)
count_snapshots += length
count_modules += min(length, 1)
return count_snapshots, count_modules
@classmethod
def stats_unvisited_snapshots(cls):
return cls.stats_for_module(lambda module: len(module.unvisited_snapshots))
@classmethod
def stats_visited_snapshots(cls):
return cls.stats_for_module(lambda module: len(module.visited_snapshots))
@classmethod
def stats_new_snapshots(cls):
return cls.stats_for_module(lambda module: len(module.new_snapshots))
@classmethod
def stats_failed_snapshots(cls):
return cls.stats_for_module(lambda module: len(module.failed_snapshots))
@classmethod
def stats_successful_snapshots(cls):
stats_visited = cls.stats_visited_snapshots()
stats_failed = cls.stats_failed_snapshots()
return stats_visited[0] - stats_failed[0]
@classmethod
def has_snapshots(cls):
return cls.stats_visited_snapshots()[0] > 0
@property
def original_snapshot(self):
if not self._original_snapshot:
self._original_snapshot = self.load_snapshots()
return self._original_snapshot
@property
def snapshots(self):
if not self._snapshots:
self._snapshots = Snapshot(self.original_snapshot)
return self._snapshots
def __getitem__(self, test_name):
try:
return self.snapshots[test_name]
except KeyError:
raise SnapshotNotFound(self, test_name)
def __setitem__(self, key, value):
if key not in self.snapshots:
# It's a new test
self.new_snapshots.add(key)
self.snapshots[key] = value
def mark_failed(self, key):
return self.failed_snapshots.add(key)
@property
def snapshot_dir(self):
return os.path.dirname(self.filepath)
def save(self):
if self.original_snapshot == self.snapshots:
# If there are no changes, we do nothing
return
# Create the snapshot dir in case doesn't exist
try:
os.makedirs(self.snapshot_dir, 0o0700)
except (IOError, OSError):
pass
# Create __init__.py in case doesn't exist
open(os.path.join(self.snapshot_dir, "__init__.py"), "a").close()
pretty = Formatter(self.imports)
with codecs.open(self.filepath, "w", encoding="utf-8") as snapshot_file:
snapshots_declarations = [
"""snapshots['{}'] = {}""".format(
_escape_quotes(key), pretty(self.snapshots[key])
)
for key in sorted(self.snapshots.keys())
]
imports = "\n".join(
[
"from {} import {}".format(
module, ", ".join(sorted(module_imports))
)
for module, module_imports in sorted(self.imports.items())
]
)
snapshot_file.write(
"""# -*- coding: utf-8 -*-
# snapshottest: v1 - https://goo.gl/zC4yUc
from __future__ import unicode_literals
{}
snapshots = Snapshot()
{}
""".format(
imports, "\n\n".join(snapshots_declarations)
)
)
@classmethod
def get_module_for_testpath(cls, test_filepath):
if test_filepath not in cls._snapshot_modules:
dirname = os.path.dirname(test_filepath)
snapshot_dir = os.path.join(dirname, "snapshots")
snapshot_basename = "snap_{}.py".format(
os.path.splitext(os.path.basename(test_filepath))[0]
)
snapshot_filename = os.path.join(snapshot_dir, snapshot_basename)
snapshot_module = "{}".format(os.path.splitext(snapshot_basename)[0])
cls._snapshot_modules[test_filepath] = SnapshotModule(
snapshot_module, snapshot_filename
)
return cls._snapshot_modules[test_filepath]
class SnapshotTest(object):
_current_tester = None
def __init__(self):
self.curr_snapshot = ""
self.snapshot_counter = 1
@property
def module(self):
raise NotImplementedError("module property needs to be implemented")
@property
def update(self):
return False
@property
def test_name(self):
raise NotImplementedError("test_name property needs to be implemented")
def __enter__(self):
SnapshotTest._current_tester = self
return self
def __exit__(self, type, value, tb):
self.save_changes()
SnapshotTest._current_tester = None
def visit(self):
self.module.visit(self.test_name)
def fail(self):
self.module.mark_failed(self.test_name)
def store(self, data):
formatter = Formatter.get_formatter(data)
data = formatter.store(self, data)
self.module[self.test_name] = data
def assert_value_matches_snapshot(self, test_value, snapshot_value):
formatter = Formatter.get_formatter(test_value)
formatter.assert_value_matches_snapshot(
self, test_value, snapshot_value, Formatter()
)
def assert_equals(self, value, snapshot):
assert value == snapshot
def assert_match(self, value, name=""):
self.curr_snapshot = name or self.snapshot_counter
self.visit()
if self.update:
self.store(value)
else:
try:
prev_snapshot = self.module[self.test_name]
except SnapshotNotFound:
self.store(value) # first time this test has been seen
else:
try:
self.assert_value_matches_snapshot(value, prev_snapshot)
except AssertionError:
self.fail()
raise
if not name:
self.snapshot_counter += 1
def save_changes(self):
self.module.save()
def assert_match_snapshot(value, name=""):
if not SnapshotTest._current_tester:
raise Exception(
"You need to use assert_match_snapshot in the SnapshotTest context."
)
SnapshotTest._current_tester.assert_match(value, name)
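# Usage sketch (editor's note, illustrative; MySnapshotTest is a
# hypothetical SnapshotTest subclass implementing `module` and
# `test_name`): inside the context, assert_match() stores the value on
# the first run and compares it against the saved snapshot afterwards.
#
#     with MySnapshotTest() as case:
#         case.assert_match(compute_result(), 'my_result')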
|
7a8c0f349e7a21c96578ed6842a3b41057419bd5
|
6fdb4eaf5b0e6dbd7db4bf947547541e9aebf110
|
/api/tests/opentrons/protocol_engine/commands/magnetic_module/test_engage.py
|
2bfea51d887ae9226ea83838fce57a0ccf2f25fc
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
Opentrons/opentrons
|
874321e01149184960eeaeaa31b1d21719a1ceda
|
026b523c8c9e5d45910c490efb89194d72595be9
|
refs/heads/edge
| 2023-09-02T02:51:49.579906
| 2023-08-31T16:02:45
| 2023-08-31T16:02:45
| 38,644,841
| 326
| 174
|
Apache-2.0
| 2023-09-14T21:47:20
| 2015-07-06T20:41:01
|
Python
|
UTF-8
|
Python
| false
| false
| 1,744
|
py
|
test_engage.py
|
"""Test magnetic module engage commands."""
from decoy import Decoy
from opentrons.hardware_control.modules import MagDeck
from opentrons.protocol_engine.state import StateView
from opentrons.protocol_engine.state.module_substates import (
MagneticModuleId,
MagneticModuleSubState,
)
from opentrons.protocol_engine.execution import EquipmentHandler
from opentrons.protocol_engine.commands.magnetic_module import (
EngageParams,
EngageResult,
)
from opentrons.protocol_engine.commands.magnetic_module.engage import (
EngageImplementation,
)
async def test_magnetic_module_engage_implementation(
decoy: Decoy, state_view: StateView, equipment: EquipmentHandler
) -> None:
"""It should calculate the proper hardware height and engage."""
subject = EngageImplementation(state_view=state_view, equipment=equipment)
params = EngageParams(
moduleId="unverified-module-id",
height=3.14159,
)
verified_module_id = MagneticModuleId("module-id")
magnetic_module_substate = decoy.mock(cls=MagneticModuleSubState)
magnetic_module_hw = decoy.mock(cls=MagDeck)
decoy.when(
state_view.modules.get_magnetic_module_substate("unverified-module-id")
).then_return(magnetic_module_substate)
decoy.when(
magnetic_module_substate.calculate_magnet_hardware_height(mm_from_base=3.14159)
).then_return(9001)
decoy.when(magnetic_module_substate.module_id).then_return(verified_module_id)
decoy.when(equipment.get_module_hardware_api(verified_module_id)).then_return(
magnetic_module_hw
)
result = await subject.execute(params=params)
decoy.verify(await magnetic_module_hw.engage(9001), times=1)
assert result == EngageResult()
|
22a5aee938a3a4d94076969a8b409113c54bb105
|
8988a329c571cb04a5d97c691d0cd8bc4caf81d4
|
/dimod/discrete/discrete_quadratic_model.py
|
65f316079823bc8187f170086e1b33f530c76e68
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
dwavesystems/dimod
|
85329cbee86bdf5a73de05fa25884c877ea53002
|
8433f221a1e79101e1db0d80968ab5a2f59b865d
|
refs/heads/main
| 2023-08-29T08:37:24.565927
| 2023-08-17T17:14:58
| 2023-08-17T17:14:58
| 100,658,303
| 118
| 93
|
Apache-2.0
| 2023-09-13T18:15:37
| 2017-08-18T01:02:17
|
Python
|
UTF-8
|
Python
| false
| false
| 47,473
|
py
|
discrete_quadratic_model.py
|
# Copyright 2020 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections.abc as abc
import io
import json
import warnings
from collections import defaultdict, namedtuple
from typing import List, Tuple, Union, Generator, Iterator
import numpy as np
from dimod.discrete.cydiscrete_quadratic_model import cyDiscreteQuadraticModel
from dimod.sampleset import as_samples
from dimod.serialization.fileview import VariablesSection, _BytesIO, SpooledTemporaryFile
from dimod.serialization.fileview import load, read_header, write_header
from dimod.typing import QuadraticVectors, DQMVectors
from dimod.variables import Variables
LinearTriplets = Union[List[Tuple], Generator[Tuple, None, None]]
__all__ = ['DiscreteQuadraticModel', 'DQM', 'CaseLabelDQM']
# constants for serialization
DQM_MAGIC_PREFIX = b'DIMODDQM'
DATA_MAGIC_PREFIX = b'BIAS'
LegacyDQMVectors = namedtuple(
'LegacyDQMVectors', ['case_starts', 'linear_biases', 'quadratic', 'labels'])
class VariableNeighborhood(abc.Set):
# this really shouldn't be set-like because __contains__ is O(degree(v))
# but for backwards compatibility we'll leave it.
__slots__ = ('_dqm', '_vi')
def __init__(self, dqm, v):
self._dqm = dqm
self._vi = dqm.variables.index(v) # raises ValueError
def __contains__(self, u):
return self._dqm.variables.index(u) in self._dqm._cydqm.adj[self._vi]
def __iter__(self):
for ui in self._dqm._cydqm.adj[self._vi]:
yield self._dqm.variables[ui]
def __len__(self):
return self._dqm._cydqm.degree(self._vi)
def __repr__(self):
return str(dict(self))
class VariableAdjacency(abc.Mapping):
__slots__ = ('_dqm',)
def __init__(self, dqm):
self._dqm = dqm
def __getitem__(self, v):
return VariableNeighborhood(self._dqm, v)
def __iter__(self):
yield from self._dqm.variables
def __len__(self):
return len(self._dqm.variables)
def __repr__(self):
return str(dict(self))
class DiscreteQuadraticModel:
"""Encodes a discrete quadratic model.
A discrete quadratic model is a polynomial over discrete variables with
terms all of degree two or less.
Examples:
This example constructs a map coloring with Canadian provinces. To
solve the problem we penalize adjacent provinces having the same color.
>>> provinces = ["AB", "BC", "ON", "MB", "NB", "NL", "NS", "NT", "NU",
... "PE", "QC", "SK", "YT"]
>>> borders = [("BC", "AB"), ("BC", "NT"), ("BC", "YT"), ("AB", "SK"),
... ("AB", "NT"), ("SK", "MB"), ("SK", "NT"), ("MB", "ON"),
... ("MB", "NU"), ("ON", "QC"), ("QC", "NB"), ("QC", "NL"),
... ("NB", "NS"), ("YT", "NT"), ("NT", "NU")]
>>> colors = [0, 1, 2, 3]
...
>>> dqm = dimod.DiscreteQuadraticModel()
>>> for p in provinces:
... _ = dqm.add_variable(4, label=p)
>>> for p0, p1 in borders:
... dqm.set_quadratic(p0, p1, {(c, c): 1 for c in colors})
The next examples show how to view and manipulate the model biases.
>>> dqm = dimod.DiscreteQuadraticModel()
Add the variables to the model
>>> u = dqm.add_variable(5) # unlabeled variable with 5 cases
>>> v = dqm.add_variable(3, label='v') # labeled variable with 3 cases
The linear biases default to 0. They can be read by case or by batch.
>>> dqm.get_linear_case(u, 1)
0.0
>>> dqm.get_linear(u)
array([0., 0., 0., 0., 0.])
>>> dqm.get_linear(v)
array([0., 0., 0.])
The linear biases can be overwritten either by case or in a batch.
>>> dqm.set_linear_case(u, 3, 17)
>>> dqm.get_linear(u)
array([ 0., 0., 0., 17., 0.])
>>> dqm.set_linear(v, [0, -1, 3])
>>> dqm.get_linear(v)
array([ 0., -1., 3.])
The quadratic biases can also be manipulated sparsely or densely.
>>> dqm.set_quadratic(u, v, {(0, 2): 1.5})
>>> dqm.get_quadratic(u, v)
{(0, 2): 1.5}
>>> dqm.get_quadratic(u, v, array=True) # as a NumPy array
array([[0. , 0. , 1.5],
[0. , 0. , 0. ],
[0. , 0. , 0. ],
[0. , 0. , 0. ],
[0. , 0. , 0. ]])
>>> dqm.set_quadratic_case(u, 2, v, 1, -3)
>>> dqm.get_quadratic(u, v, array=True)
array([[ 0. , 0. , 1.5],
[ 0. , 0. , 0. ],
[ 0. , -3. , 0. ],
[ 0. , 0. , 0. ],
[ 0. , 0. , 0. ]])
>>> dqm.get_quadratic(u, v) # doctest:+SKIP
{(0, 2): 1.5, (2, 1): -3.0}
"""
def __init__(self):
self.variables = Variables()
self._cydqm = cyDiscreteQuadraticModel()
variables = None # overwritten by __init__, here for the docstring
""":class:`~.variables.Variables` of variable labels."""
@property
def adj(self):
"""dict[hashable, set]: The adjacency structure of the variables."""
try:
return self._adj
except AttributeError:
pass
self._adj = adj = VariableAdjacency(self)
return adj
@property
def offset(self):
return self._cydqm.offset
@offset.setter
def offset(self, offset: float):
self._cydqm.offset = offset
def add_linear_equality_constraint(self, terms: LinearTriplets,
lagrange_multiplier: float,
constant: float):
"""Add a linear constraint as a quadratic objective.
Adds a linear constraint of the form
:math:`\sum_{i,k} a_{i,k} x_{i,k} + C = 0`
to the discrete quadratic model as a quadratic objective.
Args:
terms: A list of tuples of the type (variable, case, bias).
Each tuple is evaluated to the term (bias * variable_case).
All terms in the list are summed.
lagrange_multiplier: The coefficient or the penalty strength
constant: The constant value of the constraint.
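Example (editor's note, illustrative; ``u`` and ``v`` are assumed to be
existing variables): encode :math:`x_{u,1} + 2 x_{v,2} - 3 = 0` with
``dqm.add_linear_equality_constraint([(u, 1, 1), (v, 2, 2)],
lagrange_multiplier=10, constant=-3)``.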
"""
index_terms = ((self.variables.index(v), c, x) for v, c, x in terms)
self._cydqm.add_linear_equality_constraint(
index_terms, lagrange_multiplier, constant)
def add_linear_inequality_constraint(self, terms: LinearTriplets,
lagrange_multiplier: float,
label: str,
constant: int = 0,
lb: int = np.iinfo(np.int64).min,
ub: int = 0,
slack_method: str = "log2",
cross_zero: bool = False)\
-> LinearTriplets:
"""Add a linear inequality constraint as a quadratic objective.
Adds a linear inequality constraint of the form:
:math:`lb \le \sum_{i,k} a_{i,k} x_{i,k} + constant \le ub`
to the discrete quadratic model as a quadratic objective.
Coefficients should be integers.
For constraints with fractional coefficients, multiply both sides of
the inequality by an appropriate factor of ten to attain or approximate
integer coefficients.
Args:
terms:
A list of tuples of the type (variable, case, bias).
Each tuple is evaluated to the term (bias * variable_case).
All terms in the list are summed.
lagrange_multiplier:
A weight or the penalty strength. This value is multiplied by
the entire constraint objective and added to the
discrete quadratic model (it doesn't appear explicitly in the
equation above).
label:
Prefix used to label the slack variables used to create the new
objective.
constant:
The constant value of the constraint.
lb:
lower bound for the constraint
ub:
upper bound for the constraint
slack_method:
"The method for adding slack variables. Supported methods are:
- log2: Adds up to log2(ub - lb) number of dqm variables each
with two cases to the constraint.
- log10: Adds log10 dqm variables each with up to 10 cases.
- linear: Adds one dqm variable for each constraint with linear
number of cases.
cross_zero:
When True, adds zero to the domain of the constraint.
Returns:
slack_terms: A list of tuples of the type (variable, case, bias)
for the new slack variables.
Each tuple is evaluated to the term (bias * variable_case).
All terms in the list are summed.
"""
if slack_method not in ['log2', 'log10', 'linear']:
raise ValueError(
"expected slack_method to be 'log2', 'log10' or 'linear' "
f"but got {slack_method!r}")
if isinstance(terms, Iterator):
terms = list(terms)
if int(constant) != constant or int(lb) != lb or int(ub) != ub or any(
int(bias) != bias for _, _, bias in terms):
warnings.warn("For constraints with fractional coefficients, "
"multiply both sides of the inequality by an "
"appropriate factor of ten to attain or "
"approximate integer coefficients. ")
terms_upper_bound = sum(v for _, _, v in terms if v > 0)
terms_lower_bound = sum(v for _, _, v in terms if v < 0)
ub_c = min(terms_upper_bound, ub - constant)
lb_c = max(terms_lower_bound, lb - constant)
if terms_upper_bound <= ub_c and terms_lower_bound >= lb_c:
warnings.warn(
f'Did not add constraint {label}.'
' This constraint is feasible'
' with any value for state variables.')
return []
if ub_c < lb_c:
raise ValueError(
f'The given constraint ({label}) is infeasible with any value'
' for state variables.')
slack_upper_bound = int(ub_c - lb_c)
if slack_upper_bound == 0:
self.add_linear_equality_constraint(terms, lagrange_multiplier,
-ub_c)
return []
else:
slack_terms = []
zero_constraint = False
if cross_zero:
if lb_c > 0 or ub_c < 0:
zero_constraint = True
if slack_method == "log2":
num_slack = int(np.floor(np.log2(slack_upper_bound)))
slack_coefficients = [2 ** j for j in range(num_slack)]
if slack_upper_bound - 2 ** num_slack >= 0:
slack_coefficients.append(
slack_upper_bound - 2 ** num_slack + 1)
for j, s in enumerate(slack_coefficients):
sv = self.add_variable(2, f'slack_{label}_{j}')
slack_terms.append((sv, 1, s))
if zero_constraint:
sv = self.add_variable(2, f'slack_{label}_{num_slack + 1}')
slack_terms.append((sv, 1, ub_c))
elif slack_method == "log10":
num_dqm_vars = int(np.ceil(np.log10(slack_upper_bound+1)))
for j in range(num_dqm_vars):
slack_term = list(range(0, min(slack_upper_bound + 1,
10 ** (j + 1)), 10 ** j))[1:]
if j < num_dqm_vars - 1 or not zero_constraint:
sv = self.add_variable(len(slack_term) + 1,
f'slack_{label}_{j}')
else:
sv = self.add_variable(len(slack_term) + 2,
f'slack_{label}_{j}')
for i, val in enumerate(slack_term):
slack_terms.append((sv, i + 1, val))
if zero_constraint:
slack_terms.append((sv, len(slack_term) + 1, ub_c))
elif slack_method == 'linear':
slack_term = list(range(1, slack_upper_bound + 1))
if not zero_constraint:
sv = self.add_variable(len(slack_term) + 1,
f'slack_{label}')
else:
sv = self.add_variable(len(slack_term) + 2,
f'slack_{label}')
for i, val in enumerate(slack_term):
slack_terms.append((sv, i + 1, val))
if zero_constraint:
slack_terms.append((sv, len(slack_term) + 1, ub_c))
self.add_linear_equality_constraint(terms + slack_terms,
lagrange_multiplier, -ub_c)
return slack_terms
def add_variable(self, num_cases, label=None):
"""Add a discrete variable.
Args:
num_cases (int):
The number of cases in the variable. Must be a positive
integer.
label (hashable, optional):
A label for the variable. Can be any hashable except `None`.
Defaults to the length of the discrete quadratic model, if that
label is available. Otherwise defaults to the lowest available
positive integer label.
Returns:
The label of the new variable.
Raises:
ValueError: If `label` already exists as a variable label.
TypeError: If `label` is not hashable.
"""
self.variables._append(label)
variable_index = self._cydqm.add_variable(num_cases)
assert variable_index + 1 == len(self.variables)
return self.variables[-1]
# todo: support __copy__ and __deepcopy__
def copy(self):
"""Return a copy of the discrete quadratic model."""
new = type(self)()
new._cydqm = self._cydqm.copy()
for v in self.variables:
new.variables._append(v)
return new
def degree(self, v):
return self._cydqm.degree(self.variables.index(v))
def energy(self, sample):
energy, = self.energies(sample)
return energy
def energies(self, samples):
samples, labels = as_samples(samples, dtype=self._cydqm.case_dtype)
# reorder as needed
if len(labels) != self.num_variables():
raise ValueError(
"Given sample(s) have incorrect number of variables")
if self.variables != labels:
# need to reorder the samples
label_to_idx = dict((v, i) for i, v in enumerate(labels))
try:
order = [label_to_idx[v] for v in self.variables]
except KeyError:
raise ValueError("given samples-like does not match labels")
samples = samples[:, order]
return np.asarray(self._cydqm.energies(samples))
@classmethod
def _from_file_numpy(cls, file_like):
magic = file_like.read(len(DATA_MAGIC_PREFIX))
if magic != DATA_MAGIC_PREFIX:
raise ValueError("unknown file type, expected magic string {} but "
"got {}".format(DATA_MAGIC_PREFIX, magic))
length = np.frombuffer(file_like.read(4), '<u4')[0]
start = file_like.tell()
data = np.load(file_like)
obj = cls.from_numpy_vectors(data['case_starts'],
data['linear_biases'],
(data['quadratic_row_indices'],
data['quadratic_col_indices'],
data['quadratic_biases'],
),
offset=data.get('offset', 0),
)
# move to the end of the data section
file_like.seek(start+length, io.SEEK_SET)
return obj
@classmethod
def from_file(cls, file_like):
"""Construct a DQM from a file-like object.
The inverse of :meth:`~DiscreteQuadraticModel.to_file`.
"""
if isinstance(file_like, (bytes, bytearray, memoryview)):
file_like = _BytesIO(file_like)
header_info = read_header(file_like, DQM_MAGIC_PREFIX)
version = header_info.version
header_data = header_info.data
if version >= (2, 0):
raise ValueError("cannot load a DQM serialized with version "
f"{version!r}, try upgrading your dimod version")
obj = cls._from_file_numpy(file_like)
if header_data['variables']:
obj.variables = Variables()
for v in VariablesSection.load(file_like):
obj.variables._append(v)
if len(obj.variables) != obj.num_variables():
raise ValueError("mismatched labels to BQM in given file")
return obj
@classmethod
def from_numpy_vectors(cls, case_starts, linear_biases, quadratic,
labels=None, offset=0):
"""Construct a DQM from five numpy vectors.
Args:
case_starts (array-like): A length
:meth:`~DiscreteQuadraticModel.num_variables` array. The cases
associated with variable `v` are in the range `[case_starts[v],
cases_starts[v+1])`.
linear_biases (array-like): A length
:meth:`~DiscreteQuadraticModel.num_cases` array. The linear
biases.
quadratic (tuple): A three tuple containing:
- `irow`: A length
:meth:`~DiscreteQuadraticModel.num_case_interactions` array. If
the case interactions were defined in a sparse matrix, these
would be the row indices.
- `icol`: A length
:meth:`~DiscreteQuadraticModel.num_case_interactions` array. If
the case interactions were defined in a sparse matrix, these
would be the column indices.
- `quadratic_biases`: A length
:meth:`~DiscreteQuadraticModel.num_case_interactions` array. If
the case interactions were defined in a sparse matrix, these
would be the values.
labels (list, optional):
The variable labels. Defaults to index-labeled.
offset (float):
Energy offset of the DQM.
Example:
>>> dqm = dimod.DiscreteQuadraticModel()
>>> u = dqm.add_variable(5)
>>> v = dqm.add_variable(3, label='3var')
>>> dqm.set_quadratic(u, v, {(0, 2): 1})
>>> vectors = dqm.to_numpy_vectors()
>>> new = dimod.DiscreteQuadraticModel.from_numpy_vectors(*vectors)
See Also:
:meth:`~DiscreteQuadraticModel.to_numpy_vectors`
"""
obj = cls()
obj._cydqm = cyDiscreteQuadraticModel.from_numpy_vectors(
case_starts, linear_biases, quadratic, offset)
if labels is not None:
if len(labels) != obj._cydqm.num_variables():
raise ValueError(
"labels does not match the length of the DQM"
)
for v in labels:
obj.variables._append(v)
else:
for v in range(obj._cydqm.num_variables()):
obj.variables._append()
return obj
def get_cases(self, v):
"""The cases of variable `v` as a sequence"""
return range(self.num_cases(v))
def get_linear(self, v):
"""The linear biases associated with variable `v`.
Args:
v: A variable in the discrete quadratic model.
Returns:
:class:`~numpy.ndarray`: The linear biases in an array.
"""
return self._cydqm.get_linear(self.variables.index(v))
def get_linear_case(self, v, case):
"""The linear bias associated with case `case` of variable `v`.
Args:
v: A variable in the discrete quadratic model.
case (int): The case of `v`.
Returns:
The linear bias.
"""
return self._cydqm.get_linear_case(self.variables.index(v), case)
def get_quadratic(self, u, v, array=False):
"""The biases associated with the interaction between `u` and `v`.
Args:
u: A variable in the discrete quadratic model.
v: A variable in the discrete quadratic model.
array (bool, optional, default=False): If True, a dense array is
returned rather than a dict.
Returns:
The quadratic biases. If `array=False`, returns a dictionary of the
form `{(case_u, case_v): bias, ...}`.
If `array=True`, returns a
:meth:`~DiscreteQuadraticModel.num_cases(u)` by
:meth:`~DiscreteQuadraticModel.num_cases(v)` numpy array.
"""
return self._cydqm.get_quadratic(
self.variables.index(u),
self.variables.index(v),
array=array)
def get_quadratic_case(self, u, u_case, v, v_case):
"""The bias associated with the interaction between two cases of `u`
and `v`.
Args:
u: A variable in the discrete quadratic model.
u_case (int): The case of `u`.
v: A variable in the discrete quadratic model.
v_case (int): The case of `v`.
Returns:
The quadratic bias.
"""
return self._cydqm.get_quadratic_case(
self.variables.index(u), u_case, self.variables.index(v), v_case)
def num_cases(self, v=None):
"""If `v` is provided, return the number of cases associated with `v`;
otherwise, return the total number of cases in the DQM.
"""
if v is None:
return self._cydqm.num_cases()
return self._cydqm.num_cases(self.variables.index(v))
def num_case_interactions(self):
"""The total number of case interactions."""
return self._cydqm.num_case_interactions()
def num_variable_interactions(self):
"""The total number of variable interactions"""
return self._cydqm.num_variable_interactions()
def num_variables(self):
"""The number of variables in the discrete quadratic model."""
return self._cydqm.num_variables()
def relabel_variables(self, mapping, inplace=True):
if not inplace:
return self.copy().relabel_variables(mapping, inplace=True)
self.variables._relabel(mapping)
return self
def relabel_variables_as_integers(self, inplace=True):
"""Relabel the variables of the DQM to integers.
Args:
inplace (bool, optional, default=True):
If True, the discrete quadratic model is updated in-place;
otherwise, a new discrete quadratic model is returned.
Returns:
tuple: A 2-tuple containing:
A discrete quadratic model with the variables relabeled. If
`inplace` is set to True, returns itself.
dict: The mapping that will restore the original labels.
"""
if not inplace:
return self.copy().relabel_variables_as_integers(inplace=True)
return self, self.variables._relabel_as_integers()
def set_linear(self, v, biases):
"""Set the linear biases associated with `v`.
Args:
v: A variable in the discrete quadratic model.
biases (array-like): The linear biases in an array.
"""
self._cydqm.set_linear(self.variables.index(v), np.asarray(biases))
def set_linear_case(self, v, case, bias):
"""Set the linear bias associated with case `case` of variable `v`.
Args:
v: A variable in the discrete quadratic model.
case (int): The case of `v`.
bias (float): The linear bias.
"""
self._cydqm.set_linear_case(self.variables.index(v), case, bias)
def set_quadratic(self, u, v, biases):
"""Set biases associated with the interaction between `u` and `v`.
Args:
u: A variable in the discrete quadratic model.
v: A variable in the discrete quadratic model.
biases (array-like/dict):
The quadratic biases. If a dict, a dictionary of the
form `{(case_u, case_v): bias, ...}`. Otherwise, a
:meth:`~DiscreteQuadraticModel.num_cases(u)` by
:meth:`~DiscreteQuadraticModel.num_cases(v)` array-like.
"""
self._cydqm.set_quadratic(
self.variables.index(u),
self.variables.index(v),
biases)
def set_quadratic_case(self, u, u_case, v, v_case, bias):
"""Set the bias associated with the interaction between two cases of
`u` and `v`.
Args:
u: A variable in the discrete quadratic model.
u_case (int): The case of `u`.
v: A variable in the discrete quadratic model.
v_case (int): The case of `v`.
bias (float): The quadratic bias.
"""
self._cydqm.set_quadratic_case(
self.variables.index(u), u_case,
self.variables.index(v), v_case,
bias)
def _to_file_numpy(self, file, compress):
# the biases etc, saved using numpy
# we'd like to just let numpy handle the header etc, but it doesn't
# do a good job of cleaning up after itself in np.load, so we record
# the section length ourselves
file.write(DATA_MAGIC_PREFIX)
file.write(b'    ')  # 4-byte placeholder, will be replaced by the length
start = file.tell()
vectors = self.to_numpy_vectors(return_offset=True)
if compress:
save = np.savez_compressed
else:
save = np.savez
save(file,
case_starts=vectors.case_starts,
linear_biases=vectors.linear_biases,
quadratic_row_indices=vectors.quadratic.row_indices,
quadratic_col_indices=vectors.quadratic.col_indices,
quadratic_biases=vectors.quadratic.biases,
offset=vectors.offset,
)
# record the length
end = file.tell()
file.seek(start-4)
file.write(np.dtype('<u4').type(end - start).tobytes())
file.seek(end)
def to_file(self, *, compress=False, compressed=None, ignore_labels=False,
spool_size=int(1e9)):
"""Convert the DQM to a file-like object.
Args:
compress (bool, optional default=False):
If True, most of the data will be compressed.
compressed (bool, optional default=None):
Deprecated; please use ``compress`` instead.
ignore_labels (bool, optional, default=False):
Treat the DQM as unlabeled. This is useful for large DQMs to
save on space.
spool_size (int, optional, default=int(1e9)):
Defines the `max_size` passed to the constructor of
:class:`tempfile.SpooledTemporaryFile`. Determines whether
the returned file-like's contents will be kept on disk or in
memory.
Returns:
A file-like object that can be used to construct a copy of the DQM.
The class is a thin wrapper of
:class:`tempfile.SpooledTemporaryFile` that includes some
methods from :class:`io.IOBase`
Format Specification (Version 1.0):
This format is inspired by the `NPY format`_
**Header**
The first 8 bytes are a magic string: exactly ``"DIMODDQM"``.
The next 1 byte is an unsigned byte: the major version of the file
format.
The next 1 byte is an unsigned byte: the minor version of the file
format.
The next 4 bytes form a little-endian unsigned int, the length of
the header data `HEADER_LEN`.
The next ``HEADER_LEN`` bytes form the header data. This is a
json-serialized dictionary. The dictionary is exactly:
.. code-block:: python
dict(num_variables=dqm.num_variables(),
num_cases=dqm.num_cases(),
num_case_interactions=dqm.num_case_interactions(),
num_variable_interactions=dqm.num_variable_interactions(),
variables=not (ignore_labels or dqm.variables.is_range),
)
it is padded with spaces to make the entire length of the header
divisible by 64.
**DQM Data**
The first 4 bytes are exactly ``"BIAS"``.
The next 4 bytes form a little-endian unsigned int, the length of
the DQM data ``DATA_LEN``.
The next ``DATA_LEN`` bytes are the vectors as returned by
:meth:`DiscreteQuadraticModel.to_numpy_vectors` saved using
:func:`numpy.save`.
**Variable Data**
The first 4 bytes are exactly ``"VARS"``.
The next 4 bytes form a little-endian unsigned int, the length of
the variables array ``VARIABLES_LENGTH``.
The next ``VARIABLES_LENGTH`` bytes are a json-serialized array, as
constructed by ``json.dumps(list(dqm.variables))``.
.. _NPY format: https://numpy.org/doc/stable/reference/generated/numpy.lib.format.html
See Also:
:meth:`DiscreteQuadraticModel.from_file`
.. deprecated:: 0.9.9
The ``compressed`` keyword argument will be removed in dimod 0.12.0.
Use ``compress`` instead.
"""
file = SpooledTemporaryFile(max_size=spool_size)
index_labeled = ignore_labels or self.variables.is_range
data = dict(num_variables=self.num_variables(),
num_cases=self.num_cases(),
num_case_interactions=self.num_case_interactions(),
num_variable_interactions=self.num_variable_interactions(),
variables=not index_labeled,
)
write_header(file, DQM_MAGIC_PREFIX, data, version=(1, 1))
# the section containing most of the data, encoded with numpy
if compressed is not None:
warnings.warn(
"Argument 'compressed' is deprecated since dimod 0.9.9 "
"and will be removed in 0.12.0. "
"Use 'compress' instead.",
DeprecationWarning, stacklevel=2
)
compress = compressed or compress
self._to_file_numpy(file, compress)
if not index_labeled:
file.write(VariablesSection(self.variables).dumps())
file.seek(0)
return file
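# Hedged roundtrip sketch (illustrative, not part of the original source):
#
#     with dqm.to_file() as f:
#         new_dqm = DiscreteQuadraticModel.from_file(f)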
def to_numpy_vectors(self, return_offset: bool = False):
"""Convert the DQM to five numpy vectors and the labels.
Args:
return_offset: Boolean flag to optionally return the energy offset value.
Returns:
:class:`DQMVectors`: A named tuple with fields `['case_starts',
'linear_biases', 'quadratic', 'labels']`.
- `case_starts`: A length
:meth:`~DiscreteQuadraticModel.num_variables` array. The cases
associated with variable `v` are in the range `[case_starts[v],
cases_starts[v+1])`.
- `linear_biases`: A length
:meth:`~DiscreteQuadraticModel.num_cases` array. The linear
biases.
- `quadratic`: A named tuple with fields `['row_indices',
'col_indices', 'biases']`.
* `row_indices`: A length
:meth:`~DiscreteQuadraticModel.num_case_interactions` array. If
the case interactions were defined in a sparse matrix, these
would be the row indices.
* `col_indices`: A length
:meth:`~DiscreteQuadraticModel.num_case_interactions` array. If
the case interactions were defined in a sparse matrix, these
would be the column indices.
* `biases`: A length
:meth:`~DiscreteQuadraticModel.num_case_interactions` array. If
the case interactions were defined in a sparse matrix, these
would be the values.
- `labels`: The variable labels in a
:class:`~collections.abc.Sequence`.
If `return_offset=True`, the returned named tuple also includes an
`offset` field containing the energy offset.
See Also:
:meth:`~DiscreteQuadraticModel.from_numpy_vectors`
"""
if not return_offset:
warnings.warn(
"`return_offset` will default to `True` in the future.", DeprecationWarning,
stacklevel=2
)
case_starts, linear_biases, quadratic = self._cydqm.to_numpy_vectors()
return LegacyDQMVectors(case_starts,
linear_biases,
QuadraticVectors(*quadratic),
self.variables)
case_starts, linear_biases, quadratic, offset = self._cydqm.to_numpy_vectors(return_offset)
return DQMVectors(
case_starts, linear_biases, QuadraticVectors(*quadratic), self.variables, offset
)
DQM = DiscreteQuadraticModel # alias
# register fileview loader
load.register(DQM_MAGIC_PREFIX, DiscreteQuadraticModel.from_file)
class CaseLabelDQM(DQM):
'''DiscreteQuadraticModel that allows assignment of arbitrary labels to
cases of discrete variables.
Two types of case labels are offered:
1. Unique case labels are unique among variable labels and themselves.
2. Shared case labels are unique among cases for a variable, but may be
reused among variables.
Examples:
Declare variables with unique case labels.
>>> dqm = dimod.CaseLabelDQM()
>>> dqm.add_variable({'x1', 'x2', 'x3'})
0
>>> dqm.add_variable(['y1', 'y2', 'y3'])
1
Set linear biases
>>> dqm.set_linear('x1', 0.5)
>>> dqm.set_linear('y1', 1.5)
Set quadratic biases
>>> dqm.set_quadratic('x2', 'y3', -0.5)
>>> dqm.set_quadratic('x3', 'y2', -1.5)
Declare variables with shared case labels.
>>> u = dqm.add_variable({'red', 'green', 'blue'}, shared_labels=True)
>>> v = dqm.add_variable(['blue', 'yellow', 'brown'], label='v', shared_labels=True)
Set linear biases
>>> dqm.set_linear_case(u, 'red', 1)
>>> dqm.set_linear_case(v, 'yellow', 2)
Set quadratic biases
>>> dqm.set_quadratic_case(u, 'green', v, 'blue', -0.5)
>>> dqm.set_quadratic_case(u, 'blue', v, 'brown', -0.5)
'''
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._shared_case_label = defaultdict(dict)
self._shared_label_case = defaultdict(dict)
self._unique_case_label = {}
self._unique_label_case = {}
self._unique_label_vars = set()
def add_variable(self, cases, label=None, shared_labels=False):
"""Add a discrete variable to the model.
Args:
cases (int or iterable):
The number of cases in the variable, or an iterable containing
the labels that will identify the cases of the variable. Case
labels can be any hashable.
label (hashable, optional):
A label for the variable. Can be any hashable except `None`.
Defaults to the length of the discrete quadratic model, if that
label is available. Otherwise defaults to the lowest available
positive integer label.
shared_labels (bool, optional, default=False):
If True and `cases` is an iterable, shared case labels are
created. If False and `cases` is an iterable, unique case
labels are created. If `cases` is not an iterable, ignored.
Returns:
The label of the new variable.
Raises:
ValueError: If `label` already exists as a variable label, or if
any of the case labels is not unique.
TypeError: If `label` is not hashable, or if any of the case labels
is not hashable.
"""
if label in self._unique_label_case:
raise ValueError(f'variable label {label} is not unique')
if isinstance(cases, int):
return super().add_variable(cases, label=label)
else:
if len(set(cases)) != len(cases):
raise ValueError('case labels are not unique')
if shared_labels:
var = super().add_variable(len(cases), label=label)
for k, case in enumerate(cases):
self._shared_label_case[var][case] = k
self._shared_case_label[var][k] = case
else:
for case in cases:
if (case in self.variables) or (case in self._unique_label_case):
raise ValueError(f'case label {case} is not unique')
var = super().add_variable(len(cases), label=label)
self._unique_label_vars.add(var)
for k, case in enumerate(cases):
self._unique_label_case[case] = (var, k)
self._unique_case_label[(var, k)] = case
return var
def _lookup_shared_case(self, v, case):
"""Translate shared case label `case` of variable `v` to integer.
Raises:
ValueError: If `case` of `v` is unknown.
"""
map_ = self._shared_label_case.get(v)
if map_:
if case not in map_:
raise ValueError(f'unknown case {case} of variable {v}')
return map_[case]
return case
def get_linear(self, v):
"""The linear biases associated with variable `v`.
Args:
v: A variable in the discrete quadratic model, or a unique case
label.
Returns:
The linear biases. If `v` is a variable, returns a NumPy array of
length :meth:`~DiscreteQuadraticModel.num_cases(v)`.
If `v` is a unique case label, returns a float.
"""
v_k = self._unique_label_case.get(v)
if v_k:
return super().get_linear_case(*v_k)
else:
return super().get_linear(v)
def get_linear_case(self, v, case):
"""The linear bias associated with case `case` of variable `v`.
Args:
v: A variable in the discrete quadratic model.
case: The case of `v`.
Returns:
The linear bias.
"""
case = self._lookup_shared_case(v, case)
return super().get_linear_case(v, case)
def get_quadratic(self, u, v, array=False):
"""The biases associated with the interaction between `u` and `v`.
Args:
u: A variable in the discrete quadratic model, or a unique case
label. If `u` is a unique case label, `v` must be a unique
case label.
v: A variable in the discrete quadratic model, or a unique case
label. If `u` is a unique case label, `v` must be a unique
case label.
array (bool, optional, default=False): If True and `u` and `v` are
variables, a dense array is returned rather than a dict. If
`u` and `v` are unique case labels, ignored.
Returns:
The quadratic biases. If `array=False` and `u` and `v` are
variables, returns a dictionary of the form
`{(case_u, case_v): bias, ...}`.
If `array=True` and `u` and `v` are variables, returns a NumPy
array of size :meth:`~DiscreteQuadraticModel.num_cases(u)` by
:meth:`~DiscreteQuadraticModel.num_cases(v)`.
If `u` and `v` are unique case labels, returns a float.
Raises:
ValueError: If `u` is a unique case label and `v` is not.
"""
u_k = self._unique_label_case.get(u)
if u_k:
if v not in self._unique_label_case:
raise ValueError(f'unknown case label {v}')
v_m = self._unique_label_case[v]
return super().get_quadratic_case(*u_k, *v_m)
else:
return super().get_quadratic(u, v)
def get_quadratic_case(self, u, u_case, v, v_case):
"""The bias associated with the interaction between two cases of `u`
and `v`.
Args:
u: A variable in the discrete quadratic model.
u_case: The case of `u`.
v: A variable in the discrete quadratic model.
v_case: The case of `v`.
Returns:
The quadratic bias.
"""
u_case = self._lookup_shared_case(u, u_case)
v_case = self._lookup_shared_case(v, v_case)
return super().get_quadratic_case(u, u_case, v, v_case)
def set_linear(self, v, biases):
"""Set the linear biases associated with `v`.
Args:
v: A variable in the discrete quadratic model, or a unique case
label.
biases (float or array-like): If `v` is a variable, an array of
linear biases. Otherwise, a single real number giving the linear
bias.
"""
v_k = self._unique_label_case.get(v)
if v_k:
super().set_linear_case(*v_k, biases)
else:
super().set_linear(v, biases)
def set_linear_case(self, v, case, bias):
"""Set the linear bias associated with case `case` of variable `v`.
Args:
v: A variable in the discrete quadratic model.
case: The case of `v`.
bias (float): The linear bias.
"""
case = self._lookup_shared_case(v, case)
super().set_linear_case(v, case, bias)
def set_quadratic(self, u, v, biases):
"""Set biases associated with the interaction between `u` and `v`.
Args:
u: A variable in the discrete quadratic model, or a unique case
label. If `u` is a unique case label, `v` must be a unique
case label.
v: A variable in the discrete quadratic model, or a unique case
label. If `u` is a unique case label, `v` must be a unique
case label.
biases (float or array-like/dict):
The quadratic biases. If `u` and `v` are variables, then
`biases` may be a dictionary of the form
`{(case_u, case_v): bias, ...}` or a
:meth:`~DiscreteQuadraticModel.num_cases(u)` by
:meth:`~DiscreteQuadraticModel.num_cases(v)` array-like.
If `u` and `v` are unique case labels, the quadratic bias is a
real number.
Raises:
ValueError: If `u` is a unique case label and `v` is not.
"""
u_k = self._unique_label_case.get(u)
if u_k:
if v not in self._unique_label_case:
raise ValueError(f'unknown case label {v}')
v_m = self._unique_label_case[v]
super().set_quadratic_case(*u_k, *v_m, biases)
else:
super().set_quadratic(u, v, biases)
def set_quadratic_case(self, u, u_case, v, v_case, bias):
"""Set the bias associated with the interaction between two cases of
variables `u` and `v`.
Args:
u: A variable in the discrete quadratic model.
u_case: The case of `u`.
v: A variable in the discrete quadratic model.
v_case: The case of `v`.
bias (float): The quadratic bias.
"""
u_case = self._lookup_shared_case(u, u_case)
v_case = self._lookup_shared_case(v, v_case)
super().set_quadratic_case(u, u_case, v, v_case, bias)
def get_cases(self, v):
"""The cases of variable `v`.
Returns:
List of case labels for `v`, if case labels exist for `v`.
If case labels do not exist for `v`, returns a list of integers
from `0` to :meth:`~DiscreteQuadraticModel.num_cases(v)` - 1.
"""
range_ = range(self.num_cases(v))
map_ = self._shared_case_label.get(v)
if map_:
return [map_[case] for case in range_]
elif v in self._unique_label_vars:
return [self._unique_case_label[(v, case)] for case in range_]
else:
return list(range_)
def to_file(self, *, ignore_labels=False, **kwargs):
# We keep the default value the same as the super class, but if
# we're ignoring the labels, the serialization is identical to
# that of the unlabelled DQM
if ignore_labels:
return super().to_file(ignore_labels=True, **kwargs)
raise NotImplementedError("serialization for CaseLabelDQM is not implemented, "
"try using ignore_labels=True")
def map_sample(self, sample):
"""Transform a sample to reflect case labels.
Integer case values of variables with shared case labels are replaced by
the corresponding labels; variables with unique case labels are expanded
to one boolean entry per case label.
Args:
sample (dict): The sample to transform.
Returns:
The transformed sample.
"""
new_sample = {}
for var, value in sample.items():
map_ = self._shared_case_label.get(var)
if map_:
new_sample[var] = map_[value]
elif var in self._unique_label_vars:
for case in range(self.num_cases(var)):
new_sample[self._unique_case_label[(var, case)]] = (value == case)
else:
new_sample[var] = value
return new_sample
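# Hedged usage sketch (not part of the dimod source): shows how ``map_sample``
# translates integer case values back into case labels. Names below are
# illustrative; with a set of labels, the index-to-label order is arbitrary.
if __name__ == "__main__":
    _dqm = CaseLabelDQM()
    _u = _dqm.add_variable({'red', 'green', 'blue'}, shared_labels=True)
    _dqm.set_linear_case(_u, 'red', 1.0)
    # A sampler returns integer case values; map_sample restores the labels.
    print(_dqm.map_sample({_u: 2}))  # e.g. {0: 'blue'}; set order is arbitrary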
|
239507ec05821fb3afa1ae0e1ce1b29b2de9b8a0
|
d8f7ed6a82a32e81a3660681795ee7437b2b204c
|
/tools/metrics/utils/backend.py
|
4f05d63dbff9839b27afdbf04b758d63b9614e16
|
[
"Apache-2.0",
"BSL-1.0"
] |
permissive
|
ivafanas/sltbench
|
6ec8bedb0463706598173a781cc08165554aaea3
|
ec702203f406d3b1db71dac6bd39337d175cdc2c
|
refs/heads/master
| 2023-02-13T15:54:37.804010
| 2023-01-26T17:20:03
| 2023-01-27T03:26:01
| 61,478,446
| 153
| 13
|
BSL-1.0
| 2023-01-27T03:26:02
| 2016-06-19T12:12:16
|
C++
|
UTF-8
|
Python
| false
| false
| 1,554
|
py
|
backend.py
|
import parsers
_SLTBENCH_CPPMAIN = '''
#include <sltbench/Bench.h>
SLTBENCH_MAIN();
'''
_GOOGLEBENCH_CPPMAIN = '''
#include <benchmark/benchmark.h>
BENCHMARK_MAIN();
'''
_NONIUS_CPPMAIN = '''
#define NONIUS_RUNNER
#include <nonius/nonius.h++>
#include <nonius/main.h++>
'''
SLTBENCH = 'sltbench'
GOOGLEBENCH = 'googlebench'
NONIUS = 'nonius'
ALL = [SLTBENCH, GOOGLEBENCH, NONIUS]
class BackendSLTBench:
def __init__(self, install_path):
self.install_path = install_path
self.is_header_only = False
self.static_lib_name = 'sltbench'
self.required_static_libs = []
self.maincpp_code = _SLTBENCH_CPPMAIN
self.option_reporter = '--reporter=json'
self.result_parser = parsers.PerfResultsParserSLTBench()
class BackendGooglebench:
def __init__(self, install_path):
self.install_path = install_path
self.is_header_only = False
self.static_lib_name = 'benchmark'
self.required_static_libs = ['pthread']
self.maincpp_code = _GOOGLEBENCH_CPPMAIN
self.option_reporter = '--benchmark_format=json'
self.result_parser = parsers.PerfResultsParserGoogleBench()
class BackendNonius:
def __init__(self, install_path):
self.install_path = install_path
self.is_header_only = True
self.static_lib_name = None
self.required_static_libs = ['pthread']
self.maincpp_code = _NONIUS_CPPMAIN
self.option_reporter = '--reporter=junit'
self.result_parser = parsers.PerfResultsParserNonius()
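# Hedged usage sketch (not part of the original module): resolve a backend
# class from its name string; the mapping below is an assumption made for
# illustration only.
def make_backend(name, install_path):
    factories = {
        SLTBENCH: BackendSLTBench,
        GOOGLEBENCH: BackendGooglebench,
        NONIUS: BackendNonius,
    }
    if name not in factories:
        raise ValueError('unknown backend {!r}, expected one of {}'.format(name, ALL))
    return factories[name](install_path)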
|
7d8ba4c4e280f2e08c83e0524d74eccfa2e2fd71
|
afbae26b958b5ef20548402a65002dcc8e55b66a
|
/release/stubs.min/Autodesk/Revit/DB/__init___parts/ParameterValueProvider.py
|
cd6ce9a51218c252a0ac2da7af40861c257bd068
|
[
"MIT"
] |
permissive
|
gtalarico/ironpython-stubs
|
d875cb8932c7644f807dc6fde9dd513d159e4f5c
|
c7f6a6cb197e3949e40a4880a0b2a44e72d0a940
|
refs/heads/master
| 2023-07-12T01:43:47.295560
| 2022-05-23T18:12:06
| 2022-05-23T18:12:06
| 95,340,553
| 235
| 88
|
NOASSERTION
| 2023-07-05T06:36:28
| 2017-06-25T05:30:46
|
Python
|
UTF-8
|
Python
| false
| false
| 1,409
|
py
|
ParameterValueProvider.py
|
class ParameterValueProvider(FilterableValueProvider,IDisposable):
"""
Gets the value of a parameter from any element passed to getStringValue,
getDoubleValue, getIntegerValue, or getElementIdValue.
ParameterValueProvider(parameter: ElementId)
"""
def Dispose(self):
""" Dispose(self: FilterableValueProvider,A_0: bool) """
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: FilterableValueProvider,disposing: bool) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,parameter):
""" __new__(cls: type,parameter: ElementId) """
pass
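# Hedged usage sketch (illustrative; exact filter-rule APIs vary by Revit
# version):
#
#     provider = ParameterValueProvider(ElementId(BuiltInParameter.ALL_MODEL_MARK))
#     # Combine `provider` with an evaluator (e.g. FilterStringEquals) to
#     # build a FilterRule for an ElementParameterFilter.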
Parameter=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The parameter used to provide a string, integer, double-precision, or ElementId
value on request for a given element.
Get: Parameter(self: ParameterValueProvider) -> ElementId
Set: Parameter(self: ParameterValueProvider)=value
"""
|
e980b7a3a974569ee40bb2d0a036c084e019d29c
|
de033d5aba647555fa4fd4844df9b563cfc1e2f4
|
/py/depgraph/infos.py
|
2aa08a1efebc1f61ef012c8b8f138d574b1af041
|
[
"Apache-2.0"
] |
permissive
|
eth-sri/debin
|
16fc0499901149bdc9818f268178569469f197df
|
715771c1e1468eaafbb599d8bf81a19b5b2e22d2
|
refs/heads/master
| 2022-08-14T12:31:13.648564
| 2022-05-20T15:12:01
| 2022-05-20T15:12:01
| 160,524,006
| 392
| 64
|
Apache-2.0
| 2022-06-22T05:14:48
| 2018-12-05T13:40:37
|
Python
|
UTF-8
|
Python
| false
| false
| 2,795
|
py
|
infos.py
|
from elements.givs import IntConst, StringConst, SwitchTable, Flag, Insn
from elements.givs import CodeOffset, VirtualElm
from elements.offsets import GivOffset, TempOffset, IndirectOffset
from elements.offsets import DirectOffset, StringArrayOffset
from elements.function import Function
from elements.regs import GivReg, Reg
from elements.ttype import Ttype
def coarse(node):
if isinstance(node, IntConst):
return 'INT'
elif isinstance(node, StringConst):
return 'STR'
elif isinstance(node, SwitchTable):
return 'SWITCH'
elif isinstance(node, Flag):
return '{}'.format(node.base_flag)
elif isinstance(node, Insn):
return node.name
elif isinstance(node, CodeOffset):
return 'CODE'
elif isinstance(node, VirtualElm):
return 'VIRTUAL'
elif isinstance(node, IndirectOffset):
return '{}:O'.format(node.base_pointer)
elif isinstance(node, TempOffset):
return '{}:T'.format(node.base_pointer)
elif isinstance(node, GivOffset):
return node.offset
elif isinstance(node, DirectOffset):
return 'DIRECT'
elif isinstance(node, StringArrayOffset):
return 'SARRAY'
elif isinstance(node, GivReg):
return '{}:GIVR'.format(node.base_register)
elif isinstance(node, Reg):
return '{}:R'.format(node.base_register)
elif isinstance(node, Function):
return 'FUNC'
elif isinstance(node, Ttype):
return '{}:TTYPE'.format(coarse(node.owner))
def fine(node):
if isinstance(node, IntConst):
return 'INT({})({})'.format(node.width, node.value)
elif isinstance(node, StringConst):
return 'STR'
elif isinstance(node, SwitchTable):
return 'SWITCH'
elif isinstance(node, Flag):
return '{}:{}'.format(node.base_flag, node.index)
elif isinstance(node, Insn):
return node.name
elif isinstance(node, CodeOffset):
return 'CODE'
elif isinstance(node, VirtualElm):
return 'VIRTUAL'
elif isinstance(node, IndirectOffset):
return '{}:O:{}'.format(node.base_pointer, node.offset)
elif isinstance(node, TempOffset):
return '{}:T:{}'.format(node.base_pointer, node.offset)
elif isinstance(node, GivOffset):
return node.offset
elif isinstance(node, DirectOffset):
return 'DIRECT'
elif isinstance(node, StringArrayOffset):
return 'SARRAY'
elif isinstance(node, GivReg):
return '{}:GIVR:{}'.format(node.base_register, node.index)
elif isinstance(node, Reg):
return '{}:R:{}'.format(node.base_register, node.index)
elif isinstance(node, Function):
return 'FUNC'
elif isinstance(node, Ttype):
return '{}:TTYPE'.format(fine(node.owner))
INFOS = [coarse]
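# Hedged illustration (constructor signatures are assumptions; the real
# element classes may take different arguments):
#
#     coarse(IntConst(...)) -> 'INT'
#     fine(IntConst(...))   -> 'INT(<width>)(<value>)'
#
# `coarse` buckets a node into a broad category, while `fine` additionally
# encodes per-node attributes such as widths, offsets, and register indices.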
|
b6384ad400da96a8c38b120bf59aacd0f0da2f00
|
a3d6556180e74af7b555f8d47d3fea55b94bcbda
|
/third_party/blink/web_tests/external/wpt/webdriver/tests/classic/send_alert_text/send.py
|
df218c803bb0a4f558c8bc1547dc3982af01216b
|
[
"LGPL-2.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"GPL-1.0-or-later",
"GPL-2.0-only",
"LGPL-2.0-only",
"BSD-2-Clause",
"LicenseRef-scancode-other-copyleft",
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] |
permissive
|
chromium/chromium
|
aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
|
a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
|
refs/heads/main
| 2023-08-24T00:35:12.585945
| 2023-08-23T22:01:11
| 2023-08-23T22:01:11
| 120,360,765
| 17,408
| 7,102
|
BSD-3-Clause
| 2023-09-10T23:44:27
| 2018-02-05T20:55:32
| null |
UTF-8
|
Python
| false
| false
| 3,071
|
py
|
send.py
|
import pytest
from webdriver.error import NoSuchAlertException
from webdriver.transport import Response
from tests.support.asserts import assert_error, assert_success
from tests.support.sync import Poll
@pytest.fixture
def page(session, inline):
session.url = inline("""
<script>window.result = window.prompt('Enter Your Name: ', 'Name');</script>
""")
def send_alert_text(session, text=None):
return session.transport.send(
"POST", "session/{session_id}/alert/text".format(**vars(session)),
{"text": text})
def test_null_parameter_value(session, http):
path = "/session/{session_id}/alert/text".format(**vars(session))
with http.post(path, None) as response:
assert_error(Response.from_http(response), "invalid argument")
def test_null_response_value(session, page):
response = send_alert_text(session, "Federer")
value = assert_success(response)
assert value is None
@pytest.mark.parametrize("text", [None, {}, [], 42, True])
def test_invalid_input(session, page, text):
response = send_alert_text(session, text)
assert_error(response, "invalid argument")
def test_no_top_browsing_context(session, closed_window):
response = send_alert_text(session, "Federer")
assert_error(response, "no such window")
def test_no_browsing_context(session, closed_frame):
response = send_alert_text(session, "Federer")
assert_error(response, "no such alert")
def test_no_user_prompt(session):
response = send_alert_text(session, "Federer")
assert_error(response, "no such alert")
@pytest.mark.parametrize("dialog_type", ["alert", "confirm"])
def test_alert_element_not_interactable(session, inline, dialog_type):
session.url = inline("<script>window.{}('Hello');</script>".format(dialog_type))
response = send_alert_text(session, "Federer")
assert_error(response, "element not interactable")
@pytest.mark.parametrize("dialog_type", ["alert", "confirm"])
def test_chained_alert_element_not_interactable(session, inline, dialog_type):
session.url = inline("<script>window.{}('Hello');</script>".format(dialog_type))
session.alert.accept()
session.url = inline("<script>window.{}('Hello');</script>".format(dialog_type))
response = send_alert_text(session, "Federer")
assert_error(response, "element not interactable")
@pytest.mark.parametrize("text", ["", "Federer", " Fed erer ", "Fed\terer"])
def test_send_alert_text(session, page, text):
send_response = send_alert_text(session, text)
assert_success(send_response)
session.alert.accept()
assert session.execute_script("return window.result") == text
def test_unexpected_alert(session):
session.execute_script("setTimeout(function() { prompt('Hello'); }, 100);")
wait = Poll(
session,
timeout=5,
ignored_exceptions=NoSuchAlertException,
message="No user prompt with text 'Hello' detected")
wait.until(lambda s: s.alert.text == "Hello")
response = send_alert_text(session, "Federer")
assert_success(response)
|
5ccb61d6b5a4b4bfe9d5982c1e9cef7d962a234b
|
831c1e735a1b7d1bc6aa779bba88d3d3efe42565
|
/setup.py
|
4d1bd2ec7eac7376918bd196ce8997f5e410ab63
|
[
"Apache-2.0"
] |
permissive
|
pyGrowler/Growler
|
a46db1909d1877d332c103194dd1dabb1f8f3b15
|
5492466d8828115bb04c665917d6aeb4f4323f44
|
refs/heads/dev
| 2023-03-08T01:10:20.093556
| 2020-03-08T07:51:41
| 2020-03-08T07:51:41
| 25,556,740
| 814
| 38
| null | 2016-05-16T05:22:07
| 2014-10-22T02:11:12
|
Python
|
UTF-8
|
Python
| false
| false
| 435
|
py
|
setup.py
|
#!/usr/bin/env python3
#
# setup.py
#
from setuptools import setup
metadata = {}
with open("growler/__meta__.py") as f:
exec(f.read(), metadata)
tar_url = 'https://github.com/pyGrowler/growler/archive/v%s.tar.gz' % (metadata['version']) # noqa
# Other metadata and options can be found in setup.cfg
setup(
version=metadata['version'],
license=metadata['license'],
url=metadata['url'],
download_url=tar_url,
)
|
da9f09e70571e193916c46d58b305de94c46d628
|
6181fcd4a266d963a0ee85971768c97922ca77cd
|
/src/garage/torch/modules/multi_headed_mlp_module.py
|
fcb4479744c934f0b18d6a800e020244837affa3
|
[
"MIT"
] |
permissive
|
rlworkgroup/garage
|
5d215bbecb3a4e74b504988d6684a7b04df69a80
|
2d594803636e341660cab0e81343abbe9a325353
|
refs/heads/master
| 2023-08-21T22:58:49.338034
| 2023-01-04T06:06:27
| 2023-01-04T06:06:27
| 136,846,372
| 1,832
| 363
|
MIT
| 2023-09-11T11:36:40
| 2018-06-10T21:31:23
|
Python
|
UTF-8
|
Python
| false
| false
| 6,078
|
py
|
multi_headed_mlp_module.py
|
"""MultiHeadedMLPModule."""
import copy
import torch
import torch.nn as nn
from garage.torch import NonLinearity
class MultiHeadedMLPModule(nn.Module):
"""MultiHeadedMLPModule Model.
A PyTorch module composed only of a multi-layer perceptron (MLP) with
multiple parallel output layers which maps real-valued inputs to
real-valued outputs. The number of outputs is n_heads, and the shape of
each output depends on the corresponding output dimension.
Args:
n_heads (int): Number of different output layers
input_dim (int): Dimension of the network input.
output_dims (int or list or tuple): Dimension of the network output.
hidden_sizes (list[int]): Output dimension of dense layer(s).
For example, (32, 32) means this MLP consists of two
hidden layers, each with 32 hidden units.
hidden_nonlinearity (callable or torch.nn.Module or list or tuple):
Activation function for intermediate dense layer(s).
It should return a torch.Tensor. Set it to None to maintain a
linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
torch.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
torch.Tensor.
output_nonlinearities (callable or torch.nn.Module or list or tuple):
Activation function for output dense layer. It should return a
torch.Tensor. Set it to None to maintain a linear activation.
Size of the parameter should be 1 or equal to n_heads.
output_w_inits (callable or list or tuple): Initializer function for
the weight of output dense layer(s). The function should return a
torch.Tensor. Size of the parameter should be 1 or equal to n_heads.
output_b_inits (callable or list or tuple): Initializer function for
the bias of output dense layer(s). The function should return a
torch.Tensor. Size of the parameter should be 1 or equal to n_heads.
layer_normalization (bool): Bool for using layer normalization or not.
"""
def __init__(self,
n_heads,
input_dim,
output_dims,
hidden_sizes,
hidden_nonlinearity=torch.relu,
hidden_w_init=nn.init.xavier_normal_,
hidden_b_init=nn.init.zeros_,
output_nonlinearities=None,
output_w_inits=nn.init.xavier_normal_,
output_b_inits=nn.init.zeros_,
layer_normalization=False):
super().__init__()
output_dims = self._check_parameter_for_output_layer(
'output_dims', output_dims, n_heads)
output_w_inits = self._check_parameter_for_output_layer(
'output_w_inits', output_w_inits, n_heads)
output_b_inits = self._check_parameter_for_output_layer(
'output_b_inits', output_b_inits, n_heads)
output_nonlinearities = self._check_parameter_for_output_layer(
'output_nonlinearities', output_nonlinearities, n_heads)
self._layers = nn.ModuleList()
prev_size = input_dim
for size in hidden_sizes:
hidden_layers = nn.Sequential()
if layer_normalization:
hidden_layers.add_module('layer_normalization',
nn.LayerNorm(prev_size))
linear_layer = nn.Linear(prev_size, size)
hidden_w_init(linear_layer.weight)
hidden_b_init(linear_layer.bias)
hidden_layers.add_module('linear', linear_layer)
if hidden_nonlinearity:
hidden_layers.add_module('non_linearity',
NonLinearity(hidden_nonlinearity))
self._layers.append(hidden_layers)
prev_size = size
self._output_layers = nn.ModuleList()
for i in range(n_heads):
output_layer = nn.Sequential()
linear_layer = nn.Linear(prev_size, output_dims[i])
output_w_inits[i](linear_layer.weight)
output_b_inits[i](linear_layer.bias)
output_layer.add_module('linear', linear_layer)
if output_nonlinearities[i]:
output_layer.add_module('non_linearity',
NonLinearity(output_nonlinearities[i]))
self._output_layers.append(output_layer)
@classmethod
def _check_parameter_for_output_layer(cls, var_name, var, n_heads):
"""Check that the input parameters for the output layers are valid.
Args:
var_name (str): variable name
var (any): variable to be checked
n_heads (int): number of heads
Returns:
list: list of variables (length of n_heads)
Raises:
ValueError: if the variable is a list or tuple whose length is
neither 1 nor n_heads
"""
if isinstance(var, (list, tuple)):
if len(var) == 1:
return list(var) * n_heads
if len(var) == n_heads:
return var
msg = ('{} should be either an integer or a collection of length '
'n_heads ({}), but {} provided.')
raise ValueError(msg.format(var_name, n_heads, var))
return [copy.deepcopy(var) for _ in range(n_heads)]
# pylint: disable=arguments-differ
def forward(self, input_val):
"""Forward method.
Args:
input_val (torch.Tensor): Input values with (N, *, input_dim)
shape.
Returns:
List[torch.Tensor]: Output values
"""
x = input_val
for layer in self._layers:
x = layer(x)
return [output_layer(x) for output_layer in self._output_layers]
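# Hedged usage sketch (not part of the garage source): builds a two-headed
# MLP and checks the output shapes. All dimensions below are illustrative.
if __name__ == '__main__':
    module = MultiHeadedMLPModule(n_heads=2,
                                  input_dim=4,
                                  output_dims=[3, 1],
                                  hidden_sizes=(32, 32))
    outputs = module(torch.zeros(8, 4))
    assert outputs[0].shape == (8, 3) and outputs[1].shape == (8, 1)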
|
4baf3148aae5a9c38031744b8802db602eccc4c7
|
1095cfe2e29ddf4e4c5e12d713bd12f45c9b6f7d
|
/src/python/gem5/components/boards/se_binary_workload.py
|
c62a1b67eaf031b58d82f4cd8cd2a51543ca2d3c
|
[
"BSD-3-Clause",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.0-or-later",
"MIT"
] |
permissive
|
gem5/gem5
|
9ec715ae036c2e08807b5919f114e1d38d189bce
|
48a40cf2f5182a82de360b7efa497d82e06b1631
|
refs/heads/stable
| 2023-09-03T15:56:25.819189
| 2023-08-31T05:53:03
| 2023-08-31T05:53:03
| 27,425,638
| 1,185
| 1,177
|
BSD-3-Clause
| 2023-09-14T08:29:31
| 2014-12-02T09:46:00
|
C++
|
UTF-8
|
Python
| false
| false
| 11,136
|
py
|
se_binary_workload.py
|
# Copyright (c) 2021 The Regents of the University of California
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .abstract_board import AbstractBoard
from ...resources.resource import (
FileResource,
AbstractResource,
BinaryResource,
CheckpointResource,
SimpointResource,
SimpointDirectoryResource,
)
from ..processors.switchable_processor import SwitchableProcessor
from gem5.resources.elfie import ELFieInfo
from gem5.resources.looppoint import Looppoint
from m5.objects import SEWorkload, Process
from typing import Optional, List, Union
from m5.util import warn
from pathlib import Path
class SEBinaryWorkload:
"""
This class is used to enable simple Syscall-Execution (SE) mode execution
of a binary.
For this to function correctly the SEBinaryWorkload class should be added
as a superclass to a board (i.e., something that inherits from
AbstractBoard).
**Important Notes:** At present this implementation is limited. A single
process is added to all cores as the workload. Therefore, despite allowing
for multi-core setups, multi-program workloads are not presently supported.
"""
def set_se_binary_workload(
self,
binary: BinaryResource,
exit_on_work_items: bool = True,
stdin_file: Optional[FileResource] = None,
stdout_file: Optional[Path] = None,
stderr_file: Optional[Path] = None,
env_list: Optional[List[str]] = None,
arguments: List[str] = [],
checkpoint: Optional[Union[Path, CheckpointResource]] = None,
) -> None:
"""Set up the system to run a specific binary.
**Limitations**
* Only supports single threaded applications.
* Dynamically linked executables are partially supported when the host
ISA and the simulated ISA are the same.
:param binary: The resource encapsulating the binary to be run.
:param exit_on_work_items: Whether the simulation should exit on work
items. True by default.
:param stdin_file: The input file for the binary
:param stdout_file: The output file for the binary
:param stderr_file: The error output file for the binary
:param env_list: The environment variables defined for the binary
:param arguments: The input arguments for the binary
:param checkpoint: The checkpoint directory. Used to restore the
simulation to that checkpoint.
"""
# We assume this is in a multiple-inheritance setup with an
# AbstractBoard. This function will not work otherwise.
assert isinstance(self, AbstractBoard)
# If we are setting a workload of this type, we need to run as a
# SE-mode simulation.
self._set_fullsystem(False)
binary_path = binary.get_local_path()
self.workload = SEWorkload.init_compatible(binary_path)
process = Process()
process.executable = binary_path
process.cmd = [binary_path] + arguments
if stdin_file is not None:
process.input = stdin_file.get_local_path()
if stdout_file is not None:
process.output = stdout_file.as_posix()
if stderr_file is not None:
process.errout = stderr_file.as_posix()
if env_list is not None:
process.env = env_list
if isinstance(self.get_processor(), SwitchableProcessor):
# This is a hack to get switchable processors working correctly in
# SE mode. The "get_cores" API for processors only gets the current
# switched-in cores and, in most cases, this is what the script
# required. In the case there are switched-out cores via the
# SwitchableProcessor, we sometimes need to apply things to ALL
# cores (switched-in or switched-out). In this case we have an
# `__all_cores` function. Here we must apply the process to every
# core.
#
# A better API for this which avoids `isinstance` checks would be
# welcome.
for core in self.get_processor()._all_cores():
core.set_workload(process)
else:
for core in self.get_processor().get_cores():
core.set_workload(process)
# Set whether to exit on work items for the se_workload
self.exit_on_work_items = exit_on_work_items
# Here we set `self._checkpoint`. This is then used by the
# Simulator module to setup checkpoints.
if checkpoint:
if isinstance(checkpoint, Path):
self._checkpoint = checkpoint
elif isinstance(checkpoint, AbstractResource):
self._checkpoint = Path(checkpoint.get_local_path())
else:
raise Exception(
"The checkpoint must be None, Path, or "
"AbstractResource."
)
def set_se_simpoint_workload(
self,
binary: BinaryResource,
arguments: List[str] = [],
simpoint: SimpointResource = None,
checkpoint: Optional[Union[Path, CheckpointResource]] = None,
) -> None:
"""Set up the system to run a SimPoint workload.
**Limitations**
* Only supports single threaded applications.
* Dynamically linked executables are partially supported when the host
ISA and the simulated ISA are the same.
**Warning:** Simpoints only works with one core
:param binary: The resource encapsulating the binary to be run.
:param arguments: The input arguments for the binary
:param simpoint: The SimpointResource that contains the list of
SimPoint starting instructions, the list of weights, and the SimPoint
interval.
:param checkpoint: The checkpoint directory. Used to restore the
simulation to that checkpoint.
"""
self._simpoint_resource = simpoint
if self.get_processor().get_num_cores() > 1:
warn("SimPoints only works with one core")
self.get_processor().get_cores()[0]._set_simpoint(
inst_starts=self._simpoint_resource.get_simpoint_start_insts(),
board_initialized=False,
)
# Call set_se_binary_workload after SimPoint setup is complete
self.set_se_binary_workload(
binary=binary,
arguments=arguments,
checkpoint=checkpoint,
)
def get_simpoint(self) -> SimpointResource:
"""
Returns the SimpointResource object that was set. If no SimpointResource
object has been set, an exception is thrown.
"""
if getattr(self, "_simpoint_resource", None):
return self._simpoint_resource
raise Exception("This board does not have a simpoint set.")
def set_se_looppoint_workload(
self,
binary: AbstractResource,
looppoint: Looppoint,
arguments: List[str] = [],
checkpoint: Optional[Union[Path, AbstractResource]] = None,
region_id: Optional[Union[int, str]] = None,
) -> None:
"""Set up the system to run a LoopPoint workload.
**Limitations**
* Dynamically linked executables are partially supported when the host
ISA and the simulated ISA are the same.
:param binary: The resource encapsulating the binary to be run.
:param looppoint: The Looppoint object that contains all the information
gathered from the LoopPoint files and a LoopPointManager that will raise
exit events for LoopPoints
:param arguments: The input arguments for the binary
:param checkpoint: The checkpoint directory. Used to restore the
simulation to that checkpoint.
:param region_id: If set, will only load the Looppoint region
corresponding to that ID.
"""
assert isinstance(looppoint, Looppoint)
self._looppoint_object = looppoint
if region_id:
self._looppoint_object.set_target_region_id(region_id=region_id)
self._looppoint_object.setup_processor(self.get_processor())
# Call set_se_binary_workload after LoopPoint setup is complete
self.set_se_binary_workload(
binary=binary,
arguments=arguments,
checkpoint=checkpoint,
)
def set_se_elfie_workload(
self,
elfie: AbstractResource,
elfie_info: ELFieInfo,
arguments: List[str] = [],
checkpoint: Optional[Union[Path, AbstractResource]] = None,
) -> None:
"""Set up the system to run a ELFie workload.
**Limitations**
* Dynamically linked executables are partially supported when the host
ISA and the simulated ISA are the same.
:param elfie: The resource encapsulating the binary elfie to be run.
:param elfie_info: The ELFieInfo object that contains all the
information for the ELFie
:param arguments: The input arguments for the binary
:param checkpoint: The checkpoint directory. Used to restore the
simulation to that checkpoint.
"""
assert isinstance(elfie_info, ELFieInfo)
self._elfie_info_object = elfie_info
self._elfie_info_object.setup_processor(self.get_processor())
# Call set_se_binary_workload after ELFie setup is complete
self.set_se_binary_workload(
binary=elfie,
arguments=arguments,
checkpoint=checkpoint,
)
def get_looppoint(self) -> Looppoint:
"""
Returns the Looppoint object that was set. If no Looppoint object has
been set, an exception is thrown.
"""
if getattr(self, "_looppoint_object", None):
return self._looppoint_object
raise Exception("This board does not have a looppoint set.")
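# Hedged usage sketch (illustrative; board construction and resource names
# are assumptions, not part of this module):
#
#     board = SimpleBoard(...)  # any board that also inherits SEBinaryWorkload
#     board.set_se_binary_workload(
#         binary=obtain_resource("x86-hello64-static"),
#         arguments=[],
#     )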
|
5e95661a580912608f6402ef4740610f13d00eaa
|
fa1ad2e2ac7e376fc7cb3b3a6e1bb88eed3e80be
|
/dts/airbyte/airbyte-integrations/connectors/source-github/unit_tests/test_source.py
|
98ac1bc7fc809cf54b50bfd7f0fe8d46bc3e7305
|
[
"MIT",
"Elastic-2.0",
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
alldatacenter/alldata
|
7bc7713c9f1d56ad6b8e59ea03206d1073b7e047
|
8d5f9a2d49ab8f9e85ccf058cb02c2fda287afc6
|
refs/heads/master
| 2023-08-05T07:32:25.442740
| 2023-08-03T13:17:24
| 2023-08-03T13:17:24
| 213,321,771
| 774
| 250
|
Apache-2.0
| 2023-09-06T17:35:32
| 2019-10-07T07:36:18
| null |
UTF-8
|
Python
| false
| false
| 5,292
|
py
|
test_source.py
|
#
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
from unittest.mock import MagicMock
import pytest
import responses
from airbyte_cdk.models import AirbyteConnectionStatus, Status
from source_github.source import SourceGithub
def check_source(repo_line: str) -> AirbyteConnectionStatus:
source = SourceGithub()
config = {"access_token": "test_token", "repository": repo_line}
logger_mock = MagicMock()
return source.check(logger_mock, config)
@responses.activate
def test_check_connection_repos_only():
responses.add("GET", "https://api.github.com/repos/airbyte", json={"full_name": "airbyte"})
status = check_source("airbyte airbyte airbyte")
assert not status.message
assert status.status == Status.SUCCEEDED
# Only one request since 3 repos have same name
assert len(responses.calls) == 1
@responses.activate
def test_check_connection_repos_and_org_repos():
repos = [{"name": f"name {i}", "full_name": f"full name {i}", "updated_at": "2020-01-01T00:00:00Z"} for i in range(1000)]
responses.add(
"GET", "https://api.github.com/repos/airbyte/test", json={"full_name": "airbyte/test", "organization": {"login": "airbyte"}}
)
responses.add(
"GET", "https://api.github.com/repos/airbyte/test2", json={"full_name": "airbyte/test2", "organization": {"login": "airbyte"}}
)
responses.add("GET", "https://api.github.com/orgs/airbytehq/repos", json=repos)
responses.add("GET", "https://api.github.com/orgs/org/repos", json=repos)
status = check_source("airbyte/test airbyte/test2 airbytehq/* org/*")
assert not status.message
assert status.status == Status.SUCCEEDED
# Two requests for repos and two for organization
assert len(responses.calls) == 4
@responses.activate
def test_check_connection_org_only():
repos = [{"name": f"name {i}", "full_name": f"full name {i}", "updated_at": "2020-01-01T00:00:00Z"} for i in range(1000)]
responses.add("GET", "https://api.github.com/orgs/airbytehq/repos", json=repos)
status = check_source("airbytehq/*")
assert not status.message
assert status.status == Status.SUCCEEDED
# One request to check organization
assert len(responses.calls) == 1
@responses.activate
def test_get_branches_data():
repository_args = {"repositories": ["airbytehq/integration-test"], "page_size_for_large_streams": 10}
source = SourceGithub()
responses.add(
"GET",
"https://api.github.com/repos/airbytehq/integration-test",
json={"full_name": "airbytehq/integration-test", "default_branch": "master"},
)
responses.add(
"GET",
"https://api.github.com/repos/airbytehq/integration-test/branches",
json=[
{"repository": "airbytehq/integration-test", "name": "feature/branch_0"},
{"repository": "airbytehq/integration-test", "name": "feature/branch_1"},
{"repository": "airbytehq/integration-test", "name": "feature/branch_2"},
{"repository": "airbytehq/integration-test", "name": "master"},
],
)
default_branches, branches_to_pull = source._get_branches_data("", repository_args)
assert default_branches == {"airbytehq/integration-test": "master"}
assert branches_to_pull == {"airbytehq/integration-test": ["master"]}
default_branches, branches_to_pull = source._get_branches_data(
"airbytehq/integration-test/feature/branch_0 airbytehq/integration-test/feature/branch_1 airbytehq/integration-test/feature/branch_3",
repository_args,
)
assert default_branches == {"airbytehq/integration-test": "master"}
assert len(branches_to_pull["airbytehq/integration-test"]) == 2
assert "feature/branch_0" in branches_to_pull["airbytehq/integration-test"]
assert "feature/branch_1" in branches_to_pull["airbytehq/integration-test"]
@responses.activate
def test_get_org_repositories():
source = SourceGithub()
with pytest.raises(Exception):
config = {"repository": ""}
source._get_org_repositories(config, authenticator=None)
responses.add(
"GET",
"https://api.github.com/repos/airbytehq/integration-test",
json={"full_name": "airbytehq/integration-test", "organization": {"login": "airbytehq"}},
)
responses.add(
"GET",
"https://api.github.com/orgs/docker/repos",
json=[
{"full_name": "docker/docker-py", "updated_at": "2020-01-01T00:00:00Z"},
{"full_name": "docker/compose", "updated_at": "2020-01-01T00:00:00Z"},
],
)
config = {"repository": "airbytehq/integration-test docker/*"}
organisations, repositories = source._get_org_repositories(config, authenticator=None)
assert set(repositories) == {"airbytehq/integration-test", "docker/docker-py", "docker/compose"}
assert set(organisations) == {"airbytehq", "docker"}
def test_organization_or_repo_available():
SourceGithub._get_org_repositories = MagicMock(return_value=(False, False))
source = SourceGithub()
with pytest.raises(Exception) as exc_info:
config = {"access_token": "test_token", "repository": ""}
source.streams(config=config)
assert exc_info.value.args[0] == "No streams available. Please check permissions"
|
99a6a5dfbf75a315c0ca04c730cfd643ce712e71
|
568a2667a1b6ec33a0dec9ac01844ef74e11ab2b
|
/landlab/graph/object/at_patch.py
|
0d1d537fb64eebf9ff7372fae6a25dcdc447c75c
|
[
"MIT"
] |
permissive
|
landlab/landlab
|
0bcc9b7b1d8c4d7f79bad687e1526b80ebc83728
|
1cd72e5832ece1aa922cd1b239e2e94ed0f11f8b
|
refs/heads/master
| 2023-08-31T07:24:21.545523
| 2023-08-29T18:51:06
| 2023-08-29T18:51:06
| 19,599,383
| 326
| 313
|
MIT
| 2023-09-14T19:12:23
| 2014-05-09T04:52:50
|
Python
|
UTF-8
|
Python
| false
| false
| 617
|
py
|
at_patch.py
|
import numpy as np
from .ext.at_patch import get_nodes_at_patch as _get_nodes_at_patch
def get_nodes_at_patch(graph):
"""Set up data structure that describes node-patch connectivity.
Parameters
----------
graph : Graph
A graph that exposes ``links_at_patch`` (links that define each patch)
and ``nodes_at_link`` (nodes that define each link).
Returns
-------
ndarray
Nodes that define each patch.
"""
nodes_at_patch = np.full(graph.links_at_patch.shape, -1, dtype=int)
_get_nodes_at_patch(graph.links_at_patch, graph.nodes_at_link, nodes_at_patch)
return nodes_at_patch
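# Hedged illustration (assumes any landlab graph that exposes
# ``links_at_patch`` and ``nodes_at_link``):
#
#     nodes_at_patch = get_nodes_at_patch(graph)
#     nodes_at_patch[p]  # node ids of patch ``p``, padded with -1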
|
89c9c78bf6a22baacf12ef81073bd6b60702ada8
|
ebec36c2280a1aac0624019133bffe7152f09964
|
/tutorials/ner_tweets/scripts/constants.py
|
463141d50a5b515a1e65d6fbc9304c61ceb016cb
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
explosion/projects
|
8d783d5b150f03b6a68f345f1a50b3a692ff1745
|
e24a085669b4db6918ffeb2752846089d8dee57a
|
refs/heads/v3
| 2023-08-09T14:10:08.949067
| 2023-07-31T11:43:44
| 2023-07-31T11:43:44
| 223,165,649
| 1,171
| 492
|
MIT
| 2023-08-11T13:31:24
| 2019-11-21T12:08:52
|
Python
|
UTF-8
|
Python
| false
| false
| 640
|
py
|
constants.py
|
from pathlib import Path
ASSETS_PATH = Path(__file__).parent.parent / "assets"
# Source model / data for annotators
BTC_MODEL_PATH = ASSETS_PATH / "data" / "btc"
WIKIDATA_PATH = ASSETS_PATH / "wikidata_tokenised.json"
CRUNCHBASE_PATH = ASSETS_PATH / "crunchbase.json"
NAMES_PATH = ASSETS_PATH / "first_names.json"
# Taken from skweak's data utilities
# https://github.com/NorskRegnesentral/skweak/blob/670fcdec680930ce3e497886d06d61e6a1f2c195/examples/ner/data_utils.py
NAME_PREFIXES = [
"-",
"von",
"van",
"de",
"di",
"le",
"la",
"het",
"'t",
"dem",
"der",
"den",
"d'",
"ter",
]
|
b8082456d9088cb7a6794dff0268efbdcfada0fb
|
642ba1746fed0b722a127b8426eca987df6efc61
|
/test/rxd/hybrid/test_pure_diffusion_hybrid.py
|
cd5bd2c02b83e29ddca7d022e74e323ed8149da5
|
[
"BSD-3-Clause"
] |
permissive
|
neuronsimulator/nrn
|
23781d978fe9253b0e3543f41e27252532b35459
|
b786c36d715ba0f6da1ba8bdf5d2338c939ecf51
|
refs/heads/master
| 2023-08-09T00:13:11.123525
| 2023-08-04T13:11:02
| 2023-08-04T13:11:02
| 71,627,569
| 313
| 171
|
NOASSERTION
| 2023-09-14T17:48:03
| 2016-10-22T08:47:37
|
C++
|
UTF-8
|
Python
| false
| false
| 2,820
|
py
|
test_pure_diffusion_hybrid.py
|
import pytest
import numpy
from testutils import compare_data, tol
@pytest.fixture
def ics_diffusion_hybrid(neuron_instance):
"""A model using intracellular diffusion in 1D and 3D sections"""
h, rxd, data, save_path = neuron_instance
dend1 = h.Section(name="dend1")
dend1.diam = 2
dend1.nseg = 1
dend1.L = 10
dend2 = h.Section(name="dend2")
dend2.diam = 2
dend2.nseg = 11
dend2.L = 10
dend3 = h.Section(name="dend3")
dend3.diam = 2
dend3.nseg = 1
dend3.L = 10
dend2.connect(dend1)
dend3.connect(dend2)
diff_constant = 1
r = rxd.Region(h.allsec(), dx=0.75)
rxd.set_solve_type([dend2], dimension=3)
ca = rxd.Species(
r,
d=diff_constant,
initial=lambda node: 1
if (0.8 < node.x and node in dend1) or (node.x < 0.2 and node in dend2)
else 0,
)
model = ([dend1, dend2, dend3], r, ca)
yield (neuron_instance, model)
def test_pure_diffusion_hybrid(ics_diffusion_hybrid):
"""Test ics_diffusion_hybrid with fixed step methods"""
neuron_instance, model = ics_diffusion_hybrid
h, rxd, data, save_path = neuron_instance
dend, r, ca = model
h.dt *= 50
h.finitialize(-65)
loss = -(numpy.array(ca.nodes.concentration) * numpy.array(ca.nodes.volume)).sum()
h.continuerun(125)
loss += (numpy.array(ca.nodes.concentration) * numpy.array(ca.nodes.volume)).sum()
if not save_path:
assert loss < tol
max_err = compare_data(data)
assert max_err < tol
def test_pure_diffusion_hybrid_cvode(ics_diffusion_hybrid):
"""Test ics_diffusion_hybrid with variable step methods"""
neuron_instance, model = ics_diffusion_hybrid
h, rxd, data, save_path = neuron_instance
dend, r, ca = model
h.CVode().active(True)
h.finitialize(-65)
loss = -(numpy.array(ca.nodes.concentration) * numpy.array(ca.nodes.volume)).sum()
h.continuerun(125)
loss += (numpy.array(ca.nodes.concentration) * numpy.array(ca.nodes.volume)).sum()
if not save_path:
assert loss < tol
max_err = compare_data(data)
assert max_err < tol
def test_pure_diffusion_hybrid_small_grid(ics_diffusion_hybrid):
"""Test ics_diffusion_hybrid with fixed step methods where 1D sections are
outside the 3D grid
"""
neuron_instance, model = ics_diffusion_hybrid
h, rxd, data, save_path = neuron_instance
dend, r, ca = model
dend[1].diam = 0.75
h.dt *= 50
h.finitialize(-65)
loss = -(numpy.array(ca.nodes.concentration) * numpy.array(ca.nodes.volume)).sum()
h.continuerun(125)
loss += (numpy.array(ca.nodes.concentration) * numpy.array(ca.nodes.volume)).sum()
if not save_path:
assert loss < tol
max_err = compare_data(data)
assert max_err < tol
|
414d107528ac4df1c1d369500c5207643353899a
|
010279e2ba272d09e9d2c4e903722e5faba2cf7a
|
/contrib/python/Pygments/py3/pygments/lexers/asm.py
|
0035c723f094771057a20308f2cb1d9129079e50
|
[
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
catboost/catboost
|
854c1a1f439a96f1ae6b48e16644be20aa04dba2
|
f5042e35b945aded77b23470ead62d7eacefde92
|
refs/heads/master
| 2023-09-01T12:14:14.174108
| 2023-09-01T10:01:01
| 2023-09-01T10:22:12
| 97,556,265
| 8,012
| 1,425
|
Apache-2.0
| 2023-09-11T03:32:32
| 2017-07-18T05:29:04
|
Python
|
UTF-8
|
Python
| false
| false
| 41,243
|
py
|
asm.py
|
"""
pygments.lexers.asm
~~~~~~~~~~~~~~~~~~~
Lexers for assembly languages.
:copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, using, words, \
DelegatingLexer, default
from pygments.lexers.c_cpp import CppLexer, CLexer
from pygments.lexers.d import DLexer
from pygments.token import Text, Name, Number, String, Comment, Punctuation, \
Other, Keyword, Operator, Whitespace
__all__ = ['GasLexer', 'ObjdumpLexer', 'DObjdumpLexer', 'CppObjdumpLexer',
'CObjdumpLexer', 'HsailLexer', 'LlvmLexer', 'LlvmMirBodyLexer',
'LlvmMirLexer', 'NasmLexer', 'NasmObjdumpLexer', 'TasmLexer',
'Ca65Lexer', 'Dasm16Lexer']
class GasLexer(RegexLexer):
"""
For Gas (AT&T) assembly code.
"""
name = 'GAS'
aliases = ['gas', 'asm']
filenames = ['*.s', '*.S']
mimetypes = ['text/x-gas']
#: optional Comment or Whitespace
string = r'"(\\"|[^"])*"'
char = r'[\w$.@-]'
identifier = r'(?:[a-zA-Z$_]' + char + r'*|\.' + char + '+)'
number = r'(?:0[xX][a-fA-F0-9]+|#?-?\d+)'
register = '%' + identifier + r'\b'
tokens = {
'root': [
include('whitespace'),
(identifier + ':', Name.Label),
(r'\.' + identifier, Name.Attribute, 'directive-args'),
(r'lock|rep(n?z)?|data\d+', Name.Attribute),
(identifier, Name.Function, 'instruction-args'),
(r'[\r\n]+', Text)
],
'directive-args': [
(identifier, Name.Constant),
(string, String),
('@' + identifier, Name.Attribute),
(number, Number.Integer),
(register, Name.Variable),
(r'[\r\n]+', Whitespace, '#pop'),
(r'([;#]|//).*?\n', Comment.Single, '#pop'),
(r'/[*].*?[*]/', Comment.Multiline),
(r'/[*].*?\n[\w\W]*?[*]/', Comment.Multiline, '#pop'),
include('punctuation'),
include('whitespace')
],
'instruction-args': [
# For objdump-disassembled code, shouldn't occur in
# actual assembler input
('([a-z0-9]+)( )(<)('+identifier+')(>)',
bygroups(Number.Hex, Text, Punctuation, Name.Constant,
Punctuation)),
('([a-z0-9]+)( )(<)('+identifier+')([-+])('+number+')(>)',
bygroups(Number.Hex, Text, Punctuation, Name.Constant,
Punctuation, Number.Integer, Punctuation)),
# Address constants
(identifier, Name.Constant),
(number, Number.Integer),
# Registers
(register, Name.Variable),
# Numeric constants
('$'+number, Number.Integer),
(r"$'(.|\\')'", String.Char),
(r'[\r\n]+', Whitespace, '#pop'),
(r'([;#]|//).*?\n', Comment.Single, '#pop'),
(r'/[*].*?[*]/', Comment.Multiline),
(r'/[*].*?\n[\w\W]*?[*]/', Comment.Multiline, '#pop'),
include('punctuation'),
include('whitespace')
],
'whitespace': [
(r'\n', Whitespace),
(r'\s+', Whitespace),
(r'([;#]|//).*?\n', Comment.Single),
(r'/[*][\w\W]*?[*]/', Comment.Multiline)
],
'punctuation': [
(r'[-*,.()\[\]!:{}]+', Punctuation)
]
}
def analyse_text(text):
if re.search(r'^\.(text|data|section)', text, re.M):
return True
elif re.search(r'^\.\w+', text, re.M):
return 0.1
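# A minimal sketch of driving this lexer directly (standard pygments API;
# the sample input string is illustrative only):
#
#     from pygments import highlight
#     from pygments.formatters import TerminalFormatter
#     print(highlight("movl %eax, %ebx\n", GasLexer(), TerminalFormatter()))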
def _objdump_lexer_tokens(asm_lexer):
"""
Common objdump lexer tokens to wrap an ASM lexer.
"""
hex_re = r'[0-9A-Za-z]'
return {
'root': [
# File name & format:
('(.*?)(:)( +file format )(.*?)$',
bygroups(Name.Label, Punctuation, Text, String)),
# Section header
('(Disassembly of section )(.*?)(:)$',
bygroups(Text, Name.Label, Punctuation)),
# Function labels
# (With offset)
('('+hex_re+'+)( )(<)(.*?)([-+])(0[xX][A-Za-z0-9]+)(>:)$',
bygroups(Number.Hex, Whitespace, Punctuation, Name.Function,
Punctuation, Number.Hex, Punctuation)),
# (Without offset)
('('+hex_re+'+)( )(<)(.*?)(>:)$',
bygroups(Number.Hex, Whitespace, Punctuation, Name.Function,
Punctuation)),
# Code line with disassembled instructions
('( *)('+hex_re+r'+:)(\t)((?:'+hex_re+hex_re+' )+)( *\t)([a-zA-Z].*?)$',
bygroups(Whitespace, Name.Label, Whitespace, Number.Hex, Whitespace,
using(asm_lexer))),
# Code line without raw instructions (objdump --no-show-raw-insn)
('( *)('+hex_re+r'+:)( *\t)([a-zA-Z].*?)$',
bygroups(Whitespace, Name.Label, Whitespace,
using(asm_lexer))),
# Code line with ascii
('( *)('+hex_re+r'+:)(\t)((?:'+hex_re+hex_re+' )+)( *)(.*?)$',
bygroups(Whitespace, Name.Label, Whitespace, Number.Hex, Whitespace, String)),
# Continued code line, only raw opcodes without disassembled
# instruction
('( *)('+hex_re+r'+:)(\t)((?:'+hex_re+hex_re+' )+)$',
bygroups(Whitespace, Name.Label, Whitespace, Number.Hex)),
# Skipped a few bytes
(r'\t\.\.\.$', Text),
# Relocation line
# (With offset)
(r'(\t\t\t)('+hex_re+r'+:)( )([^\t]+)(\t)(.*?)([-+])(0x'+hex_re+'+)$',
bygroups(Whitespace, Name.Label, Whitespace, Name.Property, Whitespace,
Name.Constant, Punctuation, Number.Hex)),
# (Without offset)
(r'(\t\t\t)('+hex_re+r'+:)( )([^\t]+)(\t)(.*?)$',
bygroups(Whitespace, Name.Label, Whitespace, Name.Property, Whitespace,
Name.Constant)),
(r'[^\n]+\n', Other)
]
}
class ObjdumpLexer(RegexLexer):
"""
For the output of ``objdump -dr``.
"""
name = 'objdump'
aliases = ['objdump']
filenames = ['*.objdump']
mimetypes = ['text/x-objdump']
tokens = _objdump_lexer_tokens(GasLexer)
class DObjdumpLexer(DelegatingLexer):
"""
For the output of ``objdump -Sr`` on compiled D files.
"""
name = 'd-objdump'
aliases = ['d-objdump']
filenames = ['*.d-objdump']
mimetypes = ['text/x-d-objdump']
def __init__(self, **options):
super().__init__(DLexer, ObjdumpLexer, **options)
class CppObjdumpLexer(DelegatingLexer):
"""
For the output of ``objdump -Sr`` on compiled C++ files.
"""
name = 'cpp-objdump'
aliases = ['cpp-objdump', 'c++-objdumb', 'cxx-objdump']
filenames = ['*.cpp-objdump', '*.c++-objdump', '*.cxx-objdump']
mimetypes = ['text/x-cpp-objdump']
def __init__(self, **options):
super().__init__(CppLexer, ObjdumpLexer, **options)
class CObjdumpLexer(DelegatingLexer):
"""
For the output of ``objdump -Sr`` on compiled C files.
"""
name = 'c-objdump'
aliases = ['c-objdump']
filenames = ['*.c-objdump']
mimetypes = ['text/x-c-objdump']
def __init__(self, **options):
super().__init__(CLexer, ObjdumpLexer, **options)
class HsailLexer(RegexLexer):
"""
For HSAIL assembly code.
.. versionadded:: 2.2
"""
name = 'HSAIL'
aliases = ['hsail', 'hsa']
filenames = ['*.hsail']
mimetypes = ['text/x-hsail']
string = r'"[^"]*?"'
identifier = r'[a-zA-Z_][\w.]*'
# Registers
register_number = r'[0-9]+'
register = r'(\$(c|s|d|q)' + register_number + r')\b'
# Qualifiers
alignQual = r'(align\(\d+\))'
widthQual = r'(width\((\d+|all)\))'
allocQual = r'(alloc\(agent\))'
# Instruction Modifiers
roundingMod = (r'((_ftz)?(_up|_down|_zero|_near))')
datatypeMod = (r'_('
# packedTypes
r'u8x4|s8x4|u16x2|s16x2|u8x8|s8x8|u16x4|s16x4|u32x2|s32x2|'
r'u8x16|s8x16|u16x8|s16x8|u32x4|s32x4|u64x2|s64x2|'
r'f16x2|f16x4|f16x8|f32x2|f32x4|f64x2|'
# baseTypes
r'u8|s8|u16|s16|u32|s32|u64|s64|'
r'b128|b8|b16|b32|b64|b1|'
r'f16|f32|f64|'
# opaqueType
r'roimg|woimg|rwimg|samp|sig32|sig64)')
# Numeric Constant
float = r'((\d+\.)|(\d*\.\d+))[eE][+-]?\d+'
hexfloat = r'0[xX](([0-9a-fA-F]+\.[0-9a-fA-F]*)|([0-9a-fA-F]*\.[0-9a-fA-F]+))[pP][+-]?\d+'
ieeefloat = r'0((h|H)[0-9a-fA-F]{4}|(f|F)[0-9a-fA-F]{8}|(d|D)[0-9a-fA-F]{16})'
tokens = {
'root': [
include('whitespace'),
include('comments'),
(string, String),
(r'@' + identifier + ':?', Name.Label),
(register, Name.Variable.Anonymous),
include('keyword'),
(r'&' + identifier, Name.Variable.Global),
(r'%' + identifier, Name.Variable),
(hexfloat, Number.Hex),
(r'0[xX][a-fA-F0-9]+', Number.Hex),
(ieeefloat, Number.Float),
(float, Number.Float),
(r'\d+', Number.Integer),
(r'[=<>{}\[\]()*.,:;!]|x\b', Punctuation)
],
'whitespace': [
(r'(\n|\s)+', Whitespace),
],
'comments': [
(r'/\*.*?\*/', Comment.Multiline),
(r'//.*?\n', Comment.Single),
],
'keyword': [
# Types
(r'kernarg' + datatypeMod, Keyword.Type),
# Regular keywords
(r'\$(full|base|small|large|default|zero|near)', Keyword),
(words((
'module', 'extension', 'pragma', 'prog', 'indirect', 'signature',
'decl', 'kernel', 'function', 'enablebreakexceptions',
'enabledetectexceptions', 'maxdynamicgroupsize', 'maxflatgridsize',
'maxflatworkgroupsize', 'requireddim', 'requiredgridsize',
'requiredworkgroupsize', 'requirenopartialworkgroups'),
suffix=r'\b'), Keyword),
# instructions
(roundingMod, Keyword),
(datatypeMod, Keyword),
(r'_(' + alignQual + '|' + widthQual + ')', Keyword),
(r'_kernarg', Keyword),
(r'(nop|imagefence)\b', Keyword),
(words((
'cleardetectexcept', 'clock', 'cuid', 'debugtrap', 'dim',
'getdetectexcept', 'groupbaseptr', 'kernargbaseptr', 'laneid',
'maxcuid', 'maxwaveid', 'packetid', 'setdetectexcept', 'waveid',
'workitemflatabsid', 'workitemflatid', 'nullptr', 'abs', 'bitrev',
'currentworkgroupsize', 'currentworkitemflatid', 'fract', 'ncos',
'neg', 'nexp2', 'nlog2', 'nrcp', 'nrsqrt', 'nsin', 'nsqrt',
'gridgroups', 'gridsize', 'not', 'sqrt', 'workgroupid',
'workgroupsize', 'workitemabsid', 'workitemid', 'ceil', 'floor',
'rint', 'trunc', 'add', 'bitmask', 'borrow', 'carry', 'copysign',
'div', 'rem', 'sub', 'shl', 'shr', 'and', 'or', 'xor', 'unpackhi',
'unpacklo', 'max', 'min', 'fma', 'mad', 'bitextract', 'bitselect',
'shuffle', 'cmov', 'bitalign', 'bytealign', 'lerp', 'nfma', 'mul',
'mulhi', 'mul24hi', 'mul24', 'mad24', 'mad24hi', 'bitinsert',
'combine', 'expand', 'lda', 'mov', 'pack', 'unpack', 'packcvt',
'unpackcvt', 'sad', 'sementp', 'ftos', 'stof', 'cmp', 'ld', 'st',
'_eq', '_ne', '_lt', '_le', '_gt', '_ge', '_equ', '_neu', '_ltu',
'_leu', '_gtu', '_geu', '_num', '_nan', '_seq', '_sne', '_slt',
'_sle', '_sgt', '_sge', '_snum', '_snan', '_sequ', '_sneu', '_sltu',
'_sleu', '_sgtu', '_sgeu', 'atomic', '_ld', '_st', '_cas', '_add',
'_and', '_exch', '_max', '_min', '_or', '_sub', '_wrapdec',
'_wrapinc', '_xor', 'ret', 'cvt', '_readonly', '_kernarg', '_global',
'br', 'cbr', 'sbr', '_scacq', '_screl', '_scar', '_rlx', '_wave',
'_wg', '_agent', '_system', 'ldimage', 'stimage', '_v2', '_v3', '_v4',
'_1d', '_2d', '_3d', '_1da', '_2da', '_1db', '_2ddepth', '_2dadepth',
'_width', '_height', '_depth', '_array', '_channelorder',
'_channeltype', 'querysampler', '_coord', '_filter', '_addressing',
'barrier', 'wavebarrier', 'initfbar', 'joinfbar', 'waitfbar',
'arrivefbar', 'leavefbar', 'releasefbar', 'ldf', 'activelaneid',
'activelanecount', 'activelanemask', 'activelanepermute', 'call',
'scall', 'icall', 'alloca', 'packetcompletionsig',
'addqueuewriteindex', 'casqueuewriteindex', 'ldqueuereadindex',
'stqueuereadindex', 'readonly', 'global', 'private', 'group',
'spill', 'arg', '_upi', '_downi', '_zeroi', '_neari', '_upi_sat',
'_downi_sat', '_zeroi_sat', '_neari_sat', '_supi', '_sdowni',
'_szeroi', '_sneari', '_supi_sat', '_sdowni_sat', '_szeroi_sat',
'_sneari_sat', '_pp', '_ps', '_sp', '_ss', '_s', '_p', '_pp_sat',
'_ps_sat', '_sp_sat', '_ss_sat', '_s_sat', '_p_sat')), Keyword),
# Integer types
(r'i[1-9]\d*', Keyword)
]
}
class LlvmLexer(RegexLexer):
"""
For LLVM assembly code.
"""
name = 'LLVM'
url = 'https://llvm.org/docs/LangRef.html'
aliases = ['llvm']
filenames = ['*.ll']
mimetypes = ['text/x-llvm']
#: optional Comment or Whitespace
string = r'"[^"]*?"'
identifier = r'([-a-zA-Z$._][\w\-$.]*|' + string + ')'
block_label = r'(' + identifier + r'|(\d+))'
tokens = {
'root': [
include('whitespace'),
# Before keywords, because keywords are valid label names :(...
(block_label + r'\s*:', Name.Label),
include('keyword'),
(r'%' + identifier, Name.Variable),
(r'@' + identifier, Name.Variable.Global),
(r'%\d+', Name.Variable.Anonymous),
(r'@\d+', Name.Variable.Global),
(r'#\d+', Name.Variable.Global),
(r'!' + identifier, Name.Variable),
(r'!\d+', Name.Variable.Anonymous),
(r'c?' + string, String),
(r'0[xX][a-fA-F0-9]+', Number),
(r'-?\d+(?:[.]\d+)?(?:[eE][-+]?\d+(?:[.]\d+)?)?', Number),
(r'[=<>{}\[\]()*.,!]|x\b', Punctuation)
],
'whitespace': [
(r'(\n|\s+)+', Whitespace),
(r';.*?\n', Comment)
],
'keyword': [
# Regular keywords
(words((
'aarch64_sve_vector_pcs', 'aarch64_vector_pcs', 'acq_rel',
'acquire', 'add', 'addrspace', 'addrspacecast', 'afn', 'alias',
'aliasee', 'align', 'alignLog2', 'alignstack', 'alloca',
'allocsize', 'allOnes', 'alwaysinline', 'alwaysInline',
'amdgpu_cs', 'amdgpu_es', 'amdgpu_gfx', 'amdgpu_gs',
'amdgpu_hs', 'amdgpu_kernel', 'amdgpu_ls', 'amdgpu_ps',
'amdgpu_vs', 'and', 'any', 'anyregcc', 'appending', 'arcp',
'argmemonly', 'args', 'arm_aapcs_vfpcc', 'arm_aapcscc',
'arm_apcscc', 'ashr', 'asm', 'atomic', 'atomicrmw',
'attributes', 'available_externally', 'avr_intrcc',
'avr_signalcc', 'bit', 'bitcast', 'bitMask', 'blockaddress',
'blockcount', 'br', 'branchFunnel', 'builtin', 'byArg',
'byref', 'byte', 'byteArray', 'byval', 'c', 'call', 'callbr',
'callee', 'caller', 'calls', 'canAutoHide', 'catch',
'catchpad', 'catchret', 'catchswitch', 'cc', 'ccc',
'cfguard_checkcc', 'cleanup', 'cleanuppad', 'cleanupret',
'cmpxchg', 'cold', 'coldcc', 'comdat', 'common', 'constant',
'contract', 'convergent', 'critical', 'cxx_fast_tlscc',
'datalayout', 'declare', 'default', 'define', 'deplibs',
'dereferenceable', 'dereferenceable_or_null', 'distinct',
'dllexport', 'dllimport', 'dso_local', 'dso_local_equivalent',
'dso_preemptable', 'dsoLocal', 'eq', 'exact', 'exactmatch',
'extern_weak', 'external', 'externally_initialized',
'extractelement', 'extractvalue', 'fadd', 'false', 'fast',
'fastcc', 'fcmp', 'fdiv', 'fence', 'filter', 'flags', 'fmul',
'fneg', 'fpext', 'fptosi', 'fptoui', 'fptrunc', 'freeze',
'frem', 'from', 'fsub', 'funcFlags', 'function', 'gc',
'getelementptr', 'ghccc', 'global', 'guid', 'gv', 'hash',
'hhvm_ccc', 'hhvmcc', 'hidden', 'hot', 'hotness', 'icmp',
'ifunc', 'inaccessiblemem_or_argmemonly',
'inaccessiblememonly', 'inalloca', 'inbounds', 'indir',
'indirectbr', 'info', 'initialexec', 'inline', 'inlineBits',
'inlinehint', 'inrange', 'inreg', 'insertelement',
'insertvalue', 'insts', 'intel_ocl_bicc', 'inteldialect',
'internal', 'inttoptr', 'invoke', 'jumptable', 'kind',
'landingpad', 'largest', 'linkage', 'linkonce', 'linkonce_odr',
'live', 'load', 'local_unnamed_addr', 'localdynamic',
'localexec', 'lshr', 'max', 'metadata', 'min', 'minsize',
'module', 'monotonic', 'msp430_intrcc', 'mul', 'mustprogress',
'musttail', 'naked', 'name', 'nand', 'ne', 'nest', 'ninf',
'nnan', 'noalias', 'nobuiltin', 'nocallback', 'nocapture',
'nocf_check', 'noduplicate', 'noduplicates', 'nofree',
'noimplicitfloat', 'noinline', 'noInline', 'nomerge', 'none',
'nonlazybind', 'nonnull', 'noprofile', 'norecurse',
'noRecurse', 'noredzone', 'noreturn', 'nosync', 'notail',
'notEligibleToImport', 'noundef', 'nounwind', 'nsw',
'nsz', 'null', 'null_pointer_is_valid', 'nuw', 'oeq', 'offset',
'oge', 'ogt', 'ole', 'olt', 'one', 'opaque', 'optforfuzzing',
'optnone', 'optsize', 'or', 'ord', 'param', 'params',
'partition', 'path', 'personality', 'phi', 'poison',
'preallocated', 'prefix', 'preserve_allcc', 'preserve_mostcc',
'private', 'prologue', 'protected', 'ptrtoint', 'ptx_device',
'ptx_kernel', 'readnone', 'readNone', 'readonly', 'readOnly',
'reassoc', 'refs', 'relbf', 'release', 'resByArg', 'resume',
'ret', 'returnDoesNotAlias', 'returned', 'returns_twice',
'safestack', 'samesize', 'sanitize_address',
'sanitize_hwaddress', 'sanitize_memory', 'sanitize_memtag',
'sanitize_thread', 'sdiv', 'section', 'select', 'seq_cst',
'sext', 'sge', 'sgt', 'shadowcallstack', 'shl',
'shufflevector', 'sideeffect', 'signext', 'single',
'singleImpl', 'singleImplName', 'sitofp', 'sizeM1',
'sizeM1BitWidth', 'sle', 'slt', 'source_filename',
'speculatable', 'speculative_load_hardening', 'spir_func',
'spir_kernel', 'srem', 'sret', 'ssp', 'sspreq', 'sspstrong',
'store', 'strictfp', 'sub', 'summaries', 'summary', 'swiftcc',
'swifterror', 'swiftself', 'switch', 'syncscope', 'tail',
'tailcc', 'target', 'thread_local', 'to', 'token', 'triple',
'true', 'trunc', 'type', 'typeCheckedLoadConstVCalls',
'typeCheckedLoadVCalls', 'typeid', 'typeidCompatibleVTable',
'typeIdInfo', 'typeTestAssumeConstVCalls',
'typeTestAssumeVCalls', 'typeTestRes', 'typeTests', 'udiv',
'ueq', 'uge', 'ugt', 'uitofp', 'ule', 'ult', 'umax', 'umin',
'undef', 'une', 'uniformRetVal', 'uniqueRetVal', 'unknown',
'unnamed_addr', 'uno', 'unordered', 'unreachable', 'unsat',
'unwind', 'urem', 'uselistorder', 'uselistorder_bb', 'uwtable',
'va_arg', 'varFlags', 'variable', 'vcall_visibility',
'vFuncId', 'virtFunc', 'virtualConstProp', 'void', 'volatile',
'vscale', 'vTableFuncs', 'weak', 'weak_odr', 'webkit_jscc',
'win64cc', 'within', 'wpdRes', 'wpdResolutions', 'writeonly',
'x', 'x86_64_sysvcc', 'x86_fastcallcc', 'x86_intrcc',
'x86_mmx', 'x86_regcallcc', 'x86_stdcallcc', 'x86_thiscallcc',
'x86_vectorcallcc', 'xchg', 'xor', 'zeroext',
'zeroinitializer', 'zext', 'immarg', 'willreturn'),
suffix=r'\b'), Keyword),
# Types
(words(('void', 'half', 'bfloat', 'float', 'double', 'fp128',
'x86_fp80', 'ppc_fp128', 'label', 'metadata', 'x86_mmx',
'x86_amx', 'token', 'ptr')),
Keyword.Type),
# Integer types
(r'i[1-9]\d*', Keyword.Type)
]
}
class LlvmMirBodyLexer(RegexLexer):
"""
For LLVM MIR examples without the YAML wrapper.
.. versionadded:: 2.6
"""
name = 'LLVM-MIR Body'
url = 'https://llvm.org/docs/MIRLangRef.html'
aliases = ['llvm-mir-body']
filenames = []
mimetypes = []
tokens = {
'root': [
# Attributes on basic blocks
(words(('liveins', 'successors'), suffix=':'), Keyword),
# Basic Block Labels
(r'bb\.[0-9]+(\.[a-zA-Z0-9_.-]+)?( \(address-taken\))?:', Name.Label),
(r'bb\.[0-9]+ \(%[a-zA-Z0-9_.-]+\)( \(address-taken\))?:', Name.Label),
(r'%bb\.[0-9]+(\.\w+)?', Name.Label),
# Stack references
(r'%stack\.[0-9]+(\.\w+\.addr)?', Name),
# Subreg indices
(r'%subreg\.\w+', Name),
# Virtual registers
(r'%[a-zA-Z0-9_]+ *', Name.Variable, 'vreg'),
# Reference to LLVM-IR global
include('global'),
# Reference to Intrinsic
(r'intrinsic\(\@[a-zA-Z0-9_.]+\)', Name.Variable.Global),
# Comparison predicates
(words(('eq', 'ne', 'sgt', 'sge', 'slt', 'sle', 'ugt', 'uge', 'ult',
'ule'), prefix=r'intpred\(', suffix=r'\)'), Name.Builtin),
(words(('oeq', 'one', 'ogt', 'oge', 'olt', 'ole', 'ugt', 'uge',
'ult', 'ule'), prefix=r'floatpred\(', suffix=r'\)'),
Name.Builtin),
# Physical registers
(r'\$\w+', String.Single),
# Assignment operator
(r'=', Operator),
# gMIR Opcodes
(r'(G_ANYEXT|G_[SZ]EXT|G_SEXT_INREG|G_TRUNC|G_IMPLICIT_DEF|G_PHI|'
r'G_FRAME_INDEX|G_GLOBAL_VALUE|G_INTTOPTR|G_PTRTOINT|G_BITCAST|'
r'G_CONSTANT|G_FCONSTANT|G_VASTART|G_VAARG|G_CTLZ|G_CTLZ_ZERO_UNDEF|'
r'G_CTTZ|G_CTTZ_ZERO_UNDEF|G_CTPOP|G_BSWAP|G_BITREVERSE|'
r'G_ADDRSPACE_CAST|G_BLOCK_ADDR|G_JUMP_TABLE|G_DYN_STACKALLOC|'
r'G_ADD|G_SUB|G_MUL|G_[SU]DIV|G_[SU]REM|G_AND|G_OR|G_XOR|G_SHL|'
r'G_[LA]SHR|G_[IF]CMP|G_SELECT|G_GEP|G_PTR_MASK|G_SMIN|G_SMAX|'
r'G_UMIN|G_UMAX|G_[US]ADDO|G_[US]ADDE|G_[US]SUBO|G_[US]SUBE|'
r'G_[US]MULO|G_[US]MULH|G_FNEG|G_FPEXT|G_FPTRUNC|G_FPTO[US]I|'
r'G_[US]ITOFP|G_FABS|G_FCOPYSIGN|G_FCANONICALIZE|G_FMINNUM|'
r'G_FMAXNUM|G_FMINNUM_IEEE|G_FMAXNUM_IEEE|G_FMINIMUM|G_FMAXIMUM|'
r'G_FADD|G_FSUB|G_FMUL|G_FMA|G_FMAD|G_FDIV|G_FREM|G_FPOW|G_FEXP|'
r'G_FEXP2|G_FLOG|G_FLOG2|G_FLOG10|G_FCEIL|G_FCOS|G_FSIN|G_FSQRT|'
r'G_FFLOOR|G_FRINT|G_FNEARBYINT|G_INTRINSIC_TRUNC|'
r'G_INTRINSIC_ROUND|G_LOAD|G_[ZS]EXTLOAD|G_INDEXED_LOAD|'
r'G_INDEXED_[ZS]EXTLOAD|G_STORE|G_INDEXED_STORE|'
r'G_ATOMIC_CMPXCHG_WITH_SUCCESS|G_ATOMIC_CMPXCHG|'
r'G_ATOMICRMW_(XCHG|ADD|SUB|AND|NAND|OR|XOR|MAX|MIN|UMAX|UMIN|FADD|'
r'FSUB)'
r'|G_FENCE|G_EXTRACT|G_UNMERGE_VALUES|G_INSERT|G_MERGE_VALUES|'
r'G_BUILD_VECTOR|G_BUILD_VECTOR_TRUNC|G_CONCAT_VECTORS|'
r'G_INTRINSIC|G_INTRINSIC_W_SIDE_EFFECTS|G_BR|G_BRCOND|'
r'G_BRINDIRECT|G_BRJT|G_INSERT_VECTOR_ELT|G_EXTRACT_VECTOR_ELT|'
r'G_SHUFFLE_VECTOR)\b',
Name.Builtin),
# Target independent opcodes
(r'(COPY|PHI|INSERT_SUBREG|EXTRACT_SUBREG|REG_SEQUENCE)\b',
Name.Builtin),
# Flags
(words(('killed', 'implicit')), Keyword),
# ConstantInt values
(r'(i[0-9]+)( +)', bygroups(Keyword.Type, Whitespace), 'constantint'),
# ConstantFloat values
(r'(half|float|double) +', Keyword.Type, 'constantfloat'),
# Bare immediates
include('integer'),
# MMO's
(r'(::)( *)', bygroups(Operator, Whitespace), 'mmo'),
# MIR Comments
(r';.*', Comment),
# If we get here, assume it's a target instruction
(r'[a-zA-Z0-9_]+', Name),
# Everything else that isn't highlighted
(r'[(), \n]+', Text),
],
# The integer constant from a ConstantInt value
'constantint': [
include('integer'),
(r'(?=.)', Text, '#pop'),
],
# The floating point constant from a ConstantFloat value
'constantfloat': [
include('float'),
(r'(?=.)', Text, '#pop'),
],
'vreg': [
# The bank or class if there is one
(r'( *)(:(?!:))', bygroups(Whitespace, Keyword), ('#pop', 'vreg_bank_or_class')),
# The LLT if there is one
(r'( *)(\()', bygroups(Whitespace, Text), 'vreg_type'),
(r'(?=.)', Text, '#pop'),
],
'vreg_bank_or_class': [
# The unassigned bank/class
(r'( *)(_)', bygroups(Whitespace, Name.Variable.Magic)),
(r'( *)([a-zA-Z0-9_]+)', bygroups(Whitespace, Name.Variable)),
# The LLT if there is one
(r'( *)(\()', bygroups(Whitespace, Text), 'vreg_type'),
(r'(?=.)', Text, '#pop'),
],
'vreg_type': [
# Scalar and pointer types
(r'( *)([sp][0-9]+)', bygroups(Whitespace, Keyword.Type)),
(r'( *)(<[0-9]+ *x *[sp][0-9]+>)', bygroups(Whitespace, Keyword.Type)),
(r'\)', Text, '#pop'),
(r'(?=.)', Text, '#pop'),
],
'mmo': [
(r'\(', Text),
(r' +', Whitespace),
(words(('load', 'store', 'on', 'into', 'from', 'align', 'monotonic',
'acquire', 'release', 'acq_rel', 'seq_cst')),
Keyword),
# IR references
(r'%ir\.[a-zA-Z0-9_.-]+', Name),
(r'%ir-block\.[a-zA-Z0-9_.-]+', Name),
(r'[-+]', Operator),
include('integer'),
include('global'),
(r',', Punctuation),
(r'\), \(', Text),
(r'\)', Text, '#pop'),
],
'integer': [(r'-?[0-9]+', Number.Integer),],
'float': [(r'-?[0-9]+\.[0-9]+(e[+-][0-9]+)?', Number.Float)],
'global': [(r'\@[a-zA-Z0-9_.]+', Name.Variable.Global)],
}
class LlvmMirLexer(RegexLexer):
"""
Lexer for the overall LLVM MIR document format.
MIR is a human readable serialization format that's used to represent LLVM's
machine specific intermediate representation. It allows LLVM's developers to
see the state of the compilation process at various points, as well as test
individual pieces of the compiler.
.. versionadded:: 2.6
"""
name = 'LLVM-MIR'
url = 'https://llvm.org/docs/MIRLangRef.html'
aliases = ['llvm-mir']
filenames = ['*.mir']
tokens = {
'root': [
# Comments are hashes at the YAML level
(r'#.*', Comment),
# Documents starting with | are LLVM-IR
(r'--- \|$', Keyword, 'llvm_ir'),
# Other documents are MIR
(r'---', Keyword, 'llvm_mir'),
# Consume everything else in one token for efficiency
(r'[^-#]+|.', Text),
],
'llvm_ir': [
# Documents end with '...' or '---'
(r'(\.\.\.|(?=---))', Keyword, '#pop'),
# Delegate to the LlvmLexer
(r'((?:.|\n)+?)(?=(\.\.\.|---))', bygroups(using(LlvmLexer))),
],
'llvm_mir': [
# Comments are hashes at the YAML level
(r'#.*', Comment),
# Documents end with '...' or '---'
(r'(\.\.\.|(?=---))', Keyword, '#pop'),
# Handle the simple attributes
(r'name:', Keyword, 'name'),
(words(('alignment', ),
suffix=':'), Keyword, 'number'),
(words(('legalized', 'regBankSelected', 'tracksRegLiveness',
'selected', 'exposesReturnsTwice'),
suffix=':'), Keyword, 'boolean'),
            # Handle the attributes we don't highlight inside
(words(('registers', 'stack', 'fixedStack', 'liveins', 'frameInfo',
'machineFunctionInfo'),
suffix=':'), Keyword),
# Delegate the body block to the LlvmMirBodyLexer
(r'body: *\|', Keyword, 'llvm_mir_body'),
# Consume everything else
(r'.+', Text),
(r'\n', Whitespace),
],
'name': [
(r'[^\n]+', Name),
default('#pop'),
],
'boolean': [
(r' *(true|false)', Name.Builtin),
default('#pop'),
],
'number': [
(r' *[0-9]+', Number),
default('#pop'),
],
'llvm_mir_body': [
# Documents end with '...' or '---'.
# We have to pop llvm_mir_body and llvm_mir
(r'(\.\.\.|(?=---))', Keyword, '#pop:2'),
# Delegate the body block to the LlvmMirBodyLexer
(r'((?:.|\n)+?)(?=\.\.\.|---)', bygroups(using(LlvmMirBodyLexer))),
# The '...' is optional. If we didn't already find it then it isn't
# there. There might be a '---' instead though.
(r'(?!\.\.\.|---)((?:.|\n)+)', bygroups(using(LlvmMirBodyLexer))),
],
}
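# Token streams from these lexers can also be inspected directly, e.g.
# (illustrative snippet):
#
#     for tok, value in LlvmMirLexer().get_tokens("---\nname: f\n...\n"):
#         print(tok, repr(value))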
class NasmLexer(RegexLexer):
"""
For Nasm (Intel) assembly code.
"""
name = 'NASM'
aliases = ['nasm']
filenames = ['*.asm', '*.ASM', '*.nasm']
mimetypes = ['text/x-nasm']
# Tasm uses the same file endings, but TASM is not as common as NASM, so
# we prioritize NASM higher by default
priority = 1.0
identifier = r'[a-z$._?][\w$.?#@~]*'
hexn = r'(?:0x[0-9a-f]+|$0[0-9a-f]*|[0-9]+[0-9a-f]*h)'
octn = r'[0-7]+q'
binn = r'[01]+b'
decn = r'[0-9]+'
floatn = decn + r'\.e?' + decn
string = r'"(\\"|[^"\n])*"|' + r"'(\\'|[^'\n])*'|" + r"`(\\`|[^`\n])*`"
declkw = r'(?:res|d)[bwdqt]|times'
register = (r'(r[0-9][0-5]?[bwd]?|'
r'[a-d][lh]|[er]?[a-d]x|[er]?[sb]p|[er]?[sd]i|[c-gs]s|st[0-7]|'
r'mm[0-7]|cr[0-4]|dr[0-367]|tr[3-7]|k[0-7]|'
r'[xyz]mm(?:[12][0-9]?|3[01]?|[04-9]))\b')
wordop = r'seg|wrt|strict|rel|abs'
type = r'byte|[dq]?word'
# Directives must be followed by whitespace, otherwise CPU will match
# cpuid for instance.
directives = (r'(?:BITS|USE16|USE32|SECTION|SEGMENT|ABSOLUTE|EXTERN|GLOBAL|'
r'ORG|ALIGN|STRUC|ENDSTRUC|COMMON|CPU|GROUP|UPPERCASE|IMPORT|'
r'EXPORT|LIBRARY|MODULE)(?=\s)')
flags = re.IGNORECASE | re.MULTILINE
tokens = {
'root': [
(r'^\s*%', Comment.Preproc, 'preproc'),
include('whitespace'),
(identifier + ':', Name.Label),
(r'(%s)(\s+)(equ)' % identifier,
bygroups(Name.Constant, Whitespace, Keyword.Declaration),
'instruction-args'),
(directives, Keyword, 'instruction-args'),
(declkw, Keyword.Declaration, 'instruction-args'),
(identifier, Name.Function, 'instruction-args'),
(r'[\r\n]+', Whitespace)
],
'instruction-args': [
(string, String),
(hexn, Number.Hex),
(octn, Number.Oct),
(binn, Number.Bin),
(floatn, Number.Float),
(decn, Number.Integer),
include('punctuation'),
(register, Name.Builtin),
(identifier, Name.Variable),
(r'[\r\n]+', Whitespace, '#pop'),
include('whitespace')
],
'preproc': [
(r'[^;\n]+', Comment.Preproc),
(r';.*?\n', Comment.Single, '#pop'),
(r'\n', Comment.Preproc, '#pop'),
],
'whitespace': [
(r'\n', Whitespace),
(r'[ \t]+', Whitespace),
(r';.*', Comment.Single),
(r'#.*', Comment.Single)
],
'punctuation': [
(r'[,{}():\[\]]+', Punctuation),
(r'[&|^<>+*/%~-]+', Operator),
(r'[$]+', Keyword.Constant),
(wordop, Operator.Word),
(type, Keyword.Type)
],
}
def analyse_text(text):
# Probably TASM
if re.match(r'PROC', text, re.IGNORECASE):
return False
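# Note: analyse_text above vetoes probable TASM input so that lexer guessing
# (pygments.lexers.guess_lexer) can fall through to TasmLexer, whose
# analyse_text below claims the same leading `PROC` pattern.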
class NasmObjdumpLexer(ObjdumpLexer):
"""
For the output of ``objdump -d -M intel``.
.. versionadded:: 2.0
"""
name = 'objdump-nasm'
aliases = ['objdump-nasm']
filenames = ['*.objdump-intel']
mimetypes = ['text/x-nasm-objdump']
tokens = _objdump_lexer_tokens(NasmLexer)
class TasmLexer(RegexLexer):
"""
For Tasm (Turbo Assembler) assembly code.
"""
name = 'TASM'
aliases = ['tasm']
filenames = ['*.asm', '*.ASM', '*.tasm']
mimetypes = ['text/x-tasm']
identifier = r'[@a-z$._?][\w$.?#@~]*'
hexn = r'(?:0x[0-9a-f]+|$0[0-9a-f]*|[0-9]+[0-9a-f]*h)'
octn = r'[0-7]+q'
binn = r'[01]+b'
decn = r'[0-9]+'
floatn = decn + r'\.e?' + decn
string = r'"(\\"|[^"\n])*"|' + r"'(\\'|[^'\n])*'|" + r"`(\\`|[^`\n])*`"
declkw = r'(?:res|d)[bwdqt]|times'
register = (r'(r[0-9][0-5]?[bwd]|'
r'[a-d][lh]|[er]?[a-d]x|[er]?[sb]p|[er]?[sd]i|[c-gs]s|st[0-7]|'
r'mm[0-7]|cr[0-4]|dr[0-367]|tr[3-7])\b')
wordop = r'seg|wrt|strict'
type = r'byte|[dq]?word'
directives = (r'BITS|USE16|USE32|SECTION|SEGMENT|ABSOLUTE|EXTERN|GLOBAL|'
r'ORG|ALIGN|STRUC|ENDSTRUC|ENDS|COMMON|CPU|GROUP|UPPERCASE|INCLUDE|'
r'EXPORT|LIBRARY|MODULE|PROC|ENDP|USES|ARG|DATASEG|UDATASEG|END|IDEAL|'
r'P386|MODEL|ASSUME|CODESEG|SIZE')
# T[A-Z][a-z] is more of a convention. Lexer should filter out STRUC definitions
# and then 'add' them to datatype somehow.
datatype = (r'db|dd|dw|T[A-Z][a-z]+')
flags = re.IGNORECASE | re.MULTILINE
tokens = {
'root': [
(r'^\s*%', Comment.Preproc, 'preproc'),
include('whitespace'),
(identifier + ':', Name.Label),
(directives, Keyword, 'instruction-args'),
(r'(%s)(\s+)(%s)' % (identifier, datatype),
bygroups(Name.Constant, Whitespace, Keyword.Declaration),
'instruction-args'),
(declkw, Keyword.Declaration, 'instruction-args'),
(identifier, Name.Function, 'instruction-args'),
(r'[\r\n]+', Whitespace)
],
'instruction-args': [
(string, String),
(hexn, Number.Hex),
(octn, Number.Oct),
(binn, Number.Bin),
(floatn, Number.Float),
(decn, Number.Integer),
include('punctuation'),
(register, Name.Builtin),
(identifier, Name.Variable),
# Do not match newline when it's preceded by a backslash
(r'(\\)(\s*)(;.*)([\r\n])',
bygroups(Text, Whitespace, Comment.Single, Whitespace)),
(r'[\r\n]+', Whitespace, '#pop'),
include('whitespace')
],
'preproc': [
(r'[^;\n]+', Comment.Preproc),
(r';.*?\n', Comment.Single, '#pop'),
(r'\n', Comment.Preproc, '#pop'),
],
'whitespace': [
(r'[\n\r]', Whitespace),
(r'(\\)([\n\r])', bygroups(Text, Whitespace)),
(r'[ \t]+', Whitespace),
(r';.*', Comment.Single)
],
'punctuation': [
(r'[,():\[\]]+', Punctuation),
(r'[&|^<>+*=/%~-]+', Operator),
(r'[$]+', Keyword.Constant),
(wordop, Operator.Word),
(type, Keyword.Type)
],
}
def analyse_text(text):
# See above
if re.match(r'PROC', text, re.I):
return True
class Ca65Lexer(RegexLexer):
"""
For ca65 assembler sources.
.. versionadded:: 1.6
"""
name = 'ca65 assembler'
aliases = ['ca65']
filenames = ['*.s']
flags = re.IGNORECASE
tokens = {
'root': [
(r';.*', Comment.Single),
(r'\s+', Whitespace),
(r'[a-z_.@$][\w.@$]*:', Name.Label),
(r'((ld|st)[axy]|(in|de)[cxy]|asl|lsr|ro[lr]|adc|sbc|cmp|cp[xy]'
r'|cl[cvdi]|se[cdi]|jmp|jsr|bne|beq|bpl|bmi|bvc|bvs|bcc|bcs'
r'|p[lh][ap]|rt[is]|brk|nop|ta[xy]|t[xy]a|txs|tsx|and|ora|eor'
r'|bit)\b', Keyword),
(r'\.\w+', Keyword.Pseudo),
(r'[-+~*/^&|!<>=]', Operator),
(r'"[^"\n]*.', String),
(r"'[^'\n]*.", String.Char),
(r'\$[0-9a-f]+|[0-9a-f]+h\b', Number.Hex),
(r'\d+', Number.Integer),
(r'%[01]+', Number.Bin),
(r'[#,.:()=\[\]]', Punctuation),
(r'[a-z_.@$][\w.@$]*', Name),
]
}
def analyse_text(self, text):
# comments in GAS start with "#"
if re.search(r'^\s*;', text, re.MULTILINE):
return 0.9
class Dasm16Lexer(RegexLexer):
"""
For DCPU-16 Assembly.
.. versionadded:: 2.4
"""
name = 'DASM16'
url = 'http://0x10c.com/doc/dcpu-16.txt'
aliases = ['dasm16']
filenames = ['*.dasm16', '*.dasm']
mimetypes = ['text/x-dasm16']
INSTRUCTIONS = [
'SET',
'ADD', 'SUB',
'MUL', 'MLI',
'DIV', 'DVI',
'MOD', 'MDI',
'AND', 'BOR', 'XOR',
'SHR', 'ASR', 'SHL',
'IFB', 'IFC', 'IFE', 'IFN', 'IFG', 'IFA', 'IFL', 'IFU',
'ADX', 'SBX',
'STI', 'STD',
'JSR',
'INT', 'IAG', 'IAS', 'RFI', 'IAQ', 'HWN', 'HWQ', 'HWI',
]
REGISTERS = [
'A', 'B', 'C',
'X', 'Y', 'Z',
'I', 'J',
'SP', 'PC', 'EX',
'POP', 'PEEK', 'PUSH'
]
# Regexes yo
char = r'[a-zA-Z0-9_$@.]'
identifier = r'(?:[a-zA-Z$_]' + char + r'*|\.' + char + '+)'
number = r'[+-]?(?:0[xX][a-zA-Z0-9]+|\d+)'
binary_number = r'0b[01_]+'
instruction = r'(?i)(' + '|'.join(INSTRUCTIONS) + ')'
single_char = r"'\\?" + char + "'"
string = r'"(\\"|[^"])*"'
def guess_identifier(lexer, match):
ident = match.group(0)
klass = Name.Variable if ident.upper() in lexer.REGISTERS else Name.Label
yield match.start(), klass, ident
tokens = {
'root': [
include('whitespace'),
(':' + identifier, Name.Label),
(identifier + ':', Name.Label),
(instruction, Name.Function, 'instruction-args'),
(r'\.' + identifier, Name.Function, 'data-args'),
(r'[\r\n]+', Whitespace)
],
'numeric' : [
(binary_number, Number.Integer),
(number, Number.Integer),
(single_char, String),
],
'arg' : [
(identifier, guess_identifier),
include('numeric')
],
'deref' : [
(r'\+', Punctuation),
(r'\]', Punctuation, '#pop'),
include('arg'),
include('whitespace')
],
'instruction-line' : [
(r'[\r\n]+', Whitespace, '#pop'),
(r';.*?$', Comment, '#pop'),
include('whitespace')
],
'instruction-args': [
(r',', Punctuation),
(r'\[', Punctuation, 'deref'),
include('arg'),
include('instruction-line')
],
'data-args' : [
(r',', Punctuation),
include('numeric'),
(string, String),
include('instruction-line')
],
'whitespace': [
(r'\n', Whitespace),
(r'\s+', Whitespace),
(r';.*?\n', Comment)
],
}
|
a11aa6e3fcc48f7e195565f68cc115e8f2433178
|
f27e3fdc97290b1db6d3fa7039ad59e4f8b5a760
|
/tensorflow-1/comet-tensorflow-char-rnn-example.py
|
30ec29b7af5bd19324a911bfe6a1a1a3cb052e77
|
[] |
no_license
|
comet-ml/comet-examples
|
9c7bcea8b97986fb7987cbe0f4533f619e2a0939
|
9da5d4f296e633bb7e63b47dc2d3f7a0780c0a4e
|
refs/heads/master
| 2023-08-19T03:32:51.864273
| 2023-08-09T09:30:34
| 2023-08-09T09:30:34
| 158,587,515
| 134
| 55
| null | 2023-09-13T16:58:41
| 2018-11-21T18:00:34
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 7,045
|
py
|
comet-tensorflow-char-rnn-example.py
|
# coding: utf-8
"""
Example adapted from Vanilla Char-RNN using TensorFlow by Vinh Khuc (@knvinh) https://gist.github.com/vinhkhuc/7ec5bf797308279dc587.
Adapted from Karpathy's min-char-rnn.py
https://gist.github.com/karpathy/d4dee566867f8291f086
Requires tensorflow>=1.0
BSD License
"""
# import comet_ml at the top of your file
from comet_ml import Experiment
# create an experiment with your api key
import os
# Setting the API key (saved as environment variable)
experiment = Experiment(
#api_key="YOUR API KEY",
# or
api_key=os.environ.get("COMET_API_KEY"),
project_name='comet-examples')
import random
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import init_ops
import re
seed_value = 42
tf.set_random_seed(seed_value)
random.seed(seed_value)
# Generative Model using Dracula by Bram Stoker
book_path = os.path.join('data', 'dracula.txt')
with open(book_path, encoding='utf_8') as f:
    my_text = f.read()  # read the entire file into one string
print('Length of data is {}, type of data is {}\n'.format(
len(my_text), type(my_text)))
def simple_clean(text):
    text = text.lower()
    text = re.sub(r'[\r\n+*"]', ' ', text)  # line breaks, '+', '*', '"'
    text = re.sub(r'[()_]', ' ', text)  # parentheses and underscore markers
    text = re.sub(r'([0-9]+:[0-9]+)', '#', text)  # clock times such as 12:30
    text = re.sub(r'[0-9]+', '#', text)  # all remaining numbers
    text = re.sub(r'[pa]\.\s*m\.', ' ', text)  # a.m. or p.m.
    text = re.sub(r'-+', ' ', text)  # dashes
    text = re.sub(r'\s{2,}', ' ', text)  # renormalize whitespace
    return text
data = simple_clean(my_text)
# compile index
chars = sorted(list(set(data)))
data_size, vocab_size = len(data), len(chars)
print('Data has %d characters, %d unique.\n' % (data_size, vocab_size))
char_to_ix = {ch: i for i, ch in enumerate(chars)}
ix_to_char = {i: ch for i, ch in enumerate(chars)}
print(ix_to_char)
print(char_to_ix)
# let's now try to delete some of these special chars from the text and reindex
special = {3, 4, 5, 6, 9, 12, 13, 15, 42, 43, 44}
def more_clean(dictionary, keys, text):
new_text = text[:]
for key in keys:
new_text = re.sub(re.escape(dictionary.get(key)), " ", new_text)
new_text = re.sub(r'\s+', " ", new_text)
return new_text
data = more_clean(ix_to_char, special, data)
chars = sorted(list(set(data)))
data_size, vocab_size = len(data), len(chars)
print('Data has %d characters, %d unique.\n' % (data_size, vocab_size))
char_to_ix = {ch: i for i, ch in enumerate(chars)}
ix_to_char = {i: ch for i, ch in enumerate(chars)}
tf.reset_default_graph()
# Hyper-parameters, initialization
hidden_size = 300 # number of hidden neurons
seq_length = 45 # number of steps to unroll
# log your parameters to Comet.ml!
params = {"len(chars)": len(chars),
"text": "Dracula",
"hidden_size": hidden_size,
"seq_length": seq_length
}
experiment.log_parameters(params)
inputs = tf.placeholder(shape=[None, vocab_size],
dtype=tf.float32, name="inputs")
targets = tf.placeholder(
shape=[None, vocab_size], dtype=tf.float32, name="targets")
init_state = tf.placeholder(
shape=[1, hidden_size], dtype=tf.float32, name="state")
initializer = tf.random_normal_initializer(stddev=0.1)
bias_initializer = tf.random_normal_initializer(mean=0.5, stddev=0.5)
# Model building preliminaries
# RNN built out explicitly, w/o using predefined TF functions!!
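# For reference, the unrolled loop below implements the vanilla RNN step
#     h_t = tanh(x_t @ Wxh + h_{t-1} @ Whh + bh)
#     y_t = h_t @ Why + by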
with tf.variable_scope("RNN") as scope:
hs_t = init_state
ys = []
for t, xs_t in enumerate(tf.split(inputs, seq_length, axis=0)):
if t > 0:
scope.reuse_variables() # Reuse variables
Wxh = tf.get_variable(
"Wxh", [vocab_size, hidden_size], initializer=initializer)
Whh = tf.get_variable(
"Whh", [hidden_size, hidden_size], initializer=initializer)
Why = tf.get_variable(
"Why", [hidden_size, vocab_size], initializer=initializer)
bh = tf.get_variable("bh", [hidden_size], initializer=bias_initializer)
by = tf.get_variable("by", [vocab_size], initializer=bias_initializer)
hs_t = tf.tanh(tf.matmul(xs_t, Wxh) + tf.matmul(hs_t, Whh) + bh)
ys_t = tf.matmul(hs_t, Why) + by
ys.append(ys_t)
hprev = hs_t
output_softmax = tf.nn.softmax(ys[-1]) # Get softmax for sampling
outputs = tf.concat(ys, axis=0)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
labels=targets, logits=outputs))
# Minimizer
minimizer = tf.train.AdamOptimizer(epsilon=0.1)
grads_and_vars = minimizer.compute_gradients(loss)
# Gradient clipping
grad_clipping = tf.constant(5.0, name="grad_clipping")
clipped_grads_and_vars = []
for grad, var in grads_and_vars:
clipped_grad = tf.clip_by_value(grad, -grad_clipping, grad_clipping)
clipped_grads_and_vars.append((clipped_grad, var))
# Gradient updates
updates = minimizer.apply_gradients(clipped_grads_and_vars)
def one_hot(v):
return np.eye(vocab_size)[v]
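# e.g. with vocab_size == 3, one_hot([0, 2]) -> [[1., 0., 0.], [0., 0., 1.]]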
# begin training
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
# log model graph
experiment.set_model_graph(sess.graph)
# Initial values
MAXITERS = 500000
n, p = 0, 0
hprev_val = np.zeros([1, hidden_size])
while (n < MAXITERS):
# Initialize
if p + seq_length + 1 >= len(data) or n == 0:
hprev_val = np.zeros([1, hidden_size])
p = 0 # reset
# Prepare inputs
input_vals = [char_to_ix[ch] for ch in data[p:p + seq_length]]
target_vals = [char_to_ix[ch] for ch in data[p + 1:p + seq_length + 1]]
input_vals = one_hot(input_vals)
target_vals = one_hot(target_vals)
hprev_val, loss_val, _ = sess.run([hprev, loss, updates],
feed_dict={inputs: input_vals,
targets: target_vals,
init_state: hprev_val})
# log the loss to Comet.ml
experiment.log_metric("loss", loss_val, step=n)
if n % 500 == 0:
# Log Progress
print('iter: %d, p: %d, loss: %f' % (n, p, loss_val))
# Do sampling
sample_length = 200
start_ix = random.randint(0, len(data) - seq_length)
sample_seq_ix = [char_to_ix[ch]
for ch in data[start_ix:start_ix + seq_length]]
ixes = []
sample_prev_state_val = np.copy(hprev_val)
for t in range(sample_length):
sample_input_vals = one_hot(sample_seq_ix)
sample_output_softmax_val, sample_prev_state_val = sess.run([output_softmax, hprev],
feed_dict={inputs: sample_input_vals, init_state: sample_prev_state_val})
ix = np.random.choice(
range(vocab_size), p=sample_output_softmax_val.ravel())
ixes.append(ix)
sample_seq_ix = sample_seq_ix[1:] + [ix]
txt = ''.join(ix_to_char[ix] for ix in ixes)
print('----\n %s \n----\n' % (txt,))
p += seq_length
n += 1
|
782a6c909d41ab83a8c31f6c1c0d62831a9ff9b6
|
49a386b7e54eb305f2332c43f592b30c9d8dad9a
|
/tests/test_view_models.py
|
b0ee86697dd46aea52b697933d67b22673bc6fc5
|
[
"MIT"
] |
permissive
|
SectorLabs/django-postgres-extra
|
e17b81ab7bc6187586be1caf021bc07ca8a6550f
|
e5503cb3f3c1b7959bd55253d3a79296f4c8f0ef
|
refs/heads/master
| 2023-08-30T16:08:49.899832
| 2023-08-25T09:54:12
| 2023-08-25T10:26:55
| 80,707,765
| 645
| 87
|
MIT
| 2023-08-21T10:02:48
| 2017-02-02T08:46:26
|
Python
|
UTF-8
|
Python
| false
| false
| 3,609
|
py
|
test_view_models.py
|
import pytest
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.test.utils import override_settings
from psqlextra.models import PostgresMaterializedViewModel, PostgresViewModel
from .fake_model import define_fake_model, define_fake_view_model
@pytest.mark.parametrize(
"model_base", [PostgresViewModel, PostgresMaterializedViewModel]
)
@override_settings(POSTGRES_EXTRA_ANNOTATE_SQL=True)
def test_view_model_meta_query_set(model_base):
"""Tests whether you can set a :see:QuerySet to be used as the underlying
query for a view."""
model = define_fake_model({"name": models.TextField()})
view_model = define_fake_view_model(
{"name": models.TextField()},
model_base=model_base,
view_options={"query": model.objects.all()},
)
expected_sql = 'SELECT "{0}"."id", "{0}"."name" FROM "{0}"'.format(
model._meta.db_table
)
assert view_model._view_meta.query[0].startswith(expected_sql + " /* ")
assert view_model._view_meta.query[1] == tuple()
@pytest.mark.parametrize(
"model_base", [PostgresViewModel, PostgresMaterializedViewModel]
)
@pytest.mark.parametrize("bind_params", [("test",), ["test"]])
def test_view_model_meta_sql_with_params(model_base, bind_params):
"""Tests whether you can set a raw SQL query with a tuple of bind params as
the underlying query for a view."""
model = define_fake_model({"name": models.TextField()})
sql = "select * from %s where name = %s" % (model._meta.db_table, "%s")
sql_with_params = (sql, bind_params)
view_model = define_fake_view_model(
{"name": models.TextField()},
model_base=model_base,
view_options={"query": sql_with_params},
)
assert view_model._view_meta.query == sql_with_params
@pytest.mark.parametrize(
"model_base", [PostgresViewModel, PostgresMaterializedViewModel]
)
def test_view_model_meta_sql_with_named_params(model_base):
"""Tests whether you can set a raw SQL query with a tuple of bind params as
the underlying query for a view."""
model = define_fake_model({"name": models.TextField()})
sql = "select * from " + model._meta.db_table + " where name = %(name)s"
sql_with_params = (sql, dict(name="test"))
view_model = define_fake_view_model(
{"name": models.TextField()},
model_base=model_base,
view_options={"query": sql_with_params},
)
assert view_model._view_meta.query == sql_with_params
@pytest.mark.parametrize(
"model_base", [PostgresViewModel, PostgresMaterializedViewModel]
)
def test_view_model_meta_sql(model_base):
"""Tests whether you can set a raw SQL query without any params."""
sql = "select 1"
view_model = define_fake_view_model(
{"name": models.TextField()},
model_base=model_base,
view_options={"query": sql},
)
assert view_model._view_meta.query == (sql, tuple())
@pytest.mark.parametrize(
"model_base", [PostgresViewModel, PostgresMaterializedViewModel]
)
@pytest.mark.parametrize(
"view_query",
[
dict(a=1),
tuple("test"),
("test", None),
(None, None),
(1, 2),
("select 1", ("a", "b"), "onetoomay"),
],
)
def test_view_model_meta_bad_query(model_base, view_query):
"""Tests whether a bad view query configuration raises and error."""
with pytest.raises(ImproperlyConfigured):
define_fake_view_model(
{"name": models.TextField()},
model_base=model_base,
view_options={"query": view_query},
)
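# For reference, outside of tests a view model is normally declared with an
# inner ViewMeta class carrying the query (minimal sketch; the model below
# is made up for illustration):
#
#     class NamedItem(PostgresViewModel):
#         name = models.TextField()
#         class ViewMeta:
#             query = "select id, name from item where name is not null"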
|
a2ead41d161926b7473ecbcf948947dc55f91222
|
450916eee7580beb928ed8f387db4f0a8c1aa508
|
/src/amuse/community/ph4/test_multiples2.py
|
6b074ee2f22ea896a8d22ea347f4dd733f4e6bda
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
amusecode/amuse
|
42095545893f5a86ea79c2a52ce54d3ce8eb204f
|
b57c1e2fda1457d5025307be105c2aa59b19b574
|
refs/heads/main
| 2023-08-31T04:50:48.880044
| 2023-08-30T12:00:20
| 2023-08-30T12:00:20
| 18,516,331
| 158
| 118
|
Apache-2.0
| 2023-08-30T12:00:22
| 2014-04-07T12:35:07
|
AMPL
|
UTF-8
|
Python
| false
| false
| 17,202
|
py
|
test_multiples2.py
|
import collections
import getopt
import numpy
import os
import random
import sys
import unittest
from time import process_time as clock
from amuse.community.ph4.interface import ph4 as grav
from amuse.community.smalln.interface import SmallN
from amuse.community.kepler.interface import Kepler
from amuse.couple import multiples
from amuse.units import nbody_system
from amuse.units import units
from amuse.units import quantities
from amuse import datamodel
from amuse.datamodel import particle_attributes as pa
from amuse.rfi.core import is_mpd_running
from amuse.ic.plummer import new_plummer_model
from amuse.ic.salpeter import new_salpeter_mass_distribution_nbody
def print_log(pre, time, gravity, E0 = 0.0 | nbody_system.energy, cpu0 = 0.0):
cpu = clock()
N = len(gravity.particles)
M = gravity.total_mass
U = gravity.potential_energy
T = gravity.kinetic_energy
Etop = T + U
Nmul, Nbin, Emul = gravity.get_total_multiple_energy()
tmp1,tmp2,Emul2 = gravity.get_total_multiple_energy2()
Etot = Etop + Emul
Eext = gravity.multiples_external_tidal_correction
Eint = gravity.multiples_internal_tidal_correction
Eerr = gravity.multiples_integration_energy_error
Edel = gravity.multiples_external_tidal_correction \
+ gravity.multiples_internal_tidal_correction \
+ gravity.multiples_integration_energy_error
Ecor = Etot - Edel
if E0 == 0 | nbody_system.energy: E0 = Ecor
Rvir = -0.5*M*M/U
Q = -T/U
com = pa.center_of_mass(gravity.particles)
comv = pa.center_of_mass_velocity(gravity.particles)
dcen,rcore,rhocore = pa.densitycentre_coreradius_coredens(gravity.particles)
cmx,cmy,cmz = dcen
lagr,mf = pa.LagrangianRadii(gravity.particles, cm=dcen) # no units!
print('')
print(pre+"time=", time.number)
print(pre+"CPU=", cpu - cpu0)
print(pre+"Ntot=", N)
print(pre+"mass=", M.number)
print(pre+"Etot=", Etot.number)
print(pre+"Etop=", Etop.number)
print(pre+"Eext=", Eext.number)
print(pre+"Eint=", Eint.number)
print(pre+"Eerr=", Eerr.number)
print(pre+"Edel=", Edel.number)
print(pre+"Ecor=", Ecor.number)
print(pre+"dE/E=", Ecor/E0 - 1)
print(pre+"Rvir=", Rvir.number)
print(pre+"Qvir=", Q)
cmx,cmy,cmz = com
print(pre+"cmpos[3]= %.8f %.8f %.8f" % (cmx.number, cmy.number, cmz.number))
cmx,cmy,cmz = comv
print(pre+"cmvel[3]= %.8f %.8f %.8f" % (cmx.number, cmy.number, cmz.number))
cmx,cmy,cmz = dcen
print(pre+"dcpos[3]= %.8f %.8f %.8f" % (cmx.number, cmy.number, cmz.number))
print(pre+"Rcore=", rcore.number)
print(pre+"Mcore=", (rhocore*rcore**3).number) # fake...
print(pre+"Mlagr[9]=", end=' ')
for m in mf: print("%.4f" % (m), end=' ')
print('')
print(pre+"Rlagr[9]=", end=' ')
for r in lagr.number: print("%.8f" % (r), end=' ')
print('')
kT = T/N
Nmul,Nbin,Emul = gravity.print_multiples2(pre, kT, dcen)
print(pre+"Nmul=", Nmul)
print(pre+"Nbin=", Nbin)
print(pre+"Emul= %.5f" % (Emul.number))
print(pre+"Emul2= %.5f" % (Emul2.number))
print(pre+"Emul/kT= %.5f" % (Emul.number/kT.number))
print(pre+"Emul/E= %.5f" % (Emul.number/Etot.number))
print('')
sys.stdout.flush()
return Ecor,cpu
SMALLN = None
def new_smalln():
SMALLN.reset()
return SMALLN
def init_smalln():
global SMALLN
sys.stdout.flush()
SMALLN = SmallN()
sys.stdout.flush()
SMALLN.parameters.timestep_parameter = 0.1
#SMALLN.parameters.cm_index = 2001 # don't set this here!!
sys.stdout.flush()
def init_kepler(star1, star2):
try:
star1.mass.value_in(units.kg) # see if SI units, throw exception if not
unit_converter \
= nbody_system.nbody_to_si(star1.mass + star2.mass,
(star2.position-star1.position).length())
except Exception as ex:
unit_converter = None
kep = Kepler(unit_converter, redirection = "none")
kep.initialize_code()
return kep
def run_ph4(infile = None, outfile = None,
number_of_stars = 100, number_of_binaries = 0,
end_time = 10 | nbody_system.time,
delta_t = 1 | nbody_system.time,
n_workers = 1, use_gpu = 1, gpu_worker = 1,
salpeter = 0,
accuracy_parameter = 0.1,
softening_length = 0.0 | nbody_system.length,
manage_encounters = 1, random_seed = 1234,
debug_level = 1):
if random_seed <= 0:
numpy.random.seed()
random_seed = numpy.random.randint(1, pow(2,31)-1)
numpy.random.seed(random_seed)
print("random seed =", random_seed)
if infile != None: print("input file =", infile)
print("end_time =", end_time)
print("delta_t =", delta_t)
print("n_workers =", n_workers)
print("use_gpu =", use_gpu)
print("manage_encounters =", manage_encounters)
print("\ninitializing the gravity module")
sys.stdout.flush()
init_smalln()
# Note that there are actually three GPU options:
#
# 1. use the GPU code and allow GPU use (default)
# 2. use the GPU code but disable GPU use (-g)
# 3. use the non-GPU code (-G)
if gpu_worker == 1:
try:
gravity = grav(number_of_workers = n_workers,
redirection = "none", mode = "gpu")
except Exception as ex:
gravity = grav(number_of_workers = n_workers,
redirection = "none")
else:
gravity = grav(number_of_workers = n_workers,
redirection = "none")
gravity.initialize_code()
gravity.parameters.set_defaults()
#-----------------------------------------------------------------
if infile == None:
print("making a Plummer model")
stars = new_plummer_model(number_of_stars)
id = numpy.arange(number_of_stars)
stars.id = id+1
print("setting particle masses and radii")
if salpeter == 0:
print('equal masses')
total_mass = 1.0 | nbody_system.mass
scaled_mass = total_mass / number_of_stars
else:
print('salpeter mass function')
scaled_mass = new_salpeter_mass_distribution_nbody(number_of_stars)
stars.mass = scaled_mass
print("centering stars")
stars.move_to_center()
print("scaling stars to virial equilibrium")
stars.scale_to_standard(smoothing_length_squared
= gravity.parameters.epsilon_squared)
else:
# Read the input data. Units are dynamical (sorry).
# Format: id mass pos[3] vel[3]
print("reading file", infile)
id = []
mass = []
pos = []
vel = []
f = open(infile, 'r')
count = 0
for line in f:
if len(line) > 0:
count += 1
cols = line.split()
if count == 1: snap = int(cols[0])
elif count == 2: number_of_stars = int(cols[0])
elif count == 3: time = float(cols[0]) | nbody_system.time
else:
if len(cols) >= 8:
id.append(int(cols[0]))
mass.append(float(cols[1]))
pos.append((float(cols[2]),
float(cols[3]), float(cols[4])))
vel.append((float(cols[5]),
float(cols[6]), float(cols[7])))
f.close()
stars = datamodel.Particles(number_of_stars)
stars.id = id
stars.mass = mass | nbody_system.mass
stars.position = pos | nbody_system.length
stars.velocity = vel | nbody_system.speed
#stars.radius = 0. | nbody_system.length
total_mass = stars.mass.sum()
ke = pa.kinetic_energy(stars)
kT = ke/(1.5*number_of_stars)
if number_of_binaries > 0:
# Turn selected stars into binary components.
# Only tested for equal-mass case.
kep = Kepler(redirection = "none")
kep.initialize_code()
added_mass = 0.0 | nbody_system.mass
# Work with energies rather than semimajor axes.
Emin = 10*kT
Emax = 20*kT
ecc = 0.1
nbin = 0
companion_base_id = 100*(number_of_stars//10)
for i in range(0, number_of_stars,
number_of_stars//number_of_binaries):
# Star i is CM, becomes component, add other star at end.
nbin += 1
mass = stars[i].mass
new_mass = numpy.random.uniform()*mass # uniform q
mbin = mass + new_mass
fac = new_mass/mbin
E = Emin + numpy.random.uniform()*(Emax-Emin)
a = 0.5*nbody_system.G*mass*new_mass/E
kep.initialize_from_elements(mbin, a, ecc)
# Binaries should be approaching in order to be picked up
# by multiples.
kep.advance_to_apastron()
kep.advance_to_radius(a)
dr = quantities.AdaptingVectorQuantity()
dr.extend(kep.get_separation_vector())
dv = quantities.AdaptingVectorQuantity()
dv.extend(kep.get_velocity_vector())
newstar = datamodel.Particles(1)
newstar.mass = new_mass
newstar.position = stars[i].position + (1-fac)*dr
newstar.velocity = stars[i].velocity + (1-fac)*dv
stars[i].position = stars[i].position - fac*dr
stars[i].velocity = stars[i].velocity - fac*dv
newstar.id = companion_base_id + stars[i].id
stars.add_particles(newstar)
added_mass += new_mass
if nbin >= number_of_binaries: break
kep.stop()
print('created', nbin, 'binaries')
sys.stdout.flush()
stars.mass = stars.mass * total_mass/(total_mass+added_mass)
number_of_stars += nbin
# Set dynamical radii (assuming virial equilibrium and standard
# units). Note that this choice should be refined, and updated
# as the system evolves. Probably the choice of radius should be
# made entirely in the multiples module. TODO. In these units,
# M = 1 and <v^2> = 0.5, so the mean 90-degree turnaround impact
# parameter is
#
# b_90 = G (m_1+m_2) / vrel^2
# = 2 <m> / 2<v^2>
# = 2 / N for equal masses
#
# Taking r_i = m_i / 2<v^2> = m_i in virial equilibrium means
# that, approximately, "contact" means a 90-degree deflection (r_1
# + r_2 = b_90). A more conservative choice with r_i less than
# this value will isolate encounters better, but also place more
# load on the large-N dynamical module.
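    #
    # Worked example of the rule above: for N = 100 equal masses, m_i = 0.01,
    # so r_i = 0.01 length units and r_1 + r_2 = 0.02 = 2/N = b_90.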
stars.radius = stars.mass.number | nbody_system.length
time = 0.0 | nbody_system.time
# print "IDs:", stars.id.number
print("recentering stars")
stars.move_to_center()
sys.stdout.flush()
#-----------------------------------------------------------------
if softening_length < 0.0 | nbody_system.length:
# Use ~interparticle spacing. Assuming standard units here. TODO
eps2 = 0.25*(float(number_of_stars))**(-0.666667) \
| nbody_system.length**2
else:
eps2 = softening_length*softening_length
print('softening length =', eps2.sqrt())
gravity.parameters.timestep_parameter = accuracy_parameter
gravity.parameters.epsilon_squared = eps2
gravity.parameters.use_gpu = use_gpu
# gravity.parameters.manage_encounters = manage_encounters
print('')
print("adding particles")
# print stars
sys.stdout.flush()
gravity.particles.add_particles(stars)
gravity.commit_particles()
print('')
print("number_of_stars =", number_of_stars)
sys.stdout.flush()
# Channel to copy values from the code to the set in memory.
channel = gravity.particles.new_channel_to(stars)
stopping_condition = gravity.stopping_conditions.collision_detection
stopping_condition.enable()
# Debugging: prevent the multiples code from being called.
if 0:
stopping_condition.disable()
print('stopping condition disabled')
sys.stdout.flush()
# -----------------------------------------------------------------
# Create the coupled code and integrate the system to the desired
# time, managing interactions internally.
kep = init_kepler(stars[0], stars[1])
multiples_code = multiples.Multiples(gravity, new_smalln, kep)
multiples_code.neighbor_perturbation_limit = 0.1
#multiples_code.neighbor_distance_factor = 2.0
multiples_code.neighbor_veto = True
multiples_code.global_debug = debug_level
print('')
print('multiples_code.initial_scale_factor =', \
multiples_code.initial_scale_factor)
print('multiples_code.neighbor_perturbation_limit =', \
multiples_code.neighbor_perturbation_limit)
print('multiples_code.neighbor_veto =', \
multiples_code.neighbor_veto)
print('multiples_code.final_scale_factor =', \
multiples_code.final_scale_factor)
print('multiples_code.initial_scatter_factor =', \
multiples_code.initial_scatter_factor)
print('multiples_code.final_scatter_factor =', \
multiples_code.final_scatter_factor)
print('multiples_code.retain_binary_apocenter =', \
multiples_code.retain_binary_apocenter)
print('multiples_code.wide_perturbation_limit =', \
multiples_code.wide_perturbation_limit)
# Find initial binaries.
gravity.parameters.zero_step_mode = 1
print('\nidentifying initial binaries')
multiples_code.evolve_model(time)
gravity.parameters.zero_step_mode = 0
pre = "%%% "
E0,cpu0 = print_log(pre, time, multiples_code)
print("evolving to time =", end_time, \
"in steps of", delta_t)
while time < end_time:
time += delta_t
multiples_code.evolve_model(time)
# Copy values from the module to the set in memory.
channel.copy()
# Copy the index (ID) as used in the module to the id field in
# memory. The index is not copied by default, as different
# codes may have different indices for the same particle and
# we don't want to overwrite silently.
channel.copy_attribute("index_in_code", "id")
print_log(pre, time, multiples_code, E0, cpu0)
sys.stdout.flush()
#-----------------------------------------------------------------
    if outfile is not None:
# Write data to a file.
f = open(outfile, 'w')
#--------------------------------------------------
# Need to save top-level stellar data and parameters.
# Need to save multiple data and parameters.
f.write('%.15g\n'%(time.number))
for s in multiples_code.stars: write_star(s, f)
#--------------------------------------------------
f.close()
print('wrote file', outfile)
print('')
gravity.stop()
def write_star(s, f):
x,y,z = s.position.number
vx,vy,vz = s.velocity.number
f.write('%d %.15g %.15g %.15g %.15g %.15g %.15g %.15g\n' \
%(s.id, s.mass.number, x, y, z, vx, vy, vz))
if __name__ == '__main__':
print('\ncommand line:', end=' ')
for a in sys.argv: print(a, end=' ')
print('\n')
infile = None
outfile = None
N = 100
Nbin = 10
t_end = 10.0 | nbody_system.time
delta_t = 1.0 | nbody_system.time
n_workers = 2
use_gpu = 0
gpu_worker = 0
salpeter = 0
accuracy_parameter = 0.1
softening_length = 0 | nbody_system.length
random_seed = 42
manage_encounters = 1
debug_level = 1
try:
        opts, args = getopt.getopt(sys.argv[1:], "a:b:c:d:D:e:f:F:gGn:s:St:w:")
except getopt.GetoptError as err:
print(str(err))
sys.exit(1)
for o, a in opts:
if o == "-a":
accuracy_parameter = float(a)
elif o == "-b":
Nbin = int(a)
elif o == "-c":
manage_encounters = int(a)
elif o == "-d":
delta_t = float(a) | nbody_system.time
elif o == "-D":
debug_level = int(a)
elif o == "-e":
softening_length = float(a) | nbody_system.length
elif o == "-f":
infile = a
elif o == "-F":
outfile = a
elif o == "-g":
use_gpu = 0
elif o == "-G":
use_gpu = 0
gpu_worker = 0
elif o == "-n":
N = int(a)
elif o == "-s":
random_seed = int(a)
elif o == "-S":
salpeter = 1
elif o == "-t":
t_end = float(a) | nbody_system.time
elif o == "-w":
n_workers = int(a)
else:
print("unexpected argument", o)
assert is_mpd_running()
run_ph4(infile, outfile,
N, Nbin, t_end, delta_t, n_workers,
use_gpu, gpu_worker,
salpeter, accuracy_parameter, softening_length,
manage_encounters, random_seed, debug_level)
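
# Example invocation (a sketch; the script name is hypothetical, and the
# options follow the getopt block above): 100 stars, 10 of them binaries,
# evolved to t = 10 in steps of 1 with random seed 42:
#
#   python run_ph4_multiples.py -n 100 -b 10 -t 10.0 -d 1.0 -s 42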
|
a26b78e4101902c25b5b12a182b976dde480fda9
|
d0900eecc0833e3ee315ad143780c34e22b0075f
|
/part2/protocols/bin_mem.py
|
5d0c6cef71396ea2a808bcbbfed2e907f508b131
|
[
"Apache-2.0"
] |
permissive
|
RandyAbernethy/ThriftBook
|
cd52d9439aa6ca33eb7e41c9ef5a16d3e5950716
|
1f6a6fde89cb5609d781924a2fa527ebad1138ab
|
refs/heads/master
| 2023-01-12T13:29:52.934900
| 2023-01-03T22:49:13
| 2023-01-03T22:49:13
| 18,453,393
| 112
| 46
|
Apache-2.0
| 2023-01-03T23:00:38
| 2014-04-04T21:33:26
|
Java
|
UTF-8
|
Python
| false
| false
| 428
|
py
|
bin_mem.py
|
# Apache Thrift Binary Protocol in Python
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
trans = TTransport.TMemoryBuffer()
proto = TBinaryProtocol.TBinaryProtocol(trans)
proto.writeString("Hello Thrift Serialization")
print ("Wrote %d bytes to the TMemoryBuffer" % (trans.cstringio_buf.tell()))
trans.cstringio_buf.seek(0)
msg = proto.readString()
print("Recovered string: %s" % (msg))
|
4d48aeb1b5a293b4eeba3c6ea9eeda118b3309ca
|
800cb6b7433ee7f74ee727522033060037c8432f
|
/aiohttp_sse/__init__.py
|
1b04105299080fe472f24a9f71392a0dd65c647d
|
[
"Apache-2.0"
] |
permissive
|
aio-libs/aiohttp-sse
|
27983ab53184e9a2e9c88c53183d7785113119bf
|
eeba5dd054b4f74c7e3a582ab013ab7528969b77
|
refs/heads/master
| 2023-08-04T12:26:09.054730
| 2023-07-31T21:34:53
| 2023-07-31T21:34:53
| 31,738,506
| 163
| 35
|
NOASSERTION
| 2023-09-11T16:29:50
| 2015-03-05T21:50:47
|
Python
|
UTF-8
|
Python
| false
| false
| 6,420
|
py
|
__init__.py
|
import asyncio
import contextlib
import io
import re
from aiohttp.web import HTTPMethodNotAllowed, StreamResponse
from .helpers import _ContextManager
__version__ = "2.1.0"
__all__ = ["EventSourceResponse", "sse_response"]
class EventSourceResponse(StreamResponse):
"""This object could be used as regular aiohttp response for
streaming data to client, usually browser with EventSource::
async def hello(request):
# create response object
resp = await EventSourceResponse()
async with resp:
# stream data
resp.send('foo')
return resp
"""
DEFAULT_PING_INTERVAL = 15
DEFAULT_SEPARATOR = "\r\n"
LINE_SEP_EXPR = re.compile(r"\r\n|\r|\n")
def __init__(self, *, status=200, reason=None, headers=None, sep=None):
super().__init__(status=status, reason=reason)
if headers is not None:
self.headers.extend(headers)
        # mandatory headers for server-sent events
self.headers["Content-Type"] = "text/event-stream"
self.headers["Cache-Control"] = "no-cache"
self.headers["Connection"] = "keep-alive"
self.headers["X-Accel-Buffering"] = "no"
self._ping_interval = self.DEFAULT_PING_INTERVAL
self._ping_task = None
self._sep = sep if sep is not None else self.DEFAULT_SEPARATOR
async def _prepare(self, request):
await self.prepare(request)
return self
async def prepare(self, request):
"""Prepare for streaming and send HTTP headers.
:param request: regular aiohttp.web.Request.
"""
if request.method != "GET":
raise HTTPMethodNotAllowed(request.method, ["GET"])
if not self.prepared:
writer = await super().prepare(request)
self._ping_task = asyncio.create_task(self._ping())
            # explicitly enable chunked encoding, since the content length
            # is usually not known beforehand.
self.enable_chunked_encoding()
return writer
else:
# hackish way to check if connection alive
# should be updated once we have proper API in aiohttp
# https://github.com/aio-libs/aiohttp/issues/3105
if request.protocol.transport is None:
# request disconnected
raise asyncio.CancelledError()
async def send(self, data, id=None, event=None, retry=None):
"""Send data using EventSource protocol
:param str data: The data field for the message.
:param str id: The event ID to set the EventSource object's last
event ID value to.
:param str event: The event's type. If this is specified, an event will
be dispatched on the browser to the listener for the specified
event name; the web site would use addEventListener() to listen
for named events. The default event type is "message".
        :param int retry: The reconnection time to use when attempting to send
            the event. This must be an integer, specifying the reconnection
            time in milliseconds. If a non-integer value is specified, the
            field is ignored.
"""
buffer = io.StringIO()
if id is not None:
buffer.write(self.LINE_SEP_EXPR.sub("", f"id: {id}"))
buffer.write(self._sep)
if event is not None:
buffer.write(self.LINE_SEP_EXPR.sub("", f"event: {event}"))
buffer.write(self._sep)
for chunk in self.LINE_SEP_EXPR.split(data):
buffer.write(f"data: {chunk}")
buffer.write(self._sep)
if retry is not None:
if not isinstance(retry, int):
raise TypeError("retry argument must be int")
buffer.write(f"retry: {retry}")
buffer.write(self._sep)
buffer.write(self._sep)
await self.write(buffer.getvalue().encode("utf-8"))
async def wait(self):
"""EventSourceResponse object is used for streaming data to the client,
this method returns future, so we can wait until connection will
be closed or other task explicitly call ``stop_streaming`` method.
"""
if self._ping_task is None:
raise RuntimeError("Response is not started")
with contextlib.suppress(asyncio.CancelledError):
await self._ping_task
def stop_streaming(self):
"""Used in conjunction with ``wait`` could be called from other task
to notify client that server no longer wants to stream anything.
"""
if self._ping_task is None:
raise RuntimeError("Response is not started")
self._ping_task.cancel()
def enable_compression(self, force=False):
raise NotImplementedError
@property
def ping_interval(self):
"""Time interval between two ping massages"""
return self._ping_interval
@ping_interval.setter
def ping_interval(self, value):
"""Setter for ping_interval property.
:param int value: interval in sec between two ping values.
"""
if not isinstance(value, int):
raise TypeError("ping interval must be int")
        if value < 0:
            raise ValueError("ping interval must not be negative")
self._ping_interval = value
async def _ping(self):
        # Periodically send a ping to the browser. Any message that
        # starts with a ":" colon is ignored by the browser, so it can
        # be used as a ping message.
while True:
await asyncio.sleep(self._ping_interval)
await self.write(": ping{0}{0}".format(self._sep).encode("utf-8"))
async def __aenter__(self):
return self
async def __aexit__(self, *args):
self.stop_streaming()
await self.wait()
return
def sse_response(
request,
*,
status=200,
reason=None,
headers=None,
sep=None,
response_cls=EventSourceResponse,
):
if not issubclass(response_cls, EventSourceResponse):
raise TypeError(
"response_cls must be subclass of "
"aiohttp_sse.EventSourceResponse, got {}".format(response_cls)
)
sse = response_cls(status=status, reason=reason, headers=headers, sep=sep)
return _ContextManager(sse._prepare(request))
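
# A minimal server-side usage sketch of the API above (kept as a comment so
# the module stays import-clean; the route and payload are hypothetical):
#
#     from aiohttp import web
#     from aiohttp_sse import sse_response
#
#     async def events(request):
#         async with sse_response(request) as resp:
#             await resp.send("tick", event="clock")
#         return resp
#
#     app = web.Application()
#     app.router.add_get("/events", events)
#     web.run_app(app)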
|
b5e1489adb96d50ca3a1035ce4aa900f7140b31e
|
b4e36b2c71b41f8971b57bda977c29503aa4846d
|
/file_deduplication(For_urltoip).py
|
ee5b72ba0c425ff17fb16664b33260175a39d702
|
[] |
no_license
|
3gstudent/Homework-of-Python
|
b79157eceb63f171e2a838479611bb9e5e85018a
|
d436661fbcb3d57021134f61e6c59f4d5a29b948
|
refs/heads/master
| 2023-04-09T15:55:16.360349
| 2023-03-31T03:02:43
| 2023-03-31T03:02:43
| 150,383,185
| 300
| 105
| null | 2022-11-28T02:49:10
| 2018-09-26T07:05:35
|
Python
|
UTF-8
|
Python
| false
| false
| 501
|
py
|
file_deduplication(For_urltoip).py
|
import sys

def filededuplication(path):
    # Keep the second whitespace-separated field (the IP) of each input
    # line, writing each unique value once to "<path>new".
    lines_seen = set()
    with open(path, "r") as f, open(path + "new", "w") as outfile:
        for line in f:
            ip = line.split()[1]
            if ip not in lines_seen:
                outfile.write(ip + "\n")
                lines_seen.add(ip)
    print("[*]done")
if __name__ == '__main__':
if len(sys.argv)!=2:
print('[!]Wrong parameter')
print('Usage:')
print('%s <filepath>'%(sys.argv[0]))
sys.exit(0)
else:
filededuplication(sys.argv[1])
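
# A usage sketch (assumed input format: "url ip" per line, i.e. the output
# of a url-to-ip lookup, with field [1] being the IP; file names are
# hypothetical):
#
#   $ cat urls.txt
#   example.com 93.184.216.34
#   example.org 93.184.216.34
#   $ python "file_deduplication(For_urltoip).py" urls.txt
#   [*]done
#   $ cat urls.txtnew
#   93.184.216.34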
|
58bd3bc305c787a3ad2c55cf3b4dc8b7ece96d68
|
abb28a62de39d806d5a63f5b94305069ee5950ca
|
/setup.py
|
9756002119cc279135c518c56c655027d6f47e9e
|
[
"MIT"
] |
permissive
|
qulacs/qulacs
|
0d79074b83b8cc0faa3c31135178b08771be9987
|
413bcd3d02c01e2ad85a711abad252daadd5b832
|
refs/heads/main
| 2023-08-16T21:25:57.217422
| 2023-07-31T01:53:48
| 2023-07-31T01:53:48
| 151,675,481
| 349
| 121
|
MIT
| 2023-09-14T01:53:34
| 2018-10-05T05:39:51
|
C++
|
UTF-8
|
Python
| false
| false
| 5,242
|
py
|
setup.py
|
import os
import platform
import re
import subprocess
import sys
from setuptools import Extension, find_packages, setup
from setuptools.command.build_ext import build_ext
class CMakeExtension(Extension):
def __init__(self, name, sourcedir=""):
Extension.__init__(self, name, sources=[])
self.sourcedir = os.path.abspath(sourcedir)
class CMakeBuild(build_ext):
user_options = build_ext.user_options + [
("opt-flags=", "o", "optimization flags for compiler")
]
def initialize_options(self):
build_ext.initialize_options(self)
self.opt_flags = None
def finalize_options(self):
build_ext.finalize_options(self)
def run(self):
try:
subprocess.check_output(["cmake", "--version"])
except OSError:
raise RuntimeError(
"CMake must be installed to build the following extensions: "
+ ", ".join(e.name for e in self.extensions)
)
for ext in self.extensions:
self.build_extension(ext)
def build_extension(self, ext):
build_args, cmake_args = self._generate_args(ext)
if self.opt_flags is not None:
opt_flags = self.opt_flags
elif os.getenv("QULACS_OPT_FLAGS"):
opt_flags = os.getenv("QULACS_OPT_FLAGS")
else:
opt_flags = None
if opt_flags:
cmake_args += ["-DOPT_FLAGS=" + opt_flags]
if os.getenv("USE_GPU"):
cmake_args += ["-DUSE_GPU:STR=" + os.getenv("USE_GPU")]
if os.getenv("USE_OMP"):
cmake_args += ["-DUSE_OMP:STR=" + os.getenv("USE_OMP")]
if os.getenv("USE_MPI"):
cmake_args += ["-DUSE_MPI:STR=" + os.getenv("USE_MPI")]
env = os.environ.copy()
env["CXXFLAGS"] = '{} -DVERSION_INFO=\\"{}\\"'.format(
env.get("CXXFLAGS", ""), self.distribution.get_version()
)
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
build_dir = os.path.join(os.getcwd(), "build")
os.makedirs(build_dir, exist_ok=True)
subprocess.check_call(
["cmake", ext.sourcedir] + cmake_args, cwd=build_dir, env=env
)
subprocess.check_call(
["cmake", "--build", ".", "--target", "python"] + build_args, cwd=build_dir
)
def _generate_args(self, ext):
# Following directories are created by cmake automatically.
# Directory to output archive file.
archive_dir = os.path.join(os.getcwd(), "lib")
# Directory to output .so file generated by pybind.
extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
# Directory to output test binaries.
bindir = os.path.join(os.getcwd(), "bin")
cmake_args = [
"-DCMAKE_ARCHIVE_OUTPUT_DIRECTORY=" + archive_dir,
"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=" + extdir,
"-DCMAKE_RUNTIME_OUTPUT_DIRECTORY=" + bindir,
"-DPYTHON_EXECUTABLE=" + sys.executable,
"-DPYTHON_SETUP_FLAG:STR=Yes",
"-DUSE_GPU:STR=No",
]
cfg = "Debug" if self.debug else "Release"
build_args = ["--config", cfg]
if platform.system() == "Windows":
cmake_args += [
"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}".format(cfg.upper(), extdir),
"-DCMAKE_RUNTIME_OUTPUT_DIRECTORY_{}={}".format(cfg.upper(), extdir),
]
if sys.maxsize > 2**32:
cmake_args += ["-A", "x64"]
build_args += ["--", "/m"]
else:
# In macOS, gcc/g++ is aliased to clang/clang++.
gcc = os.getenv("C_COMPILER", "gcc")
gxx = os.getenv("CXX_COMPILER", "g++")
if gcc is None or gxx is None:
raise RuntimeError(
"gcc/g++ must be installed to build the following extensions: "
+ ", ".join(e.name for e in self.extensions)
)
cmake_args += ["-DCMAKE_C_COMPILER=" + gcc]
cmake_args += ["-DCMAKE_CXX_COMPILER=" + gxx]
cmake_args += ["-DCMAKE_BUILD_TYPE=" + cfg]
if gcc == "mpicc":
cmake_args += ["-DUSE_MPI:STR=Yes"]
if platform.system() == "Darwin":
                # This is for building the Python package on GitHub Actions, whose runners are x86_64.
                # Without specifying the architecture explicitly, binaries are
                # built for x86_64 even when cibuildwheel intends to build for arm64.
archs = re.findall(r"-arch (\S+)", os.environ.get("ARCHFLAGS", ""))
if len(archs) > 0:
cmake_args += [
"-DCMAKE_OSX_ARCHITECTURES={}".format(";".join(archs))
]
n_cpus = os.cpu_count()
build_args += ["--", f"-j{n_cpus}"]
return build_args, cmake_args
setup(
package_dir={"": "pysrc"},
packages=find_packages(exclude=["test*"]) + find_packages("pysrc"),
package_data={"": ["py.typed", "*.pyi"]},
ext_modules=[CMakeExtension("qulacs_core")],
cmdclass=dict(build_ext=CMakeBuild),
)
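
# A sketch of how the custom build knobs above can be exercised (the
# --opt-flags option and QULACS_OPT_FLAGS variable come from CMakeBuild;
# the example flag values are assumptions):
#
#   $ pip install .                                      # default flags
#   $ QULACS_OPT_FLAGS="-O2 -mtune=native" pip install .
#   $ python setup.py build_ext --opt-flags="-O2"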
|
a91d2e4e48a21daed90bc15a8859667de73590ec
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/helpers/typeshed/stdlib/asyncio/sslproto.pyi
|
3bb4db69c123a28b8159fd9da0aca142a024000f
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 6,093
|
pyi
|
sslproto.pyi
|
import ssl
import sys
from collections import deque
from collections.abc import Callable
from enum import Enum
from typing import Any, ClassVar
from typing_extensions import Literal, TypeAlias
from . import constants, events, futures, protocols, transports
def _create_transport_context(server_side: bool, server_hostname: str | None) -> ssl.SSLContext: ...
if sys.version_info >= (3, 11):
SSLAgainErrors: tuple[type[ssl.SSLWantReadError], type[ssl.SSLSyscallError]]
class SSLProtocolState(Enum):
UNWRAPPED: str
DO_HANDSHAKE: str
WRAPPED: str
FLUSHING: str
SHUTDOWN: str
class AppProtocolState(Enum):
STATE_INIT: str
STATE_CON_MADE: str
STATE_EOF: str
STATE_CON_LOST: str
def add_flowcontrol_defaults(high: int | None, low: int | None, kb: int) -> tuple[int, int]: ...
else:
_UNWRAPPED: Literal["UNWRAPPED"]
_DO_HANDSHAKE: Literal["DO_HANDSHAKE"]
_WRAPPED: Literal["WRAPPED"]
_SHUTDOWN: Literal["SHUTDOWN"]
if sys.version_info < (3, 11):
class _SSLPipe:
max_size: ClassVar[int]
_context: ssl.SSLContext
_server_side: bool
_server_hostname: str | None
_state: str
_incoming: ssl.MemoryBIO
_outgoing: ssl.MemoryBIO
_sslobj: ssl.SSLObject | None
_need_ssldata: bool
_handshake_cb: Callable[[BaseException | None], None] | None
_shutdown_cb: Callable[[], None] | None
def __init__(self, context: ssl.SSLContext, server_side: bool, server_hostname: str | None = ...) -> None: ...
@property
def context(self) -> ssl.SSLContext: ...
@property
def ssl_object(self) -> ssl.SSLObject | None: ...
@property
def need_ssldata(self) -> bool: ...
@property
def wrapped(self) -> bool: ...
def do_handshake(self, callback: Callable[[BaseException | None], object] | None = ...) -> list[bytes]: ...
def shutdown(self, callback: Callable[[], object] | None = ...) -> list[bytes]: ...
def feed_eof(self) -> None: ...
def feed_ssldata(self, data: bytes, only_handshake: bool = ...) -> tuple[list[bytes], list[bytes]]: ...
def feed_appdata(self, data: bytes, offset: int = ...) -> tuple[list[bytes], int]: ...
class _SSLProtocolTransport(transports._FlowControlMixin, transports.Transport):
_sendfile_compatible: ClassVar[constants._SendfileMode]
_loop: events.AbstractEventLoop
_ssl_protocol: SSLProtocol
_closed: bool
def __init__(self, loop: events.AbstractEventLoop, ssl_protocol: SSLProtocol) -> None: ...
def get_extra_info(self, name: str, default: Any | None = ...) -> dict[str, Any]: ...
@property
def _protocol_paused(self) -> bool: ...
def write(self, data: bytes) -> None: ...
def can_write_eof(self) -> Literal[False]: ...
if sys.version_info >= (3, 11):
def get_write_buffer_limits(self) -> tuple[int, int]: ...
def get_read_buffer_limits(self) -> tuple[int, int]: ...
def set_read_buffer_limits(self, high: int | None = ..., low: int | None = ...) -> None: ...
def get_read_buffer_size(self) -> int: ...
if sys.version_info >= (3, 11):
_SSLProtocolBase: TypeAlias = protocols.BufferedProtocol
else:
_SSLProtocolBase: TypeAlias = protocols.Protocol
class SSLProtocol(_SSLProtocolBase):
_server_side: bool
_server_hostname: str | None
_sslcontext: ssl.SSLContext
_extra: dict[str, Any]
_write_backlog: deque[tuple[bytes, int]]
_write_buffer_size: int
_waiter: futures.Future[Any]
_loop: events.AbstractEventLoop
_app_transport: _SSLProtocolTransport
_transport: transports.BaseTransport | None
_ssl_handshake_timeout: int | None
_app_protocol: protocols.BaseProtocol
_app_protocol_is_buffer: bool
if sys.version_info >= (3, 11):
max_size: ClassVar[int]
else:
_sslpipe: _SSLPipe | None
_session_established: bool
_call_connection_made: bool
_in_handshake: bool
_in_shutdown: bool
if sys.version_info >= (3, 11):
def __init__(
self,
loop: events.AbstractEventLoop,
app_protocol: protocols.BaseProtocol,
sslcontext: ssl.SSLContext,
waiter: futures.Future[Any],
server_side: bool = ...,
server_hostname: str | None = ...,
call_connection_made: bool = ...,
ssl_handshake_timeout: int | None = ...,
ssl_shutdown_timeout: float | None = ...,
) -> None: ...
else:
def __init__(
self,
loop: events.AbstractEventLoop,
app_protocol: protocols.BaseProtocol,
sslcontext: ssl.SSLContext,
waiter: futures.Future[Any],
server_side: bool = ...,
server_hostname: str | None = ...,
call_connection_made: bool = ...,
ssl_handshake_timeout: int | None = ...,
) -> None: ...
def _set_app_protocol(self, app_protocol: protocols.BaseProtocol) -> None: ...
def _wakeup_waiter(self, exc: BaseException | None = ...) -> None: ...
def connection_lost(self, exc: BaseException | None) -> None: ...
def eof_received(self) -> None: ...
def _get_extra_info(self, name: str, default: Any | None = ...) -> Any: ...
def _start_shutdown(self) -> None: ...
if sys.version_info >= (3, 11):
def _write_appdata(self, list_of_data: list[bytes]) -> None: ...
else:
def _write_appdata(self, data: bytes) -> None: ...
def _start_handshake(self) -> None: ...
def _check_handshake_timeout(self) -> None: ...
def _on_handshake_complete(self, handshake_exc: BaseException | None) -> None: ...
def _fatal_error(self, exc: BaseException, message: str = ...) -> None: ...
def _abort(self) -> None: ...
if sys.version_info >= (3, 11):
def get_buffer(self, n: int) -> memoryview: ...
else:
def _finalize(self) -> None: ...
def _process_write_backlog(self) -> None: ...
|
1f02591c715b7da7b5d9b4b41627f3013d376123
|
407d194b52fe9cf75cca9d6f3c162a565549a1ae
|
/AzureMonitorAgent/agent.py
|
e1d5f8a162e6b538c8b4113e20c28edb8ca1f016
|
[
"Apache-2.0"
] |
permissive
|
Azure/azure-linux-extensions
|
808761f927045f00548aa68e38d4bec8651c0eba
|
3cea1567fc4f4eb5beea9884153e92d70610394d
|
refs/heads/master
| 2023-08-27T14:06:05.775617
| 2023-08-23T01:56:05
| 2023-08-23T01:56:05
| 19,841,123
| 300
| 314
|
Apache-2.0
| 2023-09-14T04:21:26
| 2014-05-16T01:38:49
|
Python
|
UTF-8
|
Python
| false
| false
| 94,671
|
py
|
agent.py
|
#!/usr/bin/env python
#
# AzureMonitoringLinuxAgent Extension
#
# Copyright 2021 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import sys
# future imports have no effect on python 3 (verified in official docs)
# importing from source causes import errors on python 3, lets skip import
if sys.version_info[0] < 3:
from future import standard_library
standard_library.install_aliases()
from builtins import str
import os
import os.path
import datetime
import signal
import pwd
import grp
import re
import filecmp
import stat
import traceback
import time
import platform
import subprocess
import json
import base64
import inspect
import urllib.request, urllib.parse, urllib.error
import shutil
import crypt
import xml.dom.minidom
import re
import hashlib
import fileinput
from collections import OrderedDict
from distutils.version import LooseVersion
from hashlib import sha256
from shutil import copyfile
from threading import Thread
import telegraf_utils.telegraf_config_handler as telhandler
import metrics_ext_utils.metrics_constants as metrics_constants
import metrics_ext_utils.metrics_ext_handler as me_handler
import metrics_ext_utils.metrics_common_utils as metrics_utils
try:
from Utils.WAAgentUtil import waagent
import Utils.HandlerUtil as HUtil
except Exception as e:
# These utils have checks around the use of them; this is not an exit case
print('Importing utils failed with error: {0}'.format(e))
# This code is taken from the omsagent's extension wrapper.
# This same monkey patch fix is relevant for AMA extension as well.
# This monkey patch duplicates the one made in the waagent import above.
# It is necessary because on 2.6, the waagent monkey patch appears to be overridden
# by the python-future subprocess.check_output backport.
if sys.version_info < (2,7):
def check_output(*popenargs, **kwargs):
r"""Backport from subprocess module from python 2.7"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd, output=output)
return output
# Exception classes used by this module.
class CalledProcessError(Exception):
def __init__(self, returncode, cmd, output=None):
self.returncode = returncode
self.cmd = cmd
self.output = output
def __str__(self):
return "Command '%s' returned non-zero exit status %d" % (self.cmd, self.returncode)
subprocess.check_output = check_output
subprocess.CalledProcessError = CalledProcessError
# Global Variables
PackagesDirectory = 'packages'
# The BundleFileName values will be replaced by actual values in the release pipeline. See apply_version.sh.
BundleFileNameDeb = 'azuremonitoragent.deb'
BundleFileNameRpm = 'azuremonitoragent.rpm'
BundleFileName = ''
TelegrafBinName = 'telegraf'
InitialRetrySleepSeconds = 30
PackageManager = ''
PackageManagerOptions = ''
MdsdCounterJsonPath = '/etc/opt/microsoft/azuremonitoragent/config-cache/metricCounters.json'
FluentCfgPath = '/etc/opt/microsoft/azuremonitoragent/config-cache/fluentbit/td-agent.conf'
AMASyslogConfigMarkerPath = '/etc/opt/microsoft/azuremonitoragent/config-cache/syslog.marker'
AMASyslogPortFilePath = '/etc/opt/microsoft/azuremonitoragent/config-cache/syslog.port'
PreviewFeaturesDirectory = '/etc/opt/microsoft/azuremonitoragent/config-cache/previewFeatures/'
ArcSettingsFile = '/var/opt/azcmagent/localconfig.json'
SupportedArch = set(['x86_64', 'aarch64'])
# Error codes
GenericErrorCode = 1
UnsupportedOperatingSystem = 51
IndeterminateOperatingSystem = 51
MissingorInvalidParameterErrorCode = 53
DPKGOrRPMLockedErrorCode = 56
MissingDependency = 52
# Settings
GenevaConfigKey = "genevaConfiguration"
AzureMonitorConfigKey = "azureMonitorConfiguration"
# Configuration
HUtilObject = None
SettingsSequenceNumber = None
HandlerEnvironment = None
SettingsDict = None
# Change permission of log path - if we fail, that is not an exit case
try:
ext_log_path = '/var/log/azure/'
if os.path.exists(ext_log_path):
        os.chmod(ext_log_path, 0o700)
except:
pass
def main():
"""
Main method
Parse out operation from argument, invoke the operation, and finish.
"""
init_waagent_logger()
waagent_log_info('Azure Monitoring Agent for Linux started to handle.')
# Determine the operation being executed
operation = None
try:
option = sys.argv[1]
if re.match('^([-/]*)(disable)', option):
operation = 'Disable'
elif re.match('^([-/]*)(uninstall)', option):
operation = 'Uninstall'
elif re.match('^([-/]*)(install)', option):
operation = 'Install'
elif re.match('^([-/]*)(enable)', option):
operation = 'Enable'
elif re.match('^([-/]*)(update)', option):
operation = 'Update'
elif re.match('^([-/]*)(metrics)', option):
operation = 'Metrics'
elif re.match('^([-/]*)(syslogconfig)', option):
operation = 'Syslogconfig'
except Exception as e:
waagent_log_error(str(e))
if operation is None:
log_and_exit('Unknown', GenericErrorCode, 'No valid operation provided')
# Set up for exit code and any error messages
exit_code = 0
message = '{0} succeeded'.format(operation)
# Avoid entering broken state where manual purge actions are necessary in low disk space scenario
destructive_operations = ['Disable', 'Uninstall']
if operation not in destructive_operations:
exit_code = check_disk_space_availability()
if exit_code != 0:
message = '{0} failed due to low disk space'.format(operation)
log_and_exit(operation, exit_code, message)
# Invoke operation
try:
global HUtilObject
HUtilObject = parse_context(operation)
exit_code, output = operations[operation]()
# Exit code 1 indicates a general problem that doesn't have a more
# specific error code; it often indicates a missing dependency
if exit_code == 1 and operation == 'Install':
message = 'Install failed with exit code 1. For error details, check logs ' \
'in /var/log/azure/Microsoft.Azure.Monitor' \
'.AzureMonitorLinuxAgent'
elif exit_code is DPKGOrRPMLockedErrorCode and operation == 'Install':
message = 'Install failed with exit code {0} because the ' \
'package manager on the VM is currently locked: ' \
'please wait and try again'.format(DPKGOrRPMLockedErrorCode)
elif exit_code != 0:
message = '{0} failed with exit code {1} {2}'.format(operation,
exit_code, output)
except AzureMonitorAgentForLinuxException as e:
exit_code = e.error_code
message = e.get_error_message(operation)
except Exception as e:
exit_code = GenericErrorCode
message = '{0} failed with error: {1}\n' \
'Stacktrace: {2}'.format(operation, e,
traceback.format_exc())
# Finish up and log messages
log_and_exit(operation, exit_code, message)
def check_disk_space_availability():
"""
Check if there is the required space on the machine.
"""
try:
        if get_free_space_mb("/var") < 500 or get_free_space_mb("/etc") < 500 or get_free_space_mb("/opt") < 500:
# 52 is the exit code for missing dependency i.e. disk space
# https://github.com/Azure/azure-marketplace/wiki/Extension-Build-Notes-Best-Practices#error-codes-and-messages-output-to-stderr
return MissingDependency
else:
return 0
except:
print('Failed to check disk usage.')
return 0
def get_free_space_mb(dirname):
"""
Get the free space in MB in the directory path.
"""
st = os.statvfs(dirname)
return (st.f_bavail * st.f_frsize) // (1024 * 1024)
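
# Worked example of the computation above (assumed values): with
# st.f_bavail = 1_000_000 free blocks and st.f_frsize = 4096 bytes per
# block, the result is 1_000_000 * 4096 // (1024 * 1024) = 3906 MB.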
def is_systemd():
"""
Check if the system is using systemd
"""
return os.path.isdir("/run/systemd/system")
def get_service_command(service, *operations):
"""
Get the appropriate service command [sequence] for the provided service name and operation(s)
"""
if is_systemd():
return " && ".join(["systemctl {0} {1}".format(operation, service) for operation in operations])
else:
hutil_log_info("The VM doesn't have systemctl. Using the init.d service to start {0}.".format(service))
return '/etc/init.d/{0} {1}'.format(service, operations[0])
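
# For example (a sketch of the helper above, assuming systemd is present):
#
#   get_service_command("azuremonitoragent", "stop", "disable")
#   -> "systemctl stop azuremonitoragent && systemctl disable azuremonitoragent"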
def check_kill_process(pstring):
for line in os.popen("ps ax | grep " + pstring + " | grep -v grep"):
fields = line.split()
pid = fields[0]
os.kill(int(pid), signal.SIGKILL)
def compare_and_copy_bin(src, dest):
# Check if previous file exist at the location, compare the two binaries,
# If the files are not same, remove the older file, and copy the new one
# If they are the same, then we ignore it and don't copy
    if os.path.isfile(src):
if os.path.isfile(dest):
if not filecmp.cmp(src, dest):
# Removing the file in case it is already being run in a process,
# in which case we can get an error "text file busy" while copying
os.remove(dest)
copyfile(src, dest)
else:
            # No previous binary exists; simply copy it and make it executable
copyfile(src, dest)
os.chmod(dest, stat.S_IXGRP | stat.S_IRGRP | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IXOTH | stat.S_IROTH)
def copy_amacoreagent_binaries():
amacoreagent_bin_local_path = os.getcwd() + "/amaCoreAgentBin/amacoreagent"
amacoreagent_bin = "/opt/microsoft/azuremonitoragent/bin/amacoreagent"
compare_and_copy_bin(amacoreagent_bin_local_path, amacoreagent_bin)
agentlauncher_bin_local_path = os.getcwd() + "/agentLauncherBin/agentlauncher"
agentlauncher_bin = "/opt/microsoft/azuremonitoragent/bin/agentlauncher"
compare_and_copy_bin(agentlauncher_bin_local_path, agentlauncher_bin)
def install():
"""
Ensure that this VM distro and version are supported.
Install the Azure Monitor Linux Agent package, using retries.
Note: install operation times out from WAAgent at 15 minutes, so do not
wait longer.
"""
exit_if_vm_not_supported('Install')
find_package_manager("Install")
set_os_arch('Install')
vm_dist, vm_ver = find_vm_distro('Install')
# Check if SUSE 15 VMs have /sbin/insserv package (required for AMA 1.14.4+)
if (vm_dist.startswith('suse') or vm_dist.startswith('sles') or vm_dist.startswith('opensuse')) and vm_ver.startswith('15'):
check_insserv, _ = run_command_and_log("which insserv")
if check_insserv != 0:
hutil_log_info("'insserv-compat' package missing from SUSE 15 machine, installing to allow AMA to run.")
insserv_exit_code, insserv_output = run_command_and_log("zypper --non-interactive install insserv-compat")
if insserv_exit_code != 0:
return insserv_exit_code, insserv_output
package_directory = os.path.join(os.getcwd(), PackagesDirectory)
bundle_path = os.path.join(package_directory, BundleFileName)
os.chmod(bundle_path, 100)
print(PackageManager, " and ", BundleFileName)
AMAInstallCommand = "{0} {1} -i {2}".format(PackageManager, PackageManagerOptions, bundle_path)
hutil_log_info('Running command "{0}"'.format(AMAInstallCommand))
# Retry, since install can fail due to concurrent package operations
exit_code, output = run_command_with_retries_output(AMAInstallCommand, retries = 15,
retry_check = retry_if_dpkg_or_rpm_locked,
final_check = final_check_if_dpkg_or_rpm_locked)
# Retry install for aarch64 rhel8 VMs as initial install fails to create symlink to /etc/systemd/system/azuremonitoragent.service
# in /etc/systemd/system/multi-user.target.wants/azuremonitoragent.service
if vm_dist.replace(' ','').lower().startswith('redhat') and vm_ver == '8.6' and platform.machine() == 'aarch64':
exit_code, output = run_command_with_retries_output(AMAInstallCommand, retries = 15,
retry_check = retry_if_dpkg_or_rpm_locked,
final_check = final_check_if_dpkg_or_rpm_locked)
if exit_code != 0:
return exit_code, output
# Copy the AMACoreAgent and agentlauncher binaries
# TBD: this method needs to be revisited for aarch64
copy_amacoreagent_binaries()
    # CL is disabled on arm64 until we have arm64 binaries from pipelineAgent
if is_systemd() and platform.machine() == 'aarch64':
exit_code, output = run_command_and_log('systemctl stop azuremonitor-coreagent && systemctl disable azuremonitor-coreagent')
exit_code, output = run_command_and_log('systemctl stop azuremonitor-agentlauncher && systemctl disable azuremonitor-agentlauncher')
# Set task limits to max of 65K in suse 12
# Based on Task 9764411: AMA broken after 1.7 in sles 12 - https://dev.azure.com/msazure/One/_workitems/edit/9764411
if exit_code == 0:
vm_dist, _ = find_vm_distro('Install')
if (vm_dist.startswith('suse') or vm_dist.startswith('sles')):
try:
suse_exit_code, suse_output = run_command_and_log("mkdir -p /etc/systemd/system/azuremonitoragent.service.d")
if suse_exit_code != 0:
return suse_exit_code, suse_output
suse_exit_code, suse_output = run_command_and_log("echo '[Service]' > /etc/systemd/system/azuremonitoragent.service.d/override.conf")
if suse_exit_code != 0:
return suse_exit_code, suse_output
suse_exit_code, suse_output = run_command_and_log("echo 'TasksMax=65535' >> /etc/systemd/system/azuremonitoragent.service.d/override.conf")
if suse_exit_code != 0:
return suse_exit_code, suse_output
suse_exit_code, suse_output = run_command_and_log("systemctl daemon-reload")
if suse_exit_code != 0:
return suse_exit_code, suse_output
except:
log_and_exit("install", MissingorInvalidParameterErrorCode, "Failed to update /etc/systemd/system/azuremonitoragent.service.d for suse 12,15" )
return exit_code, output
def uninstall():
"""
Uninstall the Azure Monitor Linux Agent.
This is a somewhat soft uninstall. It is not a purge.
Note: uninstall operation times out from WAAgent at 5 minutes
"""
exit_if_vm_not_supported('Uninstall')
find_package_manager("Uninstall")
if PackageManager == "dpkg":
AMAUninstallCommand = "dpkg -P azuremonitoragent"
elif PackageManager == "rpm":
AMAUninstallCommand = "rpm -e azuremonitoragent"
else:
log_and_exit("Uninstall", UnsupportedOperatingSystem, "The OS has neither rpm nor dpkg" )
hutil_log_info('Running command "{0}"'.format(AMAUninstallCommand))
remove_localsyslog_configs()
# Retry, since uninstall can fail due to concurrent package operations
try:
exit_code, output = run_command_with_retries_output(AMAUninstallCommand, retries = 4,
retry_check = retry_if_dpkg_or_rpm_locked,
final_check = final_check_if_dpkg_or_rpm_locked)
except Exception as ex:
exit_code = GenericErrorCode
output = 'Uninstall failed with error: {0}\n' \
'Stacktrace: {1}'.format(ex, traceback.format_exc())
return exit_code, output
def enable():
"""
Start the Azure Monitor Linux Agent Service
This call will return non-zero or throw an exception if
the settings provided are incomplete or incorrect.
Note: enable operation times out from WAAgent at 5 minutes
"""
public_settings, protected_settings = get_settings()
exit_if_vm_not_supported('Enable')
ensure = OrderedDict([
("azuremonitoragent", False),
("azuremonitoragentmgr", False)
])
# Set traceFlags in publicSettings to enable mdsd tracing. For example, the EventIngest flag can be enabled via "traceFlags": "0x2"
flags = ""
if public_settings is not None and "traceFlags" in public_settings:
flags = "-T {} ".format(public_settings.get("traceFlags"))
# Use an Ordered Dictionary to ensure MDSD_OPTIONS (and other dependent variables) are written after their dependencies
default_configs = OrderedDict([
("MDSD_CONFIG_DIR", "/etc/opt/microsoft/azuremonitoragent"),
("MDSD_LOG_DIR", "/var/opt/microsoft/azuremonitoragent/log"),
("MDSD_ROLE_PREFIX", "/run/azuremonitoragent/default"),
("MDSD_SPOOL_DIRECTORY", "/var/opt/microsoft/azuremonitoragent"),
("MDSD_OPTIONS", "\"{}-A -c /etc/opt/microsoft/azuremonitoragent/mdsd.xml -d -r $MDSD_ROLE_PREFIX -S $MDSD_SPOOL_DIRECTORY/eh -L $MDSD_SPOOL_DIRECTORY/events\"".format(flags)),
("MDSD_USE_LOCAL_PERSISTENCY", "true"),
("MDSD_TCMALLOC_RELEASE_FREQ_SEC", "1"),
("MONITORING_USE_GENEVA_CONFIG_SERVICE", "false"),
("ENABLE_MCS", "false")
])
ssl_cert_var_name, ssl_cert_var_value = get_ssl_cert_info('Enable')
default_configs[ssl_cert_var_name] = ssl_cert_var_value
"""
Decide the mode and configuration. There are two supported configuration schema, mix-and-match between schemas is disallowed:
Legacy: allows one of [MCS, GCS single tenant, or GCS multi tenant ("Auto-Config")] modes
Next-Generation: allows MCS, GCS multi tenant, or both
"""
# Next-generation schema
if public_settings is not None and (public_settings.get(GenevaConfigKey) or public_settings.get(AzureMonitorConfigKey)):
geneva_configuration = public_settings.get(GenevaConfigKey)
azure_monitor_configuration = public_settings.get(AzureMonitorConfigKey)
# Check for mix-and match of next-generation and legacy schema content
if len(public_settings) > 1 and ((geneva_configuration and not azure_monitor_configuration) or (azure_monitor_configuration and not geneva_configuration)):
log_and_exit("Enable", MissingorInvalidParameterErrorCode, 'Mixing genevaConfiguration or azureMonitorConfiguration with other configuration schemas is not allowed')
if geneva_configuration and geneva_configuration.get("enable") == True:
hutil_log_info("Detected Geneva+ mode; azuremonitoragentmgr service will be started to handle Geneva tenants")
ensure["azuremonitoragentmgr"] = True
# Note that internally AMCS with geneva config path can be used in which case syslog should be handled same way as default 1P
generate_localsyslog_configs()
if azure_monitor_configuration and azure_monitor_configuration.get("enable") == True:
hutil_log_info("Detected Azure Monitor+ mode; azuremonitoragent service will be started to handle Azure Monitor tenant")
ensure["azuremonitoragent"] = True
azure_monitor_public_settings = azure_monitor_configuration.get("configuration")
azure_monitor_protected_settings = protected_settings.get(AzureMonitorConfigKey) if protected_settings is not None else None
handle_mcs_config(azure_monitor_public_settings, azure_monitor_protected_settings, default_configs)
# Legacy schema
elif public_settings is not None and public_settings.get("GCS_AUTO_CONFIG") == True:
hutil_log_info("Detected Auto-Config mode; azuremonitoragentmgr service will be started to handle Geneva tenants")
ensure["azuremonitoragentmgr"] = True
# generate local syslog configuration files as in auto config syslog is not driven from DCR
generate_localsyslog_configs()
elif (protected_settings is None or len(protected_settings) == 0) or (public_settings is not None and "proxy" in public_settings and "mode" in public_settings.get("proxy") and public_settings.get("proxy").get("mode") == "application"):
hutil_log_info("Detected Azure Monitor mode; azuremonitoragent service will be started to handle Azure Monitor configuration")
ensure["azuremonitoragent"] = True
handle_mcs_config(public_settings, protected_settings, default_configs)
else:
hutil_log_info("Detected Geneva mode; azuremonitoragent service will be started to handle Geneva configuration")
ensure["azuremonitoragent"] = True
handle_gcs_config(public_settings, protected_settings, default_configs)
# generate local syslog configuration files as in 1P syslog is not driven from DCR
generate_localsyslog_configs()
config_file = "/etc/default/azuremonitoragent"
temp_config_file = "/etc/default/azuremonitoragent_temp"
try:
if os.path.isfile(config_file):
new_config = "\n".join(["export {0}={1}".format(key, value) for key, value in default_configs.items()]) + "\n"
with open(temp_config_file, "w") as f:
f.write(new_config)
if not os.path.isfile(temp_config_file):
log_and_exit("Enable", GenericErrorCode, "Error while updating environment variables in {0}".format(config_file))
os.remove(config_file)
os.rename(temp_config_file, config_file)
else:
log_and_exit("Enable", GenericErrorCode, "Could not find the file {0}".format(config_file))
except Exception as e:
log_and_exit("Enable", GenericErrorCode, "Failed to add environment variables to {0}: {1}".format(config_file, e))
if "ENABLE_MCS" in default_configs and default_configs["ENABLE_MCS"] == "true":
start_amacoreagent()
restart_launcher()
# start the metrics and syslog watcher only in 3P mode
start_metrics_process()
start_syslogconfig_process()
hutil_log_info('Handler initiating onboarding.')
if HUtilObject and HUtilObject.is_seq_smaller():
# Either upgrade has just happened (in which case we need to start), or enable was called with no change to extension config
hutil_log_info("Current sequence number, " + HUtilObject._context._seq_no + ", is not greater than the LKG sequence number. Starting service(s) only if it is not yet running.")
operations = ["start", "enable"]
else:
# Either this is a clean install (in which case restart is effectively start), or extension config has changed
hutil_log_info("Current sequence number, " + HUtilObject._context._seq_no + ", is greater than the LKG sequence number. Restarting service(s) to pick up the new config.")
operations = ["restart", "enable"]
output = ""
# Ensure non-required services are not running; do not block if this step fails
for service in [s for s in ensure.keys() if not ensure[s]]:
exit_code, disable_output = run_command_and_log(get_service_command(service, "stop", "disable"))
output += disable_output
for service in [s for s in ensure.keys() if ensure[s]]:
exit_code, enable_output = run_command_and_log(get_service_command(service, *operations))
output += enable_output
if exit_code != 0:
status_command = get_service_command(service, "status")
status_exit_code, status_output = run_command_and_log(status_command)
if status_exit_code != 0:
output += "Output of '{0}':\n{1}".format(status_command, status_output)
return exit_code, output
# Service(s) were successfully configured and started; increment sequence number
HUtilObject.save_seq()
return exit_code, output
def handle_gcs_config(public_settings, protected_settings, default_configs):
"""
Populate the defaults for legacy-path GCS mode
"""
# look for LA protected settings
for var in list(protected_settings.keys()):
if "_key" in var or "_id" in var:
default_configs[var] = protected_settings.get(var)
# check if required GCS params are available
MONITORING_GCS_CERT_CERTFILE = None
if "certificate" in protected_settings:
MONITORING_GCS_CERT_CERTFILE = base64.standard_b64decode(protected_settings.get("certificate"))
if "certificatePath" in protected_settings:
try:
with open(protected_settings.get("certificatePath"), 'r') as f:
MONITORING_GCS_CERT_CERTFILE = f.read()
except Exception as ex:
log_and_exit('Enable', MissingorInvalidParameterErrorCode, 'Failed to read certificate {0}: {1}'.format(protected_settings.get("certificatePath"), ex))
MONITORING_GCS_CERT_KEYFILE = None
if "certificateKey" in protected_settings:
MONITORING_GCS_CERT_KEYFILE = base64.standard_b64decode(protected_settings.get("certificateKey"))
if "certificateKeyPath" in protected_settings:
try:
with open(protected_settings.get("certificateKeyPath"), 'r') as f:
MONITORING_GCS_CERT_KEYFILE = f.read()
except Exception as ex:
log_and_exit('Enable', MissingorInvalidParameterErrorCode, 'Failed to read certificate key {0}: {1}'.format(protected_settings.get("certificateKeyPath"), ex))
MONITORING_GCS_ENVIRONMENT = ""
if "monitoringGCSEnvironment" in protected_settings:
MONITORING_GCS_ENVIRONMENT = protected_settings.get("monitoringGCSEnvironment")
MONITORING_GCS_NAMESPACE = ""
if "namespace" in protected_settings:
MONITORING_GCS_NAMESPACE = protected_settings.get("namespace")
MONITORING_GCS_ACCOUNT = ""
if "monitoringGCSAccount" in protected_settings:
MONITORING_GCS_ACCOUNT = protected_settings.get("monitoringGCSAccount")
MONITORING_GCS_REGION = ""
if "monitoringGCSRegion" in protected_settings:
MONITORING_GCS_REGION = protected_settings.get("monitoringGCSRegion")
MONITORING_CONFIG_VERSION = ""
if "configVersion" in protected_settings:
MONITORING_CONFIG_VERSION = protected_settings.get("configVersion")
MONITORING_GCS_AUTH_ID_TYPE = ""
if "monitoringGCSAuthIdType" in protected_settings:
MONITORING_GCS_AUTH_ID_TYPE = protected_settings.get("monitoringGCSAuthIdType")
MONITORING_GCS_AUTH_ID = ""
if "monitoringGCSAuthId" in protected_settings:
MONITORING_GCS_AUTH_ID = protected_settings.get("monitoringGCSAuthId")
MONITORING_TENANT = ""
if "monitoringTenant" in protected_settings:
MONITORING_TENANT = protected_settings.get("monitoringTenant")
MONITORING_ROLE = ""
if "monitoringRole" in protected_settings:
MONITORING_ROLE = protected_settings.get("monitoringRole")
MONITORING_ROLE_INSTANCE = ""
if "monitoringRoleInstance" in protected_settings:
MONITORING_ROLE_INSTANCE = protected_settings.get("monitoringRoleInstance")
if ((MONITORING_GCS_CERT_CERTFILE is None or MONITORING_GCS_CERT_KEYFILE is None) and (MONITORING_GCS_AUTH_ID_TYPE == "")) or MONITORING_GCS_ENVIRONMENT == "" or MONITORING_GCS_NAMESPACE == "" or MONITORING_GCS_ACCOUNT == "" or MONITORING_GCS_REGION == "" or MONITORING_CONFIG_VERSION == "":
log_and_exit("Enable", MissingorInvalidParameterErrorCode, 'Not all required GCS parameters are provided')
else:
# set the values for GCS
default_configs["MONITORING_USE_GENEVA_CONFIG_SERVICE"] = "true"
default_configs["MONITORING_GCS_ENVIRONMENT"] = MONITORING_GCS_ENVIRONMENT
default_configs["MONITORING_GCS_NAMESPACE"] = MONITORING_GCS_NAMESPACE
default_configs["MONITORING_GCS_ACCOUNT"] = MONITORING_GCS_ACCOUNT
default_configs["MONITORING_GCS_REGION"] = MONITORING_GCS_REGION
default_configs["MONITORING_CONFIG_VERSION"] = MONITORING_CONFIG_VERSION
# write the certificate and key to disk
uid = pwd.getpwnam("syslog").pw_uid
gid = grp.getgrnam("syslog").gr_gid
if MONITORING_GCS_AUTH_ID_TYPE != "":
default_configs["MONITORING_GCS_AUTH_ID_TYPE"] = MONITORING_GCS_AUTH_ID_TYPE
if MONITORING_GCS_AUTH_ID != "":
default_configs["MONITORING_GCS_AUTH_ID"] = MONITORING_GCS_AUTH_ID
if MONITORING_GCS_CERT_CERTFILE is not None:
default_configs["MONITORING_GCS_CERT_CERTFILE"] = "/etc/opt/microsoft/azuremonitoragent/gcscert.pem"
with open("/etc/opt/microsoft/azuremonitoragent/gcscert.pem", "wb") as f:
f.write(MONITORING_GCS_CERT_CERTFILE)
os.chown("/etc/opt/microsoft/azuremonitoragent/gcscert.pem", uid, gid)
os.system('chmod {1} {0}'.format("/etc/opt/microsoft/azuremonitoragent/gcscert.pem", 400))
if MONITORING_GCS_CERT_KEYFILE is not None:
default_configs["MONITORING_GCS_CERT_KEYFILE"] = "/etc/opt/microsoft/azuremonitoragent/gcskey.pem"
with open("/etc/opt/microsoft/azuremonitoragent/gcskey.pem", "wb") as f:
f.write(MONITORING_GCS_CERT_KEYFILE)
os.chown("/etc/opt/microsoft/azuremonitoragent/gcskey.pem", uid, gid)
os.system('chmod {1} {0}'.format("/etc/opt/microsoft/azuremonitoragent/gcskey.pem", 400))
if MONITORING_TENANT != "":
default_configs["MONITORING_TENANT"] = MONITORING_TENANT
if MONITORING_ROLE != "":
default_configs["MONITORING_ROLE"] = MONITORING_ROLE
if MONITORING_TENANT != "":
default_configs["MONITORING_ROLE_INSTANCE"] = MONITORING_ROLE_INSTANCE
def handle_mcs_config(public_settings, protected_settings, default_configs):
"""
Populate the defaults for MCS mode
"""
default_configs["ENABLE_MCS"] = "true"
default_configs["PA_GIG_BRIDGE_MODE"] = "true"
# April 2022: PA_FLUENT_SOCKET_PORT setting is being deprecated in place of PA_DATA_PORT. Remove when AMA 1.17 and earlier no longer need servicing.
default_configs["PA_FLUENT_SOCKET_PORT"] = "13000"
# this port will be dynamic in future
default_configs["PA_DATA_PORT"] = "13000"
# fetch proxy settings
if public_settings is not None and "proxy" in public_settings and "mode" in public_settings.get("proxy") and public_settings.get("proxy").get("mode") == "application":
default_configs["MDSD_PROXY_MODE"] = "application"
if "address" in public_settings.get("proxy"):
default_configs["MDSD_PROXY_ADDRESS"] = public_settings.get("proxy").get("address")
else:
log_and_exit("Enable", MissingorInvalidParameterErrorCode, 'Parameter "address" is required in proxy public setting')
if "auth" in public_settings.get("proxy") and public_settings.get("proxy").get("auth") == True:
if protected_settings is not None and "proxy" in protected_settings and "username" in protected_settings.get("proxy") and "password" in protected_settings.get("proxy"):
default_configs["MDSD_PROXY_USERNAME"] = protected_settings.get("proxy").get("username")
default_configs["MDSD_PROXY_PASSWORD"] = protected_settings.get("proxy").get("password")
set_proxy(default_configs["MDSD_PROXY_ADDRESS"], default_configs["MDSD_PROXY_USERNAME"], default_configs["MDSD_PROXY_PASSWORD"])
else:
log_and_exit("Enable", MissingorInvalidParameterErrorCode, 'Parameter "username" and "password" not in proxy protected setting')
else:
set_proxy(default_configs["MDSD_PROXY_ADDRESS"], "", "")
# is this Arc? If so, check for proxy
if os.path.isfile(ArcSettingsFile):
f = open(ArcSettingsFile, "r")
data = f.read()
if (data != ''):
json_data = json.loads(data)
if json_data is not None and "proxy.url" in json_data:
url = json_data["proxy.url"]
# only non-authenticated proxy config is supported
if url != '':
default_configs["MDSD_PROXY_ADDRESS"] = url
set_proxy(default_configs["MDSD_PROXY_ADDRESS"], "", "")
# add managed identity settings if they were provided
identifier_name, identifier_value, error_msg = get_managed_identity()
if error_msg:
log_and_exit("Enable", MissingorInvalidParameterErrorCode, 'Failed to determine managed identity settings. {0}.'.format(error_msg))
if identifier_name and identifier_value:
default_configs["MANAGED_IDENTITY"] = "{0}#{1}".format(identifier_name, identifier_value)
def disable():
"""
Disable Azure Monitor Linux Agent process on the VM.
Note: disable operation times out from WAAgent at 15 minutes
"""
#stop the metrics process
stop_metrics_process()
#stop syslog config watcher process
stop_syslogconfig_process()
# stop amacoreagent and agent launcher
    hutil_log_info('Handler stopping Core Agent and agent launcher')
if is_systemd() and platform.machine() != 'aarch64':
exit_code, output = run_command_and_log('systemctl stop azuremonitor-coreagent && systemctl disable azuremonitor-coreagent')
exit_code, output = run_command_and_log('systemctl stop azuremonitor-agentlauncher && systemctl disable azuremonitor-agentlauncher')
# in case AL is not cleaning up properly
check_kill_process('/opt/microsoft/azuremonitoragent/bin/fluent-bit')
# Stop and disable systemd services so they are not started after system reboot.
for service in ["azuremonitoragent", "azuremonitoragentmgr"]:
exit_code, output = run_command_and_log(get_service_command(service, "stop", "disable"))
if exit_code != 0:
status_command = get_service_command(service, "status")
status_exit_code, status_output = run_command_and_log(status_command)
if status_exit_code != 0:
output += "Output of '{0}':\n{1}".format(status_command, status_output)
return exit_code, output
def update():
"""
Update the current installation of AzureMonitorLinuxAgent
No logic to install the agent as agent -> install() will be called
with update because upgradeMode = "UpgradeWithInstall" set in HandlerManifest
"""
return 0, ""
def start_amacoreagent():
if platform.machine() == 'aarch64':
return
# start Core Agent
hutil_log_info('Handler initiating Core Agent')
if is_systemd():
exit_code, output = run_command_and_log('systemctl start azuremonitor-coreagent && systemctl enable azuremonitor-coreagent')
def restart_launcher():
if platform.machine() == 'aarch64':
return
# start agent launcher
hutil_log_info('Handler initiating agent launcher')
if is_systemd():
exit_code, output = run_command_and_log('systemctl stop azuremonitor-agentlauncher && systemctl disable azuremonitor-agentlauncher')
# in case AL is not cleaning up properly
check_kill_process('/opt/microsoft/azuremonitoragent/bin/fluent-bit')
exit_code, output = run_command_and_log('systemctl restart azuremonitor-agentlauncher && systemctl enable azuremonitor-agentlauncher')
def set_proxy(address, username, password):
"""
# Set proxy http_proxy env var in dependent services
"""
try:
http_proxy = address
address = address.replace("http://","")
if username:
http_proxy = "http://" + username + ":" + password + "@" + address
# Update Coreagent
run_command_and_log("mkdir -p /etc/systemd/system/azuremonitor-coreagent.service.d")
run_command_and_log("echo '[Service]' > /etc/systemd/system/azuremonitor-coreagent.service.d/proxy.conf")
run_command_and_log("echo 'Environment=\"http_proxy={0}\"' >> /etc/systemd/system/azuremonitor-coreagent.service.d/proxy.conf".format(http_proxy))
run_command_and_log("echo 'Environment=\"https_proxy={0}\"' >> /etc/systemd/system/azuremonitor-coreagent.service.d/proxy.conf".format(http_proxy))
os.system('chmod {1} {0}'.format("/etc/systemd/system/azuremonitor-coreagent.service.d/proxy.conf", 400))
# Update ME
run_command_and_log("mkdir -p /etc/systemd/system/metrics-extension.service.d")
run_command_and_log("echo '[Service]' > /etc/systemd/system/metrics-extension.service.d/proxy.conf")
run_command_and_log("echo 'Environment=\"http_proxy={0}\"' >> /etc/systemd/system/metrics-extension.service.d/proxy.conf".format(http_proxy))
run_command_and_log("echo 'Environment=\"https_proxy={0}\"' >> /etc/systemd/system/metrics-extension.service.d/proxy.conf".format(http_proxy))
os.system('chmod {1} {0}'.format("/etc/systemd/system/metrics-extension.service.d/proxy.conf", 400))
run_command_and_log("systemctl daemon-reload")
except:
log_and_exit("enable", MissingorInvalidParameterErrorCode, "Failed to update /etc/systemd/system/azuremonitor-coreagent.service.d and mkdir -p /etc/systemd/system/metrics-extension.service.d" )
def get_managed_identity():
"""
# Determine Managed Identity (MI) settings
# Nomenclature: Managed System Identity (MSI), System-Assigned Identity (SAI), User-Assigned Identity (UAI)
# Unspecified MI scenario: MSI returns SAI token if exists, otherwise returns UAI token if exactly one UAI exists, otherwise failure
# Specified MI scenario: MSI returns token for specified MI
# Returns identifier_name, identifier_value, and error message (if any)
"""
identifier_name = identifier_value = ""
public_settings, _ = get_settings()
if public_settings is not None and public_settings.get(AzureMonitorConfigKey):
azure_monitor_configuration = public_settings.get(AzureMonitorConfigKey)
if azure_monitor_configuration and azure_monitor_configuration.get("enable") == True:
public_settings = azure_monitor_configuration.get("configuration")
if public_settings is not None and "authentication" in public_settings and "managedIdentity" in public_settings.get("authentication"):
managedIdentity = public_settings.get("authentication").get("managedIdentity")
if "identifier-name" not in managedIdentity or "identifier-value" not in managedIdentity:
return identifier_name, identifier_value, 'Parameters "identifier-name" and "identifier-value" are both required in authentication.managedIdentity public setting'
identifier_name = managedIdentity.get("identifier-name")
identifier_value = managedIdentity.get("identifier-value")
if identifier_name not in ["client_id", "mi_res_id"]:
return identifier_name, identifier_value, 'Invalid identifier-name provided; must be "client_id" or "mi_res_id"'
if not identifier_value:
return identifier_name, identifier_value, 'Invalid identifier-value provided; cannot be empty'
if identifier_name in ["object_id", "client_id"]:
guid_re = re.compile(r'[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}')
if not guid_re.search(identifier_value):
return identifier_name, identifier_value, 'Invalid identifier-value provided for {0}; must be a GUID'.format(identifier_name)
return identifier_name, identifier_value, ""
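
# A sketch of the public-settings shape parsed above (values are
# hypothetical; identifier-name must be "client_id" or "mi_res_id"):
#
#   {
#     "authentication": {
#       "managedIdentity": {
#         "identifier-name": "client_id",
#         "identifier-value": "00000000-0000-0000-0000-000000000000"
#       }
#     }
#   }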
def stop_metrics_process():
if telhandler.is_running(is_lad=False):
#Stop the telegraf and ME services
tel_out, tel_msg = telhandler.stop_telegraf_service(is_lad=False)
if tel_out:
hutil_log_info(tel_msg)
else:
hutil_log_error(tel_msg)
#Delete the telegraf and ME services
tel_rm_out, tel_rm_msg = telhandler.remove_telegraf_service(is_lad=False)
if tel_rm_out:
hutil_log_info(tel_rm_msg)
else:
hutil_log_error(tel_rm_msg)
if me_handler.is_running(is_lad=False):
me_out, me_msg = me_handler.stop_metrics_service(is_lad=False)
if me_out:
hutil_log_info(me_msg)
else:
hutil_log_error(me_msg)
me_rm_out, me_rm_msg = me_handler.remove_metrics_service(is_lad=False)
if me_rm_out:
hutil_log_info(me_rm_msg)
else:
hutil_log_error(me_rm_msg)
pids_filepath = os.path.join(os.getcwd(),'amametrics.pid')
# kill existing metrics watcher
if os.path.exists(pids_filepath):
with open(pids_filepath, "r") as f:
for pid in f.readlines():
# Verify the pid actually belongs to AMA metrics watcher.
cmd_file = os.path.join("/proc", str(pid.strip("\n")), "cmdline")
if os.path.exists(cmd_file):
with open(cmd_file, "r") as pidf:
cmdline = pidf.readlines()
if len(cmdline) > 0 and cmdline[0].find("agent.py") >= 0 and cmdline[0].find("-metrics") >= 0:
kill_cmd = "kill " + pid
run_command_and_log(kill_cmd)
run_command_and_log("rm "+pids_filepath)
def stop_syslogconfig_process():
pids_filepath = os.path.join(os.getcwd(),'amasyslogconfig.pid')
# kill existing syslog config watcher
if os.path.exists(pids_filepath):
with open(pids_filepath, "r") as f:
for pid in f.readlines():
# Verify the pid actually belongs to AMA syslog watcher.
cmd_file = os.path.join("/proc", str(pid.strip("\n")), "cmdline")
if os.path.exists(cmd_file):
with open(cmd_file, "r") as pidf:
cmdline = pidf.readlines()
if len(cmdline) > 0 and cmdline[0].find("agent.py") >= 0 and cmdline[0].find("-syslogconfig") >= 0:
kill_cmd = "kill " + pid
run_command_and_log(kill_cmd)
run_command_and_log("rm "+ pids_filepath)
def is_metrics_process_running():
pids_filepath = os.path.join(os.getcwd(),'amametrics.pid')
if os.path.exists(pids_filepath):
with open(pids_filepath, "r") as f:
for pid in f.readlines():
# Verify the pid actually belongs to AMA metrics watcher.
cmd_file = os.path.join("/proc", str(pid.strip("\n")), "cmdline")
if os.path.exists(cmd_file):
with open(cmd_file, "r") as pidf:
cmdline = pidf.readlines()
if len(cmdline) > 0 and cmdline[0].find("agent.py") >= 0 and cmdline[0].find("-metrics") >= 0:
return True
return False
def is_syslogconfig_process_running():
pids_filepath = os.path.join(os.getcwd(),'amasyslogconfig.pid')
if os.path.exists(pids_filepath):
with open(pids_filepath, "r") as f:
for pid in f.readlines():
# Verify the pid actually belongs to AMA syslog watcher.
cmd_file = os.path.join("/proc", str(pid.strip("\n")), "cmdline")
if os.path.exists(cmd_file):
with open(cmd_file, "r") as pidf:
cmdline = pidf.readlines()
if len(cmdline) > 0 and cmdline[0].find("agent.py") >= 0 and cmdline[0].find("-syslogconfig") >= 0:
return True
return False
def start_metrics_process():
"""
Start metrics process that performs periodic monitoring activities
:return: None
"""
# If the metrics process is already running, it manages the lifecycle of
# telegraf and ME, refreshes the ME MSI token, and picks up config changes
# when counters change, so this is a no-op
if not is_metrics_process_running():
stop_metrics_process()
# Start metrics watcher
ama_path = os.path.join(os.getcwd(), 'agent.py')
args = [sys.executable, ama_path, '-metrics']
log = open(os.path.join(os.getcwd(), 'daemon.log'), 'w')
hutil_log_info('start watcher process '+str(args))
subprocess.Popen(args, stdout=log, stderr=log)
def start_syslogconfig_process():
"""
Start syslog check process that performs periodic DCR monitoring activities and looks for syslog config changes
:return: None
"""
if not is_syslogconfig_process_running():
stop_syslogconfig_process()
# Start syslog config watcher
ama_path = os.path.join(os.getcwd(), 'agent.py')
args = [sys.executable, ama_path, '-syslogconfig']
log = open(os.path.join(os.getcwd(), 'daemon.log'), 'w')
hutil_log_info('start syslog watcher process '+str(args))
subprocess.Popen(args, stdout=log, stderr=log)
def metrics_watcher(hutil_error, hutil_log):
"""
Watcher thread to monitor metric configuration changes and to take action on them
"""
# Check every 30 seconds
sleepTime = 30
# Retrieve managed identity info that may be needed for token retrieval
identifier_name, identifier_value, error_msg = get_managed_identity()
if error_msg:
hutil_error('Failed to determine managed identity settings; MSI token retrieval will rely on default identity, if any. {0}.'.format(error_msg))
# Sleep before starting the monitoring
time.sleep(sleepTime)
last_crc = None
last_crc_fluent = None
me_msi_token_expiry_epoch = None
# The restart retry counters must persist across loop iterations so that
# repeated restart failures accumulate toward max_restart_retries
telegraf_restart_retries = 0
me_restart_retries = 0
max_restart_retries = 10
while True:
try:
if os.path.isfile(FluentCfgPath):
with open(FluentCfgPath, "r") as f:
    data = f.read()
if (data != ''):
crc_fluent = hashlib.sha256(data.encode('utf-8')).hexdigest()
if (crc_fluent != last_crc_fluent):
restart_launcher()
last_crc_fluent = crc_fluent
if os.path.isfile(MdsdCounterJsonPath):
with open(MdsdCounterJsonPath, "r") as f:
    data = f.read()
if (data != ''):
json_data = json.loads(data)
if len(json_data) == 0:
last_crc = hashlib.sha256(data.encode('utf-8')).hexdigest()
if telhandler.is_running(is_lad=False):
# Stop the telegraf and ME services
tel_out, tel_msg = telhandler.stop_telegraf_service(is_lad=False)
if tel_out:
hutil_log(tel_msg)
else:
hutil_error(tel_msg)
# Delete the telegraf and ME services
tel_rm_out, tel_rm_msg = telhandler.remove_telegraf_service(is_lad=False)
if tel_rm_out:
hutil_log(tel_rm_msg)
else:
hutil_error(tel_rm_msg)
if me_handler.is_running(is_lad=False):
me_out, me_msg = me_handler.stop_metrics_service(is_lad=False)
if me_out:
hutil_log(me_msg)
else:
hutil_error(me_msg)
me_rm_out, me_rm_msg = me_handler.remove_metrics_service(is_lad=False)
if me_rm_out:
hutil_log(me_rm_msg)
else:
hutil_error(me_rm_msg)
else:
crc = hashlib.sha256(data.encode('utf-8')).hexdigest()
if(crc != last_crc):
# Resetting the me_msi_token_expiry_epoch variable if we set up ME again.
me_msi_token_expiry_epoch = None
hutil_log("Start processing metric configuration")
hutil_log(data)
telegraf_config, telegraf_namespaces = telhandler.handle_config(
json_data,
"udp://127.0.0.1:" + metrics_constants.ama_metrics_extension_udp_port,
"unix:///run/azuremonitoragent/default_influx.socket",
is_lad=False)
me_handler.setup_me(is_lad=False, HUtilObj=HUtilObject)
start_telegraf_res, log_messages = telhandler.start_telegraf(is_lad=False)
if start_telegraf_res:
hutil_log("Successfully started metrics-sourcer.")
else:
hutil_error(log_messages)
start_metrics_out, log_messages = me_handler.start_metrics(is_lad=False)
if start_metrics_out:
hutil_log("Successfully started metrics-extension.")
else:
hutil_error(log_messages)
last_crc = crc
generate_token = False
# os.path.join() discards os.getcwd() when the second argument is an
# absolute path, so the leading slash is dropped to keep the token file
# under the extension directory
me_token_path = os.path.join(os.getcwd(), "config/metrics_configs/AuthToken-MSI.json")
if me_msi_token_expiry_epoch is None or me_msi_token_expiry_epoch == "":
if os.path.isfile(me_token_path):
with open(me_token_path, "r") as f:
    authtoken_content = f.read()
# The token file is JSON; parse it before reading "expires_on"
# (indexing the raw string with a string key raises TypeError)
authtoken_json = json.loads(authtoken_content) if authtoken_content else None
if authtoken_json and "expires_on" in authtoken_json:
    me_msi_token_expiry_epoch = authtoken_json["expires_on"]
else:
    generate_token = True
else:
generate_token = True
if me_msi_token_expiry_epoch:
currentTime = datetime.datetime.now()
token_expiry_time = datetime.datetime.fromtimestamp(int(me_msi_token_expiry_epoch))
if token_expiry_time - currentTime < datetime.timedelta(minutes=30):
# The MSI Token will expire within 30 minutes. We need to refresh the token
generate_token = True
if generate_token:
generate_token = False
msi_token_generated, me_msi_token_expiry_epoch, log_messages = me_handler.generate_MSI_token(identifier_name, identifier_value)
if msi_token_generated:
hutil_log("Successfully refreshed metrics-extension MSI Auth token.")
else:
hutil_error(log_messages)
# Check if telegraf is running, if not, then restart
if not telhandler.is_running(is_lad=False):
if telegraf_restart_retries < max_restart_retries:
telegraf_restart_retries += 1
hutil_log("Telegraf binary process is not running. Restarting telegraf now. Retry count - {0}".format(telegraf_restart_retries))
tel_out, tel_msg = telhandler.stop_telegraf_service(is_lad=False)
if tel_out:
hutil_log(tel_msg)
else:
hutil_error(tel_msg)
start_telegraf_res, log_messages = telhandler.start_telegraf(is_lad=False)
if start_telegraf_res:
hutil_log("Successfully started metrics-sourcer.")
else:
hutil_error(log_messages)
else:
hutil_error("Telegraf binary process is not running. Failed to restart after {0} retries. Please check telegraf.log".format(max_restart_retries))
else:
telegraf_restart_retries = 0
# Check if ME is running, if not, then restart
if not me_handler.is_running(is_lad=False):
if me_restart_retries < max_restart_retries:
me_restart_retries += 1
hutil_log("MetricsExtension binary process is not running. Restarting MetricsExtension now. Retry count - {0}".format(me_restart_retries))
me_out, me_msg = me_handler.stop_metrics_service(is_lad=False)
if me_out:
hutil_log(me_msg)
else:
hutil_error(me_msg)
start_metrics_out, log_messages = me_handler.start_metrics(is_lad=False)
if start_metrics_out:
hutil_log("Successfully started metrics-extension.")
else:
hutil_error(log_messages)
else:
hutil_error("MetricsExtension binary process is not running. Failed to restart after {0} retries. Please check /var/log/syslog for ME logs".format(max_restart_retries))
else:
me_restart_retries = 0
except IOError as e:
hutil_error('I/O error in setting up or monitoring metrics. Exception={0}'.format(e))
except Exception as e:
hutil_error('Error in setting up or monitoring metrics. Exception={0}'.format(e))
finally:
time.sleep(sleepTime)
def syslogconfig_watcher(hutil_error, hutil_log):
"""
Watcher thread to monitor syslog configuration changes and to take action on them
"""
syslog_enabled = False
# Check for config changes every 30 seconds
sleepTime = 30
# Sleep before starting the monitoring
time.sleep(sleepTime)
while True:
try:
if os.path.isfile(AMASyslogConfigMarkerPath):
f = open(AMASyslogConfigMarkerPath, "r")
data = f.read()
if (data != ''):
if "true" in data:
syslog_enabled = True
f.close()
if syslog_enabled:
# place syslog local configs
syslog_enabled = False
generate_localsyslog_configs()
else:
# remove syslog local configs
remove_localsyslog_configs()
except IOError as e:
hutil_error('I/O error in setting up syslog config watcher. Exception={0}'.format(e))
except Exception as e:
hutil_error('Error in setting up syslog config watcher. Exception={0}'.format(e))
finally:
time.sleep(sleepTime)
def generate_localsyslog_configs():
"""
Install local syslog configuration files if not present and restart syslog
"""
public_settings, _ = get_settings()
syslog_port = ''
if os.path.isfile(AMASyslogPortFilePath):
f = open(AMASyslogPortFilePath, "r")
syslog_port = f.read()
f.close()
useSyslogTcp = False
syslogTcpPreviewFlagPath = PreviewFeaturesDirectory + 'useSyslogTcp'
if os.path.exists(syslogTcpPreviewFlagPath):
useSyslogTcp = True
# always use syslog tcp port, unless
# - the distro is Red Hat based and doesn't have semanage
# these distros seem to have SELinux on by default and we shouldn't be installing semanage ourselves
if not os.path.exists('/etc/selinux/config'):
useSyslogTcp = True
else:
sedisabled, _ = run_command_and_log('getenforce | grep -i "Disabled"')
if sedisabled == 0:
useSyslogTcp = True
else:
check_semanage, _ = run_command_and_log("which semanage")
if check_semanage != 0:
hutil_log_info("semanage not found, cannot let TCP Port through for syslog")
elif syslog_port != '':
# allow the syslog port in SELinux
run_command_and_log('semanage port -a -t syslogd_port_t -p tcp ' + syslog_port)
useSyslogTcp = True
if useSyslogTcp == True and syslog_port != '':
if os.path.exists('/etc/rsyslog.d/'):
restartRequired = False
if not os.path.exists('/etc/rsyslog.d/10-azuremonitoragent-omfwd.conf'):
if os.path.exists('/etc/rsyslog.d/10-azuremonitoragent.conf'):
os.remove("/etc/rsyslog.d/05-azuremonitoragent-loadomuxsock.conf")
os.remove("/etc/rsyslog.d/10-azuremonitoragent.conf")
copyfile("/etc/opt/microsoft/azuremonitoragent/syslog/rsyslogconf/10-azuremonitoragent-omfwd.conf","/etc/rsyslog.d/10-azuremonitoragent-omfwd.conf")
os.chmod('/etc/rsyslog.d/10-azuremonitoragent-omfwd.conf', stat.S_IRGRP | stat.S_IRUSR | stat.S_IWUSR | stat.S_IROTH)
restartRequired = True
portSetting = 'Port="' + syslog_port + '"'
defaultPortSetting = 'Port="28330"'
portUpdated = False
with open('/etc/rsyslog.d/10-azuremonitoragent-omfwd.conf') as f:
if portSetting not in f.read():
portUpdated = True
if portUpdated == True:
copyfile("/etc/opt/microsoft/azuremonitoragent/syslog/rsyslogconf/10-azuremonitoragent-omfwd.conf","/etc/rsyslog.d/10-azuremonitoragent-omfwd.conf")
with fileinput.FileInput('/etc/rsyslog.d/10-azuremonitoragent-omfwd.conf', inplace=True, backup='.bak') as file:
for line in file:
print(line.replace(defaultPortSetting, portSetting), end='')
os.chmod('/etc/rsyslog.d/10-azuremonitoragent-omfwd.conf', stat.S_IRGRP | stat.S_IRUSR | stat.S_IWUSR | stat.S_IROTH)
restartRequired = True
if restartRequired == True:
run_command_and_log(get_service_command("rsyslog", "restart"))
hutil_log_info("Installed local syslog configuration files and restarted syslog")
if os.path.exists('/etc/syslog-ng/syslog-ng.conf'):
restartRequired = False
if not os.path.exists('/etc/syslog-ng/conf.d/azuremonitoragent-tcp.conf'):
if os.path.exists('/etc/syslog-ng/conf.d/azuremonitoragent.conf'):
os.remove("/etc/syslog-ng/conf.d/azuremonitoragent.conf")
syslog_ng_confpath = os.path.join('/etc/syslog-ng/', 'conf.d')
if not os.path.exists(syslog_ng_confpath):
os.makedirs(syslog_ng_confpath)
copyfile("/etc/opt/microsoft/azuremonitoragent/syslog/syslog-ngconf/azuremonitoragent-tcp.conf","/etc/syslog-ng/conf.d/azuremonitoragent-tcp.conf")
os.chmod('/etc/syslog-ng/conf.d/azuremonitoragent-tcp.conf', stat.S_IRGRP | stat.S_IRUSR | stat.S_IWUSR | stat.S_IROTH)
restartRequired = True
portSetting = "port(" + syslog_port + ")"
defaultPortSetting = "port(28330)"
portUpdated = False
with open('/etc/syslog-ng/conf.d/azuremonitoragent-tcp.conf') as f:
if portSetting not in f.read():
portUpdated = True
if portUpdated == True:
copyfile("/etc/opt/microsoft/azuremonitoragent/syslog/syslog-ngconf/azuremonitoragent-tcp.conf","/etc/syslog-ng/conf.d/azuremonitoragent-tcp.conf")
with fileinput.FileInput('/etc/syslog-ng/conf.d/azuremonitoragent-tcp.conf', inplace=True, backup='.bak') as file:
for line in file:
print(line.replace(defaultPortSetting, portSetting), end='')
os.chmod('/etc/syslog-ng/conf.d/azuremonitoragent-tcp.conf', stat.S_IRGRP | stat.S_IRUSR | stat.S_IWUSR | stat.S_IROTH)
restartRequired = True
if restartRequired == True:
run_command_and_log(get_service_command("syslog-ng", "restart"))
hutil_log_info("Installed local syslog configuration files and restarted syslog")
else:
if os.path.exists('/etc/rsyslog.d/') and not os.path.exists('/etc/rsyslog.d/10-azuremonitoragent.conf'):
if os.path.exists('/etc/rsyslog.d/10-azuremonitoragent-omfwd.conf'):
os.remove("/etc/rsyslog.d/10-azuremonitoragent-omfwd.conf")
copyfile("/etc/opt/microsoft/azuremonitoragent/syslog/rsyslogconf/05-azuremonitoragent-loadomuxsock.conf","/etc/rsyslog.d/05-azuremonitoragent-loadomuxsock.conf")
copyfile("/etc/opt/microsoft/azuremonitoragent/syslog/rsyslogconf/10-azuremonitoragent.conf","/etc/rsyslog.d/10-azuremonitoragent.conf")
os.chmod('/etc/rsyslog.d/05-azuremonitoragent-loadomuxsock.conf', stat.S_IRGRP | stat.S_IRUSR | stat.S_IWUSR | stat.S_IROTH)
os.chmod('/etc/rsyslog.d/10-azuremonitoragent.conf', stat.S_IRGRP | stat.S_IRUSR | stat.S_IWUSR | stat.S_IROTH)
run_command_and_log(get_service_command("rsyslog", "restart"))
hutil_log_info("Installed local syslog configuration files and restarted syslog")
if os.path.exists('/etc/syslog-ng/syslog-ng.conf') and not os.path.exists('/etc/syslog-ng/conf.d/azuremonitoragent.conf'):
if os.path.exists('/etc/syslog-ng/conf.d/azuremonitoragent-tcp.conf'):
os.remove("/etc/syslog-ng/conf.d/azuremonitoragent-tcp.conf")
syslog_ng_confpath = os.path.join('/etc/syslog-ng/', 'conf.d')
if not os.path.exists(syslog_ng_confpath):
os.makedirs(syslog_ng_confpath)
copyfile("/etc/opt/microsoft/azuremonitoragent/syslog/syslog-ngconf/azuremonitoragent.conf","/etc/syslog-ng/conf.d/azuremonitoragent.conf")
os.chmod('/etc/syslog-ng/conf.d/azuremonitoragent.conf', stat.S_IRGRP | stat.S_IRUSR | stat.S_IWUSR | stat.S_IROTH)
run_command_and_log(get_service_command("syslog-ng", "restart"))
hutil_log_info("Installed local syslog configuration files and restarted syslog")
def remove_localsyslog_configs():
"""
Remove local syslog configuration files if present and restart syslog
"""
if os.path.exists('/etc/rsyslog.d/10-azuremonitoragent.conf') or os.path.exists('/etc/rsyslog.d/10-azuremonitoragent-omfwd.conf'):
if os.path.exists('/etc/rsyslog.d/10-azuremonitoragent-omfwd.conf'):
os.remove("/etc/rsyslog.d/10-azuremonitoragent-omfwd.conf")
if os.path.exists('/etc/rsyslog.d/10-azuremonitoragent.conf'):
os.remove("/etc/rsyslog.d/05-azuremonitoragent-loadomuxsock.conf")
os.remove("/etc/rsyslog.d/10-azuremonitoragent.conf")
run_command_and_log(get_service_command("rsyslog", "restart"))
hutil_log_info("Removed local syslog configuration files if found and restarted syslog")
if os.path.exists('/etc/syslog-ng/conf.d/azuremonitoragent.conf') or os.path.exists('/etc/syslog-ng/conf.d/azuremonitoragent-tcp.conf'):
if os.path.exists('/etc/syslog-ng/conf.d/azuremonitoragent-tcp.conf'):
os.remove("/etc/syslog-ng/conf.d/azuremonitoragent-tcp.conf")
if os.path.exists('/etc/syslog-ng/conf.d/azuremonitoragent.conf'):
os.remove("/etc/syslog-ng/conf.d/azuremonitoragent.conf")
run_command_and_log(get_service_command("syslog-ng", "restart"))
hutil_log_info("Removed local syslog configuration files if found and restarted syslog")
def metrics():
"""
Take care of setting up telegraf and ME for metrics if configuration is present
"""
pids_filepath = os.path.join(os.getcwd(), 'amametrics.pid')
py_pid = os.getpid()
with open(pids_filepath, 'w') as f:
f.write(str(py_pid) + '\n')
watcher_thread = Thread(target = metrics_watcher, args = [hutil_log_error, hutil_log_info])
watcher_thread.start()
watcher_thread.join()
return 0, ""
def syslogconfig():
"""
Take care of setting up syslog configuration change watcher
"""
pids_filepath = os.path.join(os.getcwd(), 'amasyslogconfig.pid')
py_pid = os.getpid()
with open(pids_filepath, 'w') as f:
f.write(str(py_pid) + '\n')
watcher_thread = Thread(target = syslogconfig_watcher, args = [hutil_log_error, hutil_log_info])
watcher_thread.start()
watcher_thread.join()
return 0, ""
# Dictionary of operations strings to methods
operations = {'Disable' : disable,
'Uninstall' : uninstall,
'Install' : install,
'Enable' : enable,
'Update' : update,
'Metrics' : metrics,
'Syslogconfig' : syslogconfig
}
def parse_context(operation):
"""
Initialize a HandlerUtil object for this operation.
If the required modules have not been imported, this will return None.
"""
hutil = None
if ('Utils.WAAgentUtil' in sys.modules
and 'Utils.HandlerUtil' in sys.modules):
try:
logFileName = 'extension.log'
hutil = HUtil.HandlerUtility(waagent.Log, waagent.Error, logFileName=logFileName)
hutil.do_parse_context(operation)
# parse_context may throw KeyError if necessary JSON key is not
# present in settings
except KeyError as e:
waagent_log_error('Unable to parse context with error: ' \
'{0}'.format(e))
raise ParameterMissingException
return hutil
def set_os_arch(operation):
"""
Checks if the current system architecture is present in the SupportedArch set and replaces
the package names accordingly
"""
global BundleFileName, SupportedArch
current_arch = platform.machine()
if current_arch in SupportedArch:
# Replace the AMA package name according to architecture
BundleFileName = BundleFileName.replace('x86_64', current_arch)
dynamicSSLPreviewFlagPath = PreviewFeaturesDirectory + 'useDynamicSSL'
if os.path.exists(dynamicSSLPreviewFlagPath):
BundleFileName = BundleFileName.replace('_' + current_arch, '.dynamicssl_' + current_arch)
# Rename the Arch appropriate metrics extension binary to MetricsExtension
MetricsExtensionDir = os.path.join(os.getcwd(), 'MetricsExtensionBin')
SupportedMEPath = os.path.join(MetricsExtensionDir, 'MetricsExtension_'+current_arch)
vm_dist, vm_ver = find_vm_distro(operation)
if current_arch == 'aarch64' and vm_dist.startswith('centos') and vm_ver.startswith('7'):
SupportedMEPath += '_centos7'
if os.path.exists(SupportedMEPath):
os.rename(SupportedMEPath, os.path.join(MetricsExtensionDir, 'MetricsExtension'))
# Cleanup unused ME binaries
for f in os.listdir(MetricsExtensionDir):
if f != 'MetricsExtension':
os.remove(os.path.join(MetricsExtensionDir, f))
def find_package_manager(operation):
"""
Checks if the dist is debian based or centos based and assigns the package manager accordingly
"""
global PackageManager, PackageManagerOptions, BundleFileName
dist, _ = find_vm_distro(operation)
dpkg_set = set(["debian", "ubuntu"])
rpm_set = set(["oracle", "ol", "redhat", "centos", "red hat", "suse", "sles", "opensuse", "cbl-mariner", "mariner", "rhel", "rocky", "alma", "amzn"])
for dpkg_dist in dpkg_set:
if dist.startswith(dpkg_dist):
PackageManager = "dpkg"
# OK to replace the /etc/default/azuremonitoragent, since the placeholders get replaced again.
# Otherwise, the package manager prompts for action (Y/I/N/O/D/Z) [default=N]
PackageManagerOptions = "--force-overwrite --force-confnew"
BundleFileName = BundleFileNameDeb
break
for rpm_dist in rpm_set:
if dist.startswith(rpm_dist):
PackageManager = "rpm"
# Same as above.
PackageManagerOptions = "--force"
BundleFileName = BundleFileNameRpm
break
if PackageManager == "":
log_and_exit(operation, UnsupportedOperatingSystem, "The OS has neither rpm nor dpkg" )
def find_vm_distro(operation):
"""
Finds the Linux Distribution this vm is running on.
"""
vm_dist = vm_id = vm_ver = None
parse_manually = False
try:
vm_dist, vm_ver, vm_id = platform.linux_distribution()
except AttributeError:
try:
vm_dist, vm_ver, vm_id = platform.dist()
except AttributeError:
hutil_log_info("Falling back to /etc/os-release distribution parsing")
# Some locally built Python versions (e.g. 3.5) return free-form strings
# (e.g. 'bullseye/sid') from platform.dist(); the int() conversion below
# would then raise, so switch to manual parsing in that case
try:
temp_vm_ver = int(vm_ver.split('.')[0])
except:
parse_manually = True
if (not vm_dist and not vm_ver) or parse_manually: # SLES 15 and others
try:
with open('/etc/os-release', 'r') as fp:
for line in fp:
if line.startswith('ID='):
vm_dist = line.split('=')[1]
vm_dist = vm_dist.split('-')[0]
vm_dist = vm_dist.replace('\"', '').replace('\n', '')
elif line.startswith('VERSION_ID='):
vm_ver = line.split('=')[1]
vm_ver = vm_ver.replace('\"', '').replace('\n', '')
except:
log_and_exit(operation, IndeterminateOperatingSystem, 'Indeterminate operating system')
return vm_dist.lower(), vm_ver.lower()
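# For reference, typical /etc/os-release lines that the fallback parser above
# consumes (values are illustrative and vary by distro):
#   ID=ubuntu            -> vm_dist = 'ubuntu'
#   ID="opensuse-leap"   -> vm_dist = 'opensuse' (text after '-' is dropped)
#   VERSION_ID="20.04"   -> vm_ver  = '20.04'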
def is_vm_supported_for_extension(operation):
"""
Checks if the VM this extension is running on is supported by AzureMonitorAgent
Returns for platform.linux_distribution() vary widely in format, such as
'7.3.1611' returned for a VM with CentOS 7, so the first provided
digits must match
The supported distros of the AzureMonitorLinuxAgent are allowed to utilize
this VM extension. All other distros will get error code 51
"""
supported_dists_x86_64 = {'redhat' : ['7', '8', '9'], # Rhel
'rhel' : ['7', '8', '9'], # Rhel
'centos' : ['7', '8'], # CentOS
'red hat' : ['7', '8', '9'], # Oracle, RHEL
'oracle' : ['7', '8', '9'], # Oracle
'ol' : ['7', '8', '9'], # Oracle Linux
'debian' : ['9', '10', '11'], # Debian
'ubuntu' : ['16.04', '18.04', '20.04', '22.04'], # Ubuntu
'suse' : ['12', '15'], 'sles' : ['12', '15'], # SLES
'cbl-mariner' : ['1'], # Mariner 1.0
'mariner' : ['2'], # Mariner 2.0
'rocky' : ['8', '9'], # Rocky
'alma' : ['8', '9'], # Alma
'opensuse' : ['15'], # openSUSE
'amzn' : ['2'] # Amazon Linux 2
}
supported_dists_aarch64 = {'red hat' : ['8'], # Rhel
'ubuntu' : ['18.04', '20.04'], # Ubuntu
'alma' : ['8'], # Alma
'centos' : ['7'], # CentOS
'mariner' : ['2'], # Mariner 2.0
'sles' : ['15'], # SLES
'debian' : ['11'] # Debian
}
if platform.machine() == 'aarch64':
supported_dists = supported_dists_aarch64
else:
supported_dists = supported_dists_x86_64
vm_supported = False
vm_dist, vm_ver = find_vm_distro(operation)
# Find this VM distribution in the supported list
for supported_dist in list(supported_dists.keys()):
if not vm_dist.startswith(supported_dist):
continue
# Check if this VM distribution version is supported
vm_ver_split = vm_ver.split('.')
for supported_ver in supported_dists[supported_dist]:
supported_ver_split = supported_ver.split('.')
# If vm_ver is at least as precise (at least as many digits) as
# supported_ver and matches all the supported_ver digits, then
# this VM is guaranteed to be supported
vm_ver_match = True
for idx, supported_ver_num in enumerate(supported_ver_split):
try:
supported_ver_num = int(supported_ver_num)
vm_ver_num = int(vm_ver_split[idx])
except IndexError:
vm_ver_match = False
break
if vm_ver_num != supported_ver_num:
vm_ver_match = False
break
if vm_ver_match:
vm_supported = True
break
if vm_supported:
break
return vm_supported, vm_dist, vm_ver
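# Example of the prefix-match rule above: a supported version '18.04' matches
# a reported vm_ver of '18.04' or '18.04.5', but a bare vm_ver of '18' does
# not match, because vm_ver must be at least as precise as the supported
# version string.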
def exit_if_vm_not_supported(operation):
"""
Check if this VM distro and version are supported by the AzureMonitorLinuxAgent.
If VM is supported, find the package manager present in this distro
If this VM is not supported, log the proper error code and exit.
"""
vm_supported, vm_dist, vm_ver = is_vm_supported_for_extension(operation)
if not vm_supported:
log_and_exit(operation, UnsupportedOperatingSystem, 'Unsupported operating system: ' \
'{0} {1}'.format(vm_dist, vm_ver))
return 0
def get_ssl_cert_info(operation):
"""
Get the appropriate SSL_CERT_DIR / SSL_CERT_FILE based on the Linux distro
"""
name = value = None
distro, version = find_vm_distro(operation)
for name in ['ubuntu', 'debian']:
if distro.startswith(name):
return 'SSL_CERT_DIR', '/etc/ssl/certs'
for name in ['centos', 'redhat', 'red hat', 'oracle', 'ol', 'cbl-mariner', 'mariner', 'rhel', 'rocky', 'alma', 'amzn']:
if distro.startswith(name):
return 'SSL_CERT_FILE', '/etc/pki/tls/certs/ca-bundle.crt'
for name in ['suse', 'sles', 'opensuse']:
if distro.startswith(name):
if version.startswith('12'):
return 'SSL_CERT_DIR', '/var/lib/ca-certificates/openssl'
elif version.startswith('15'):
return 'SSL_CERT_DIR', '/etc/ssl/certs'
log_and_exit(operation, GenericErrorCode, 'Unable to determine values for SSL_CERT_DIR or SSL_CERT_FILE')
def is_arc_installed():
"""
Check if this is an Arc machine
"""
# Using systemctl to check this since Arc only supports VMs that have systemd
check_arc = os.system('systemctl status himdsd 1>/dev/null 2>&1')
return check_arc == 0
def get_arc_endpoint():
"""
Find the endpoint for Arc IMDS
"""
endpoint_filepath = '/lib/systemd/system.conf.d/azcmagent.conf'
endpoint = ''
try:
with open(endpoint_filepath, 'r') as f:
data = f.read()
endpoint = data.split("\"IMDS_ENDPOINT=")[1].split("\"\n")[0]
except:
hutil_log_error('Unable to load Arc IMDS endpoint from {0}'.format(endpoint_filepath))
return endpoint
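# The parsing above assumes azcmagent.conf contains a quoted entry such as
# (illustrative; localhost:40342 is the conventional Arc himds address):
#   DefaultEnvironment="IMDS_ENDPOINT=http://localhost:40342"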
def get_imds_endpoint():
"""
Find the appropriate endpoint (Azure or Arc) for IMDS
"""
azure_imds_endpoint = 'http://169.254.169.254/metadata/instance?api-version=2018-10-01'
if (is_arc_installed()):
hutil_log_info('Arc is installed, loading Arc-specific IMDS endpoint')
imds_endpoint = get_arc_endpoint()
if imds_endpoint:
imds_endpoint += '/metadata/instance?api-version=2019-08-15'
else:
# Fall back to the traditional IMDS endpoint; the cloud domain and VM
# resource id detection logic are resilient to failed queries to IMDS
imds_endpoint = azure_imds_endpoint
hutil_log_info('Falling back to default Azure IMDS endpoint')
else:
imds_endpoint = azure_imds_endpoint
hutil_log_info('Using IMDS endpoint "{0}"'.format(imds_endpoint))
return imds_endpoint
def get_azure_environment_and_region():
"""
Retrieve the Azure environment and region from Azure or Arc IMDS
"""
imds_endpoint = get_imds_endpoint()
req = urllib.request.Request(imds_endpoint)
req.add_header('Metadata', 'True')
environment = region = None
try:
response = json.loads(urllib.request.urlopen(req).read())
if ('compute' in response):
if ('azEnvironment' in response['compute']):
environment = response['compute']['azEnvironment']
if ('location' in response['compute']):
region = response['compute']['location'].lower()
except urllib.error.HTTPError as e:
hutil_log_error('Request to Metadata service URL failed with an HTTPError: {0}'.format(e))
hutil_log_error('Response from Metadata service: {0}'.format(e.read()))
except:
hutil_log_error('Unexpected error from Metadata service')
return environment, region
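# Trimmed sketch of the IMDS payload fields consumed above (illustrative
# values; the real response carries many more fields):
#   {"compute": {"azEnvironment": "AzurePublicCloud", "location": "eastus"}}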
def run_command_and_log(cmd, check_error = True, log_cmd = True):
"""
Run the provided shell command and log its output, including stdout and
stderr.
The output should not contain any PII, but the command might. In this case,
log_cmd should be set to False.
"""
exit_code, output = run_get_output(cmd, check_error, log_cmd)
if log_cmd:
hutil_log_info('Output of command "{0}": \n{1}'.format(cmd.rstrip(), output))
else:
hutil_log_info('Output: \n{0}'.format(output))
if "cannot open Packages database" in output:
# Install failures
# External issue. Package manager db is either corrupt or needs cleanup
# https://github.com/Azure/azure-marketplace/wiki/Extension-Build-Notes-Best-Practices#error-codes-and-messages-output-to-stderr
exit_code = MissingDependency
output += "Package manager database is in a bad state. Please recover package manager, db cache and try install again later."
elif "Permission denied" in output:
# Enable failures
# https://github.com/Azure/azure-marketplace/wiki/Extension-Build-Notes-Best-Practices#error-codes-and-messages-output-to-stderr
exit_code = MissingDependency
return exit_code, output
def run_command_with_retries_output(cmd, retries, retry_check, final_check = None,
check_error = True, log_cmd = True,
initial_sleep_time = InitialRetrySleepSeconds,
sleep_increase_factor = 1):
"""
Caller provides a method, retry_check, to use to determine if a retry
should be performed. This must be a function with two parameters:
exit_code and output
The final_check can be provided as a method to perform a final check after
retries have been exhausted
Logic used: will retry up to retries times with initial_sleep_time in
between tries
If the retry_check returns True for retry_verbosely, we will try cmd with
the standard -v verbose flag added
"""
try_count = 0
sleep_time = initial_sleep_time
run_cmd = cmd
run_verbosely = False
while try_count <= retries:
if run_verbosely:
run_cmd = cmd + ' -v'
exit_code, output = run_command_and_log(run_cmd, check_error, log_cmd)
should_retry, retry_message, run_verbosely = retry_check(exit_code,
output)
if not should_retry:
break
try_count += 1
hutil_log_info(retry_message)
time.sleep(sleep_time)
sleep_time *= sleep_increase_factor
if final_check is not None:
exit_code = final_check(exit_code, output)
return exit_code, output
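# Illustrative call (the command string is hypothetical), showing the
# retry_check contract: it takes (exit_code, output) and returns
# (should_retry, retry_message, retry_verbosely):
#   exit_code, output = run_command_with_retries_output(
#       'dpkg -i bundle.deb', retries=5,
#       retry_check=retry_if_dpkg_or_rpm_locked,
#       final_check=final_check_if_dpkg_or_rpm_locked)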
def is_dpkg_or_rpm_locked(exit_code, output):
"""
If dpkg or rpm is locked, the output will contain a message similar to
'dpkg status database is locked by another process'
"""
if exit_code != 0:
dpkg_locked_search = r'^.*dpkg.+lock.*$'
dpkg_locked_re = re.compile(dpkg_locked_search, re.M)
if dpkg_locked_re.search(output):
return True
rpm_locked_search = r'^.*rpm.+lock.*$'
rpm_locked_re = re.compile(rpm_locked_search, re.M)
if rpm_locked_re.search(output):
return True
return False
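# Example output line (illustrative) matched by the dpkg pattern above:
#   "dpkg: error: dpkg status database is locked by another process"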
def retry_if_dpkg_or_rpm_locked(exit_code, output):
"""
Some commands fail because the package manager (dpkg or rpm) is locked;
this will allow retries on failing commands.
"""
retry_verbosely = False
dpkg_or_rpm_locked = is_dpkg_or_rpm_locked(exit_code, output)
if dpkg_or_rpm_locked:
return True, 'Retrying command because package manager is locked.', \
retry_verbosely
else:
return False, '', False
def final_check_if_dpkg_or_rpm_locked(exit_code, output):
"""
If dpkg or rpm is still locked after the retries, we want to return a specific
error code
"""
dpkg_or_rpm_locked = is_dpkg_or_rpm_locked(exit_code, output)
if dpkg_or_rpm_locked:
exit_code = DPKGOrRPMLockedErrorCode
return exit_code
def get_settings():
"""
Retrieve the configuration for this extension operation
"""
global SettingsDict
public_settings = None
protected_settings = None
if HUtilObject is not None:
public_settings = HUtilObject.get_public_settings()
protected_settings = HUtilObject.get_protected_settings()
elif SettingsDict is not None:
public_settings = SettingsDict['public_settings']
protected_settings = SettingsDict['protected_settings']
else:
SettingsDict = {}
handler_env = get_handler_env()
try:
config_dir = str(handler_env['handlerEnvironment']['configFolder'])
except:
config_dir = os.path.join(os.getcwd(), 'config')
seq_no = get_latest_seq_no()
settings_path = os.path.join(config_dir, '{0}.settings'.format(seq_no))
try:
with open(settings_path, 'r') as settings_file:
settings_txt = settings_file.read()
settings = json.loads(settings_txt)
h_settings = settings['runtimeSettings'][0]['handlerSettings']
public_settings = h_settings['publicSettings']
SettingsDict['public_settings'] = public_settings
except:
hutil_log_error('Unable to load handler settings from ' \
'{0}'.format(settings_path))
if ('protectedSettings' in h_settings
and 'protectedSettingsCertThumbprint' in h_settings
and h_settings['protectedSettings'] is not None
and h_settings['protectedSettingsCertThumbprint'] is not None):
encoded_settings = h_settings['protectedSettings']
settings_thumbprint = h_settings['protectedSettingsCertThumbprint']
encoded_cert_path = os.path.join('/var/lib/waagent',
'{0}.crt'.format(
settings_thumbprint))
encoded_key_path = os.path.join('/var/lib/waagent',
'{0}.prv'.format(
settings_thumbprint))
decoded_settings = base64.standard_b64decode(encoded_settings)
decrypt_cmd = 'openssl smime -inform DER -decrypt -recip {0} ' \
'-inkey {1}'.format(encoded_cert_path,
encoded_key_path)
try:
session = subprocess.Popen([decrypt_cmd], shell = True,
stdin = subprocess.PIPE,
stderr = subprocess.STDOUT,
stdout = subprocess.PIPE)
output = session.communicate(decoded_settings)
except OSError:
pass
protected_settings_str = output[0]
if protected_settings_str is None:
log_and_exit('Enable', GenericErrorCode, 'Failed decrypting protectedSettings')
protected_settings = ''
try:
protected_settings = json.loads(protected_settings_str)
except:
hutil_log_error('JSON exception decoding protected settings')
SettingsDict['protected_settings'] = protected_settings
return public_settings, protected_settings
def update_status_file(operation, exit_code, exit_status, message):
"""
Mimic HandlerUtil method do_status_report in case hutil method is not
available
Write status to status file
"""
handler_env = get_handler_env()
try:
extension_version = str(handler_env['version'])
status_dir = str(handler_env['handlerEnvironment']['statusFolder'])
except:
extension_version = "1.0"
status_dir = os.path.join(os.getcwd(), 'status')
status_txt = [{
"version" : extension_version,
"timestampUTC" : time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
"status" : {
"name" : "Microsoft.Azure.Monitor.AzureMonitorLinuxAgent",
"operation" : operation,
"status" : exit_status,
"code" : exit_code,
"formattedMessage" : {
"lang" : "en-US",
"message" : message
}
}
}]
status_json = json.dumps(status_txt)
# Find the most recently changed config file and then use the
# corresponding status file
latest_seq_no = get_latest_seq_no()
status_path = os.path.join(status_dir, '{0}.status'.format(latest_seq_no))
status_tmp = '{0}.tmp'.format(status_path)
with open(status_tmp, 'w+') as tmp_file:
tmp_file.write(status_json)
os.rename(status_tmp, status_path)
def get_handler_env():
"""
Set and retrieve the contents of HandlerEnvironment.json as JSON
"""
global HandlerEnvironment
if HandlerEnvironment is None:
handler_env_path = os.path.join(os.getcwd(), 'HandlerEnvironment.json')
try:
with open(handler_env_path, 'r') as handler_env_file:
handler_env_txt = handler_env_file.read()
handler_env = json.loads(handler_env_txt)
if type(handler_env) == list:
handler_env = handler_env[0]
HandlerEnvironment = handler_env
except Exception as e:
waagent_log_error(str(e))
return HandlerEnvironment
def get_latest_seq_no():
"""
Determine the latest operation settings number to use
"""
global SettingsSequenceNumber
if SettingsSequenceNumber is None:
handler_env = get_handler_env()
try:
config_dir = str(handler_env['handlerEnvironment']['configFolder'])
except:
config_dir = os.path.join(os.getcwd(), 'config')
latest_seq_no = -1
cur_seq_no = -1
latest_time = None
try:
for dir_name, sub_dirs, file_names in os.walk(config_dir):
for file_name in file_names:
file_basename = os.path.basename(file_name)
match = re.match(r'[0-9]{1,10}\.settings', file_basename)
if match is None:
continue
cur_seq_no = int(file_basename.split('.')[0])
file_path = os.path.join(config_dir, file_name)
cur_time = os.path.getmtime(file_path)
if latest_time is None or cur_time > latest_time:
latest_time = cur_time
latest_seq_no = cur_seq_no
except:
pass
if latest_seq_no < 0:
latest_seq_no = 0
SettingsSequenceNumber = latest_seq_no
return SettingsSequenceNumber
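# Example (illustrative): with config/0.settings and config/1.settings both
# on disk, the sequence number of the most recently modified file wins, which
# is not necessarily the numerically largest one.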
def run_get_output(cmd, chk_err = False, log_cmd = True):
"""
Mimic waagent method RunGetOutput in case waagent is not available
Run shell command and return exit code and output
"""
if 'Utils.WAAgentUtil' in sys.modules:
# WALinuxAgent-2.0.14 allows only 2 parameters for RunGetOutput
# If checking the number of parameters fails, pass 2
try:
sig = inspect.signature(waagent.RunGetOutput)
params = sig.parameters
waagent_params = len(params)
except:
try:
spec = inspect.getargspec(waagent.RunGetOutput)
params = spec.args
waagent_params = len(params)
except:
waagent_params = 2
if waagent_params >= 3:
exit_code, output = waagent.RunGetOutput(cmd, chk_err, log_cmd)
else:
exit_code, output = waagent.RunGetOutput(cmd, chk_err)
else:
try:
output = subprocess.check_output(cmd, stderr = subprocess.STDOUT,
shell = True)
exit_code = 0
except subprocess.CalledProcessError as e:
exit_code = e.returncode
output = e.output
# On Python 3, e.output is already a byte string; only encode text output
if not isinstance(output, bytes):
    output = output.encode('utf-8')
# On python 3, encode returns a byte object, so we must decode back to a string
if sys.version_info >= (3,):
output = output.decode('utf-8', 'ignore')
return exit_code, output.strip()
def init_waagent_logger():
"""
Initialize waagent logger
If waagent has not been imported, catch the exception
"""
try:
waagent.LoggerInit('/var/log/waagent.log', '/dev/stdout', True)
except Exception as e:
print('Unable to initialize waagent log because of exception ' \
'{0}'.format(e))
def waagent_log_info(message):
"""
Log an informational message, being cautious of the possibility that
waagent may not be imported
"""
if 'Utils.WAAgentUtil' in sys.modules:
waagent.Log(message)
else:
print('Info: {0}'.format(message))
def waagent_log_error(message):
"""
Log an error message, being cautious of the possibility that waagent may
not be imported
"""
if 'Utils.WAAgentUtil' in sys.modules:
waagent.Error(message)
else:
print('Error: {0}'.format(message))
def hutil_log_info(message):
"""
Log an informational message, being cautious of the possibility that hutil
may not be imported and configured
"""
if HUtilObject is not None:
HUtilObject.log(message)
else:
print('Info: {0}'.format(message))
def hutil_log_error(message):
"""
Log an error message, being cautious of the possibility that hutil may not
be imported and configured
"""
if HUtilObject is not None:
HUtilObject.error(message)
else:
print('Error: {0}'.format(message))
def log_and_exit(operation, exit_code = GenericErrorCode, message = ''):
"""
Log the exit message and perform the exit
"""
if exit_code == 0:
waagent_log_info(message)
hutil_log_info(message)
exit_status = 'success'
else:
waagent_log_error(message)
hutil_log_error(message)
exit_status = 'failed'
if HUtilObject is not None:
HUtilObject.do_exit(exit_code, operation, exit_status, str(exit_code),
message)
else:
update_status_file(operation, str(exit_code), exit_status, message)
sys.exit(exit_code)
# Exceptions
# Exceptions that are expected to be caught by the main method include an
# error_code field holding the integer with which to exit from main
class AzureMonitorAgentForLinuxException(Exception):
"""
Base exception class for all exceptions; as such, its error code is the
basic error code traditionally returned in Linux: 1
"""
error_code = GenericErrorCode
def get_error_message(self, operation):
"""
Return a descriptive error message based on this type of exception
"""
return '{0} failed with exit code {1}'.format(operation,
self.error_code)
class ParameterMissingException(AzureMonitorAgentForLinuxException):
"""
There is a missing parameter for the AzureMonitorLinuxAgent Extension
"""
error_code = MissingorInvalidParameterErrorCode
def get_error_message(self, operation):
return '{0} failed due to a missing parameter: {1}'.format(operation,
self.error_code)
if __name__ == '__main__' :
main()
|
889992570e1e5c0450b1c9660e43b9d30de23662
|
2617bfec230858814b32795c6a47249c54a15cac
|
/tests/clpy_tests/opencl_tests/test_memory.py
|
878e57491936287cdca9bc58eeed13b396393629
|
[
"MIT",
"NCSA",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
fixstars/clpy
|
a06a1281887470d8faee3ec204b56fbef2496fab
|
693485f85397cc110fa45803c36c30c24c297df0
|
refs/heads/clpy
| 2021-06-10T04:00:30.974447
| 2021-02-28T06:01:26
| 2021-02-28T06:01:26
| 136,439,592
| 154
| 20
|
NOASSERTION
| 2021-04-07T02:41:03
| 2018-06-07T07:33:04
|
Python
|
UTF-8
|
Python
| false
| false
| 15,673
|
py
|
test_memory.py
|
# -*- coding: utf-8 -*-
import unittest
import numpy
import clpy
import clpy.testing
class TestSingleDeviceMemoryPool(unittest.TestCase):
"""test class of SingleDeviceMemoryPool"""
def setUp(self):
self.pool = clpy.backend.memory.SingleDeviceMemoryPool()
def test_malloc(self):
p = self.pool.malloc(1)
self.assertFalse(p.buf.isNull())
def test_scalar(self):
for type in [numpy.int8, numpy.int16, numpy.int32, numpy.int64,
numpy.uint8, numpy.uint16, numpy.uint32, numpy.uint64,
numpy.float32, numpy.float64]:
expected = type(12.5)
buf = self.pool.malloc(expected.nbytes)
host_buf = numpy.array(expected)
hostptr = host_buf.ctypes.get_as_parameter().value
clpy.testing.writebuf(buffer_to_write=buf.buf,
n_bytes=expected.nbytes,
host_ptr=hostptr)
actual = numpy.empty(1, dtype=type)
ahostptr = actual.ctypes.get_as_parameter().value
clpy.testing.readbuf(buffer_to_read=buf.buf,
n_bytes=expected.nbytes,
host_ptr=ahostptr)
self.assertEqual(expected, actual[0])
def test_vector(self):
for type in [numpy.int8, numpy.int16, numpy.int32, numpy.int64,
numpy.uint8, numpy.uint16, numpy.uint32, numpy.uint64,
numpy.float32, numpy.float64]:
expected = numpy.array([12.5, 13.4, 14.3, 15.2, 16.1], dtype=type)
buf = self.pool.malloc(expected.nbytes)
hostptr = expected.ctypes.get_as_parameter().value
clpy.testing.writebuf(buffer_to_write=buf.buf,
n_bytes=expected.nbytes,
host_ptr=hostptr)
actual = numpy.empty(5, dtype=type)
ahostptr = actual.ctypes.get_as_parameter().value
clpy.testing.readbuf(buffer_to_read=buf.buf, n_bytes=actual.nbytes,
host_ptr=ahostptr)
self.assertTrue(numpy.all(expected == actual))
def test_no_melt(self):
expected0 = numpy.array(
[[1.234, 56.78], [1.234, 56.78]], dtype="float64")
expected1 = numpy.array(
[[10234, 56078], [10234, 56078]], dtype="uint32")
buf0 = self.pool.malloc(expected0.nbytes)
buf1 = self.pool.malloc(expected1.nbytes)
hostptr0 = expected0.ctypes.get_as_parameter().value
hostptr1 = expected1.ctypes.get_as_parameter().value
clpy.testing.writebuf(buffer_to_write=buf0.buf,
n_bytes=expected0.nbytes,
host_ptr=hostptr0)
clpy.testing.writebuf(buffer_to_write=buf1.buf,
n_bytes=expected1.nbytes,
host_ptr=hostptr1)
actual0 = numpy.empty(expected0.shape, dtype=expected0.dtype)
actual1 = numpy.empty(expected1.shape, dtype=expected1.dtype)
clpy.testing.readbuf(buffer_to_read=buf0.buf, n_bytes=actual0.nbytes,
host_ptr=actual0.ctypes.get_as_parameter().value)
clpy.testing.readbuf(buffer_to_read=buf1.buf, n_bytes=actual1.nbytes,
host_ptr=actual1.ctypes.get_as_parameter().value)
self.assertTrue(numpy.all(expected0 == actual0))
self.assertTrue(numpy.all(expected1 == actual1))
class TestMemoryPointer(unittest.TestCase):
"""test class of MemoryPointer"""
def test_offset_read(self):
count = 128
totaldata = numpy.arange(count, dtype="uint64")
mem = clpy.backend.Memory(totaldata.nbytes)
hostptr = totaldata.ctypes.get_as_parameter().value
clpy.testing.writebuf(buffer_to_write=mem.buf,
n_bytes=totaldata.nbytes,
host_ptr=hostptr)
itemsize = totaldata.itemsize
for offset in range(0, count, 32):
ptr = clpy.backend.MemoryPointer(mem, totaldata.itemsize * offset)
actual = numpy.empty(count - offset, totaldata.dtype)
hostptr = actual.ctypes.get_as_parameter().value
clpy.testing.readbuf(buffer_to_read=ptr.buf,
offset=offset * itemsize,
n_bytes=actual.nbytes, host_ptr=hostptr)
expected = totaldata[offset:]
self.assertTrue(numpy.all(actual == expected))
def test_offset_write(self):
count = 256
step = 32
expected = numpy.empty(count, dtype="uint64")
mem = clpy.backend.Memory(expected.nbytes)
for offset in range(0, count // 2, step):
ptr = clpy.backend.MemoryPointer(mem, expected.itemsize * offset)
val = offset + 1
data = numpy.array([val] * (count - offset * 2), expected.dtype)
hostptr = data.ctypes.get_as_parameter().value
clpy.testing.writebuf(buffer_to_write=ptr.buf, offset=ptr.offset,
n_bytes=data.nbytes,
host_ptr=hostptr)
expected[offset:offset + step] = val
if offset == 0:
expected[-step:] = val
else:
expected[-offset - step:-offset] = val
actual = numpy.empty(count, dtype="uint64")
clpy.testing.readbuf(buffer_to_read=mem.buf, n_bytes=actual.nbytes,
host_ptr=actual.ctypes.get_as_parameter().value)
self.assertTrue(numpy.all(actual == expected))
def test_add_read(self):
count = 128
step = 32
totaldata = numpy.arange(count, dtype="uint64")
mem = clpy.backend.Memory(totaldata.nbytes)
hostptr = totaldata.ctypes.get_as_parameter().value
clpy.testing.writebuf(buffer_to_write=mem.buf,
n_bytes=totaldata.nbytes,
host_ptr=hostptr)
ptr = clpy.backend.MemoryPointer(mem, 0)
for offset in range(0, count, step):
actual = numpy.empty(count - offset, totaldata.dtype)
hostptr = actual.ctypes.get_as_parameter().value
clpy.testing.readbuf(buffer_to_read=ptr.buf, offset=ptr.offset,
n_bytes=actual.nbytes,
host_ptr=hostptr)
expected = totaldata[offset:]
self.assertTrue(numpy.all(actual == expected))
ptr = ptr + totaldata.itemsize * step
def test_add_write(self):
count = 256
step = 32
expected = numpy.empty(count, dtype="uint64")
mem = clpy.backend.Memory(expected.nbytes)
ptr = clpy.backend.MemoryPointer(mem, 0)
for offset in range(0, count // 2, step):
val = offset + 1
data = numpy.array([val] * (count - offset * 2), expected.dtype)
hostptr = data.ctypes.get_as_parameter().value
clpy.testing.writebuf(buffer_to_write=ptr.buf, offset=ptr.offset,
n_bytes=data.nbytes,
host_ptr=hostptr)
expected[offset:offset + step] = val
if offset == 0:
expected[-step:] = val
else:
expected[-offset - step:-offset] = val
ptr = ptr + expected.itemsize * step
actual = numpy.empty(count, dtype="uint64")
clpy.testing.readbuf(buffer_to_read=mem.buf, n_bytes=actual.nbytes,
host_ptr=actual.ctypes.get_as_parameter().value)
self.assertTrue(numpy.all(actual == expected))
class TestSingleDeviceMemoryPoolwithChunk(unittest.TestCase):
"""test class of SingleDeviceMemoryPool"""
def setUp(self):
# create chunk and free to prepare chunk in pool
self.pool = clpy.backend.memory.SingleDeviceMemoryPool()
clpy.backend.memory.set_allocator(self.pool.malloc)
self.pooled_chunk_size = self.pool._allocation_unit_size * 2
self.tmp = self.pool.malloc(self.pooled_chunk_size)
self.pool.free(self.tmp.buf, self.pooled_chunk_size, 0)
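# Allocating and then freeing a chunk of two allocation units seeds the
# pool, so that later small allocations in the tests below are served from
# this chunk and therefore carry a non-zero offset (asserted in each test).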
def tearDown(self):
clpy.backend.memory.set_allocator()
def test_chunk_copy_from_host(self):
size = 2
dtype = numpy.float32
wrong_value = 0
correct_value = 1
dummy = clpy.empty(size, dtype)
dummy.fill(wrong_value)
# check offset != 0
clpy_array = clpy.empty(size, dtype)
self.assertTrue(clpy_array.data.mem.offset != 0)
# write wrong_value to clpy_array
tmp = numpy.full(shape=size, fill_value=wrong_value, dtype=dtype)
nbytes = tmp.nbytes
ptr = tmp.ctypes.get_as_parameter().value
clpy.testing.writebuf(
buffer_to_write=clpy_array.data.buf,
# MemoryPointer.offset + Chunk.offset
offset=clpy_array.data.offset + clpy_array.data.mem.offset,
n_bytes=nbytes,
host_ptr=ptr,
)
# write correct_value to clpy array by copy_from_host
expected = numpy.full(
shape=size, fill_value=correct_value, dtype=dtype)
clpy_array.data.copy_from_host(
expected.ctypes.get_as_parameter(), clpy_array.nbytes)
# read clpy_array
actual = numpy.empty(shape=size, dtype=dtype)
nbytes = actual.nbytes
ptr = actual.ctypes.get_as_parameter().value
clpy.testing.readbuf(
buffer_to_read=clpy_array.data.buf,
# MemoryPointer.offset + Chunk.offset
offset=clpy_array.data.offset + clpy_array.data.mem.offset,
n_bytes=nbytes,
host_ptr=ptr,
)
self.assertTrue(numpy.allclose(actual, expected))
def test_chunk_copy_to_host(self):
size = 2
dtype = numpy.float32
wrong_value = 0
correct_value = 1
dummy = clpy.empty(size, dtype)
dummy.fill(wrong_value)
# chunk offset != 0
clpy_array = clpy.empty(size, dtype)
self.assertTrue(clpy_array.data.mem.offset != 0)
# write correct_value to clpy_array
expected = numpy.full(
shape=size, fill_value=correct_value, dtype=dtype)
nbytes = expected.nbytes
ptr = expected.ctypes.get_as_parameter().value
clpy.testing.writebuf(
buffer_to_write=clpy_array.data.buf,
# MemoryPointer.offset + Chunk.offset
offset=clpy_array.data.offset + clpy_array.data.mem.offset,
n_bytes=nbytes,
host_ptr=ptr,
)
# read clpy_array to ptr by copy_to_host
actual = numpy.full(shape=size, fill_value=wrong_value, dtype=dtype)
ptr = actual.ctypes.get_as_parameter()
nbytes = actual.nbytes
clpy_array.data.copy_to_host(ptr, nbytes)
self.assertTrue(numpy.allclose(actual, expected))
def test_chunk_function(self):
size = 2
dtype = numpy.float32
wrong_value = 0
correct_value = 1
dummy = clpy.empty(size, dtype)
dummy.fill(wrong_value)
# chunk offset != 0
clpy_array = clpy.empty(size, dtype)
self.assertTrue(clpy_array.data.mem.offset != 0)
# write wrong_value to clpy_array
tmp = numpy.full(shape=size, fill_value=wrong_value, dtype=dtype)
nbytes = tmp.nbytes
ptr = tmp.ctypes.get_as_parameter().value
clpy.testing.writebuf(
buffer_to_write=clpy_array.data.buf,
# MemoryPointer.offset + Chunk.offset
offset=clpy_array.data.offset + clpy_array.data.mem.offset,
n_bytes=nbytes,
host_ptr=ptr,
)
# write correct_value to clpy_array by fill (function.pyx)
clpy_array.fill(correct_value)
# read clpy_array
actual = numpy.empty(shape=size, dtype=dtype)
nbytes = actual.nbytes
ptr = actual.ctypes.get_as_parameter().value
clpy.testing.readbuf(
buffer_to_read=clpy_array.data.buf,
# MemoryPointer.offset + Chunk.offset
offset=clpy_array.data.offset + clpy_array.data.mem.offset,
n_bytes=nbytes,
host_ptr=ptr,
)
expected = numpy.full(size, fill_value=correct_value, dtype=dtype)
self.assertTrue(numpy.allclose(actual, expected))
def test_chunk_copy_from_device_src(self):
size = 2
dtype = numpy.float32
wrong_value = 0
correct_value = 1
dummy = clpy.empty(size, dtype)
dummy.fill(wrong_value)
# chunk offset != 0
src_array = clpy.empty(size, dtype)
self.assertTrue(src_array.data.mem.offset != 0)
# dst_array should be different Chunk from src_array
# to avoid CL_MEM_COPY_OVERLAP
# chunk offset == 0
dst_array = clpy.empty(self.pooled_chunk_size, dtype)
self.assertTrue(dst_array.data.mem.offset == 0)
dst_array.fill(wrong_value)
# write correct_value to src_array
expected = numpy.full(
shape=size, fill_value=correct_value, dtype=dtype)
nbytes = expected.nbytes
ptr = expected.ctypes.get_as_parameter().value
clpy.testing.writebuf(
buffer_to_write=src_array.data.buf,
# MemoryPointer.offset + Chunk.offset
offset=src_array.data.offset + src_array.data.mem.offset,
n_bytes=nbytes,
host_ptr=ptr,
)
# copy src_array to dst_array by copy_from_device
dst_array.data.copy_from_device(src_array.data, src_array.nbytes)
actual = dst_array.get()[0:2]
self.assertTrue(numpy.allclose(actual, expected))
def test_chunk_copy_from_device_dst(self):
size = 2
dtype = numpy.float32
wrong_value = 0
correct_value = 1
dummy = clpy.empty(size, dtype)
dummy.fill(wrong_value)
# chunk offset != 0
dst_array = clpy.empty(size, dtype)
self.assertTrue(dst_array.data.mem.offset != 0)
# write wrong_value to dst_array
tmp = numpy.full(shape=size, fill_value=wrong_value, dtype=dtype)
nbytes = tmp.nbytes
ptr = tmp.ctypes.get_as_parameter().value
clpy.testing.writebuf(
buffer_to_write=dst_array.data.buf,
# MemoryPointer.offset + Chunk.offset
offset=dst_array.data.offset + dst_array.data.mem.offset,
n_bytes=nbytes,
host_ptr=ptr,
)
# src_array should be different Chunk from dst_array
# to avoid CL_MEM_COPY_OVERLAP
# chunk with offset == 0
src_array = clpy.empty(self.pooled_chunk_size, dtype)
self.assertTrue(src_array.data.mem.offset == 0)
src_array.fill(correct_value)
# copy src_array to dst_array by copy_from_device
dst_array.data.copy_from_device(src_array.data, dst_array.nbytes)
# read dst_array
actual = numpy.empty(shape=size, dtype=dtype)
nbytes = actual.nbytes
ptr = actual.ctypes.get_as_parameter().value
clpy.testing.readbuf(
buffer_to_read=dst_array.data.buf,
# MemoryPointer.offset + Chunk.offset
offset=dst_array.data.offset + dst_array.data.mem.offset,
n_bytes=nbytes,
host_ptr=ptr,
)
expected = src_array.get()[0:2]
self.assertTrue(numpy.allclose(actual, expected))
if __name__ == "__main__":
unittest.main()
|
1f4cb86a176859a50cb911f001241c1acc3c335a
|
2d0d2a6b2d9cf2489200cddf54f24b5ba09f0675
|
/ParseIDT.py
|
31a93d8850cc289ac3dbb548a9f443f5aee2a7b6
|
[] |
no_license
|
gerhart01/Hyper-V-scripts
|
35287b39c488c61f42b8bce161673bbfbe711728
|
3bced0763ee6b1c3b2c628ec3f11ac4b24a65cd4
|
refs/heads/master
| 2023-05-05T21:03:32.055206
| 2023-04-18T15:28:43
| 2023-04-18T15:28:43
| 85,430,075
| 103
| 29
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 841
|
py
|
ParseIDT.py
|
import idaapi
idtr_str = Eval('send_dbg_command("r idtr")')
idtr = long(idtr_str[5:-1], 16)  # strip the 'idtr=' prefix and the trailing character
print "idtr = 0x%x" % idtr
for i in range(0,256):
buf = idaapi.dbg_read_memory(idtr+16*i, 16)
isr = 0
isr = isr + (ord(buf[11]) << (8*7))
isr = isr + (ord(buf[10]) << (8*6))
isr = isr + (ord(buf[9]) << (8*5))
isr = isr + (ord(buf[8]) << (8*4))
isr = isr + (ord(buf[7]) << (8*3))
isr = isr + (ord(buf[6]) << (8*2))
isr = isr + (ord(buf[1]) << (8*1))
isr = isr + (ord(buf[0]) << (8*0))
print "isr %x address = " % i,hex(isr)
idc.create_insn(isr)
idc.add_func(isr)
set_name(isr,str('mISR_') + hex(i).upper(), SN_NOWARN)
|
c082926c91fedbb302e1dcec4e418b6b12926622
|
ffdc77394c5b5532b243cf3c33bd584cbdc65cb7
|
/tests/ut/python/graph_syntax/python_builtin_functions/test_enumerate.py
|
bb103140b82671f6eca1deb0abe028820be926d0
|
[
"Apache-2.0",
"LicenseRef-scancode-proprietary-license",
"MPL-1.0",
"OpenSSL",
"LGPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"BSD-3-Clause-Open-MPI",
"MIT",
"MPL-2.0-no-copyleft-exception",
"NTP",
"BSD-3-Clause",
"GPL-1.0-or-later",
"0BSD",
"MPL-2.0",
"LicenseRef-scancode-free-unknown",
"AGPL-3.0-only",
"Libpng",
"MPL-1.1",
"IJG",
"GPL-2.0-only",
"BSL-1.0",
"Zlib",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-python-cwi",
"BSD-2-Clause",
"LicenseRef-scancode-gary-s-brown",
"LGPL-2.1-only",
"LicenseRef-scancode-other-permissive",
"Python-2.0",
"LicenseRef-scancode-mit-nagy",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense"
] |
permissive
|
mindspore-ai/mindspore
|
ca7d5bb51a3451c2705ff2e583a740589d80393b
|
54acb15d435533c815ee1bd9f6dc0b56b4d4cf83
|
refs/heads/master
| 2023-07-29T09:17:11.051569
| 2023-07-17T13:14:15
| 2023-07-17T13:14:15
| 239,714,835
| 4,178
| 768
|
Apache-2.0
| 2023-07-26T22:31:11
| 2020-02-11T08:43:48
|
C++
|
UTF-8
|
Python
| false
| false
| 7,606
|
py
|
test_enumerate.py
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" test enumerate"""
import operator
import numpy as np
import pytest
import mindspore.nn as nn
from mindspore import jit, context, Tensor
context.set_context(mode=context.GRAPH_MODE)
def test_enumerate_list_const():
class Net(nn.Cell):
def __init__(self):
super(Net, self).__init__()
self.value = [11, 22, 33, 44]
def construct(self):
index_sum = 0
value_sum = 0
for i, j in enumerate(self.value):
index_sum += i
value_sum += j
return index_sum, value_sum
net = Net()
assert net() == (6, 110)
def test_enumerate_tuple_const():
class Net(nn.Cell):
def __init__(self):
super(Net, self).__init__()
self.value = (11, 22, 33, 44)
def construct(self):
index_sum = 0
value_sum = 0
for i, j in enumerate(self.value):
index_sum += i
value_sum += j
return index_sum, value_sum
net = Net()
assert net() == (6, 110)
def test_enumerate_tensor_const():
class Net(nn.Cell):
def __init__(self):
super(Net, self).__init__()
self.value = Tensor(np.arange(2 * 3).reshape(2, 3))
def construct(self):
return enumerate(self.value)
net = Net()
net()
def test_enumerate_list_parameter():
class Net(nn.Cell):
def __init__(self):
super(Net, self).__init__()
def construct(self, x, y):
index_sum = 0
value = [x, y]
ret = ()
for i, j in enumerate(value):
index_sum += i
ret += (j,)
return index_sum, ret
x = Tensor(np.arange(4))
net = Net()
net(x, x)
def test_enumerate_tuple_parameter():
class Net(nn.Cell):
def __init__(self):
super(Net, self).__init__()
def construct(self, x, y):
index_sum = 0
value = (x, y)
ret = ()
for i, j in enumerate(value):
index_sum += i
ret += (j,)
return index_sum, ret
x = Tensor(np.arange(4))
net = Net()
net(x, x)
def test_enumerate_tensor_parameter():
class Net(nn.Cell):
def __init__(self):
super(Net, self).__init__()
def construct(self, x):
index_sum = 0
ret = ()
for i, j in enumerate(x):
index_sum += i
ret += (j,)
return index_sum, ret
x = Tensor(np.arange(2 * 3).reshape(2, 3))
net = Net()
net(x)
def test_enumerate_tuple_const_1():
class Net(nn.Cell):
def __init__(self):
super(Net, self).__init__()
self.value = (11, 22, 33, 44)
def construct(self):
index_sum = 0
value_sum = 0
for i in enumerate(self.value):
index_sum += i[0]
value_sum += i[1]
return index_sum, value_sum
net = Net()
assert net() == (6, 110)
def test_enumerate_tensor_const_1():
class Net(nn.Cell):
def __init__(self):
super(Net, self).__init__()
            self.value = Tensor(np.arange(2 * 3).reshape(2, 3))
def construct(self):
index_sum = 0
ret = ()
for i in enumerate(self.value):
index_sum += i[0]
ret += (i[1],)
return index_sum, ret
net = Net()
net()
def test_enumerate_tuple_parameter_1():
class Net(nn.Cell):
def __init__(self):
super(Net, self).__init__()
def construct(self, x, y):
index_sum = 0
value = (x, y)
ret = ()
for i in enumerate(value):
index_sum += i[0]
ret += (i[1],)
return index_sum, ret
x = Tensor(np.arange(4))
net = Net()
net(x, x)
def test_enumerate_tensor_parameter_1():
class Net(nn.Cell):
def __init__(self):
super(Net, self).__init__()
def construct(self, x):
index_sum = 0
ret = ()
for i in enumerate(x):
index_sum += i[0]
ret += (i[1],)
return index_sum, ret
x = Tensor(np.arange(2 * 3).reshape(2, 3))
net = Net()
net(x)
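# The *_2 variants below exercise enumerate() with a non-default start argument.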
def test_enumerate_tuple_const_2():
class Net(nn.Cell):
def __init__(self):
super(Net, self).__init__()
self.value = (11, 22, 33, 44)
def construct(self):
index_sum = 0
value_sum = 0
for i in enumerate(self.value, 1):
index_sum += i[0]
value_sum += i[1]
return index_sum, value_sum
net = Net()
assert net() == (10, 110)
def test_enumerate_tensor_const_2():
class Net(nn.Cell):
def __init__(self):
super(Net, self).__init__()
self.value = Tensor(np.arange(2 * 3).reshape(2, 3))
def construct(self):
index_sum = 0
ret = ()
for i in enumerate(self.value, 1):
index_sum += i[0]
ret += (i[1],)
return index_sum, ret
net = Net()
net()
def test_enumerate_tuple_parameter_2():
class Net(nn.Cell):
def __init__(self):
super(Net, self).__init__()
def construct(self, x, y):
index_sum = 0
value = (x, y)
ret = ()
for i in enumerate(value, 1):
index_sum += i[0]
ret += (i[1],)
return index_sum, ret
x = Tensor(np.arange(4))
net = Net()
net(x, x)
def test_enumerate_tensor_parameter_2():
class Net(nn.Cell):
def __init__(self):
super(Net, self).__init__()
def construct(self, x):
index_sum = 0
ret = ()
for i, j in enumerate(x, 1):
index_sum += i
ret += (j,)
return index_sum, ret
x = Tensor(np.arange(2 * 3).reshape(2, 3))
net = Net()
net(x)
def test_enumerate_start_type_error():
class Net(nn.Cell):
def __init__(self):
super(Net, self).__init__()
def construct(self, x):
return enumerate((x, x), start=1.2)
x = Tensor(np.arange(3 * 4 * 5).reshape((3, 4, 5)))
net = Net()
with pytest.raises(TypeError) as ex:
net(x)
assert "For 'enumerate', the 'start'" in str(ex.value)
def test_fallback_enumerate_with_numpy():
"""
Feature: JIT Fallback
Description: Test enumerate in graph mode with numpy input.
Expectation: No exception.
"""
@jit
def foo():
x = np.array([1, 2])
y = enumerate(x)
return tuple(y)
out = foo()
assert operator.eq(out, ((0, 1), (1, 2)))
|
17ca6fcf72bc9d612135d68edffc11400d867a56
|
7453911cee47edd9414ecfc66d189dc578f7e421
|
/benchmarks/bench_dns_resolver.py
|
ad24d810a04ba27ac1f5f3b9ad508cc8f0aa5df3
|
[
"Python-2.0",
"MIT"
] |
permissive
|
gevent/gevent
|
f20eca1852098e47f32eb062db646acfead36e71
|
6b22af0fa8eb2efa89fce36c35808948c67352b0
|
refs/heads/master
| 2023-08-31T19:27:29.410236
| 2023-08-31T10:26:35
| 2023-08-31T10:26:35
| 5,801,666
| 4,981
| 866
|
NOASSERTION
| 2023-09-13T14:16:59
| 2012-09-13T22:03:03
|
Python
|
UTF-8
|
Python
| false
| false
| 3,281
|
py
|
bench_dns_resolver.py
|
from __future__ import absolute_import, print_function, division
# Best run with dnsmasq configured as a caching nameserver
# with no timeouts and configured to point there via
# /etc/resolv.conf and GEVENT_RESOLVER_NAMESERVERS
# Remember to use --inherit-environ to make that work!
# dnsmasq -d --cache-size=100000 --local-ttl=1000000 --neg-ttl=10000000
# --max-ttl=100000000 --min-cache-ttl=10000000000 --no-poll --auth-ttl=100000000000
from gevent import monkey; monkey.patch_all()
import sys
import socket
import perf
import gevent
from zope.dottedname.resolve import resolve as drresolve
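# Indexes of xN.com hostnames skipped in every run (presumably names that fail or stall when resolved).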
blacklist = {
22, 55, 68, 69, 72, 52, 94, 62, 54, 71, 73, 74, 34, 36,
83, 86, 79, 81, 98, 99, 120, 130, 152, 161, 165, 169,
172, 199, 205, 239, 235, 254, 256, 286, 299, 259, 229,
190, 185, 182, 173, 160, 158, 153, 139, 138, 131, 129,
127, 125, 116, 112, 110, 106,
}
RUN_COUNT = 15 if hasattr(sys, 'pypy_version_info') else 5
def quiet(f, n):
try:
f(n)
except socket.gaierror:
pass
def resolve_seq(res, count=10, begin=0):
for index in range(begin, count + begin):
if index in blacklist:
continue
try:
res.gethostbyname('x%s.com' % index)
except socket.gaierror:
pass
def resolve_par(res, count=10, begin=0):
gs = []
for index in range(begin, count + begin):
if index in blacklist:
continue
gs.append(gevent.spawn(quiet, res.gethostbyname, 'x%s.com' % index))
gevent.joinall(gs)
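# Total number of hostnames each benchmark attempts to resolve per invocation.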
N = 300
def run_all(resolver_name, resolve):
res = drresolve('gevent.resolver.' + resolver_name + '.Resolver')
res = res()
# dnspython looks up cname aliases by default, but c-ares does not.
# dnsmasq can only cache one address with a given cname at a time,
# and many of our addresses clash on that, so dnspython is put at a
# severe disadvantage. We turn that off here.
res._getaliases = lambda hostname, family: []
if N > 150:
# 150 is the max concurrency in dnsmasq
count = N // 3
resolve(res, count=count)
resolve(res, count=count, begin=count)
resolve(res, count=count, begin=count * 2)
else:
resolve(res, count=N)
def main():
def worker_cmd(cmd, args):
cmd.extend(args.benchmark)
runner = perf.Runner(processes=5, values=3,
add_cmdline_args=worker_cmd)
all_names = 'dnspython', 'blocking', 'ares', 'thread'
runner.argparser.add_argument('benchmark',
nargs='*',
default='all',
choices=all_names + ('all',))
args = runner.parse_args()
if 'all' in args.benchmark or args.benchmark == 'all':
args.benchmark = ['all']
names = all_names
else:
names = args.benchmark
for name in names:
runner.bench_func(name + ' sequential',
run_all,
name, resolve_seq,
inner_loops=N)
runner.bench_func(name + ' parallel',
run_all,
name, resolve_par,
inner_loops=N)
if __name__ == '__main__':
main()
|
00292631815f6dc4cb0569fa808eee0fc66a84e9
|
5b6ba0f288b1e2ac236af846a9bf546a63228476
|
/mmtbx/conformation_dependent_library/LinkedResidues.py
|
4e0496fd4c9e50e7bcb30f09d3572540e24feeb1
|
[
"BSD-3-Clause-LBNL"
] |
permissive
|
cctbx/cctbx_project
|
5b547b416cadbdf95cca21dace9f54272a08d98a
|
7f4dfb6c873fd560920f697cbfd8a5ff6eed82fa
|
refs/heads/master
| 2023-08-17T17:44:05.077010
| 2023-08-16T22:40:22
| 2023-08-16T22:40:22
| 39,508,026
| 206
| 131
|
NOASSERTION
| 2023-09-14T17:12:55
| 2015-07-22T13:36:27
|
Python
|
UTF-8
|
Python
| false
| false
| 1,938
|
py
|
LinkedResidues.py
|
from __future__ import absolute_import, division, print_function
class LinkedResidues(list):
def __init__(self,
geometry,
length=3, # CDL & other psi/phi apps
allow_poly_ca=False,
registry=None,
include_non_linked=False,
):
assert registry is not None
self.length = length
self.geometry = geometry
self.registry = registry
if geometry is None:
self.bond_params_table = None
else:
self.bond_params_table = geometry.bond_params_table
self.errors = []
self.start = None
self.end = None
self.include_non_linked = include_non_linked
self.allow_poly_ca = allow_poly_ca
def __repr__(self):
if 1: return self.show()
outl = ''
for residue in self:
outl += '%s ' % residue.resname
return '"%s"\n' % outl
def show(self): assert 0
def show_detailed(self): assert 0
def atoms(self):
for residue in self:
for atom in residue.atoms():
yield atom
def are_linked(self, *args, **kwds): assert 0
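  # append() keeps a sliding window of at most self.length residues; unless
  # include_non_linked is set, leading residues are dropped until the window is linked.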
def append(self, residue):
list.append(self, residue)
while len(self)>self.length:
del self[0]
if self.include_non_linked: return
if len(self)>=self.length-1:
while not self.are_linked():
del self[0]
if len(self)==0: break
def get_i_seqs(self): assert 0
def get_resnames(self):
rc = []
for residue in self: rc.append(residue.resname)
return rc
def is_pure_main_conf(self):
for one in self:
if not one.is_pure_main_conf: return False
return True
def altloc(self):
if self.is_pure_main_conf(): return ' '
rc=[]
for one in self:
      rc.append(one.parent().altloc)
rc = list(filter(None,rc))
assert rc
return rc[0]
|
ea32e2470e1d0e6588d94d8920d963d3a79b036a
|
ec7591c3f478c43e76257aaa500d8f6a2e763d74
|
/stanza/tests/server/test_client.py
|
4697623591937834cf7a138699a04292c6aec3fb
|
[
"Apache-2.0"
] |
permissive
|
stanfordnlp/stanza
|
5cc3dbe70a96dd565639b7dae1efde6b4fa76985
|
c530c9af647d521262b56b717bcc38b0cfc5f1b8
|
refs/heads/main
| 2023-09-01T12:01:38.980322
| 2023-03-14T16:10:05
| 2023-03-14T16:10:05
| 104,854,615
| 4,281
| 599
|
NOASSERTION
| 2023-09-10T00:31:36
| 2017-09-26T08:00:56
|
Python
|
UTF-8
|
Python
| false
| false
| 12,826
|
py
|
test_client.py
|
"""
Tests that call a running CoreNLPClient.
"""
from http.server import BaseHTTPRequestHandler, HTTPServer
import multiprocessing
import pytest
import requests
import stanza.server as corenlp
import stanza.server.client as client
import shlex
import subprocess
import time
from stanza.models.constituency import tree_reader
from stanza.tests import *
# set the marker for this module
pytestmark = [pytest.mark.travis, pytest.mark.client]
TEXT = "Chris wrote a simple sentence that he parsed with Stanford CoreNLP.\n"
MAX_REQUEST_ATTEMPTS = 5
EN_GOLD = """
Sentence #1 (12 tokens):
Chris wrote a simple sentence that he parsed with Stanford CoreNLP.
Tokens:
[Text=Chris CharacterOffsetBegin=0 CharacterOffsetEnd=5 PartOfSpeech=NNP]
[Text=wrote CharacterOffsetBegin=6 CharacterOffsetEnd=11 PartOfSpeech=VBD]
[Text=a CharacterOffsetBegin=12 CharacterOffsetEnd=13 PartOfSpeech=DT]
[Text=simple CharacterOffsetBegin=14 CharacterOffsetEnd=20 PartOfSpeech=JJ]
[Text=sentence CharacterOffsetBegin=21 CharacterOffsetEnd=29 PartOfSpeech=NN]
[Text=that CharacterOffsetBegin=30 CharacterOffsetEnd=34 PartOfSpeech=WDT]
[Text=he CharacterOffsetBegin=35 CharacterOffsetEnd=37 PartOfSpeech=PRP]
[Text=parsed CharacterOffsetBegin=38 CharacterOffsetEnd=44 PartOfSpeech=VBD]
[Text=with CharacterOffsetBegin=45 CharacterOffsetEnd=49 PartOfSpeech=IN]
[Text=Stanford CharacterOffsetBegin=50 CharacterOffsetEnd=58 PartOfSpeech=NNP]
[Text=CoreNLP CharacterOffsetBegin=59 CharacterOffsetEnd=66 PartOfSpeech=NNP]
[Text=. CharacterOffsetBegin=66 CharacterOffsetEnd=67 PartOfSpeech=.]
""".strip()
class HTTPMockServerTimeoutContext:
""" For launching an HTTP server on certain port with an specified delay at responses """
def __init__(self, port, timeout_secs):
self.port = port
self.timeout_secs = timeout_secs
def __enter__(self):
class HTTPTimeoutHandler(BaseHTTPRequestHandler):
def do_POST(self_inner):
time.sleep(self.timeout_secs)
self_inner.send_response(200)
self_inner.send_header('Content-type', 'text/plain; charset=utf-8')
self_inner.end_headers()
self_inner.wfile.write("HTTPMockServerTimeout")
def run_webserver():
HTTPServer(('127.0.0.1',self.port), HTTPTimeoutHandler).serve_forever()
self.p = multiprocessing.Process(target=run_webserver, args=())
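        # Daemonise the mock server so it cannot outlive the test process.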
self.p.daemon = True
self.p.start()
def __exit__(self, exc_type, exc_value, exc_traceback):
self.p.terminate()
class TestCoreNLPClient:
@pytest.fixture(scope="class")
def corenlp_client(self):
""" Client to run tests on """
client = corenlp.CoreNLPClient(annotators='tokenize,ssplit,pos,lemma,ner,depparse',
server_id='stanza_main_test_server')
yield client
client.stop()
def test_connect(self, corenlp_client):
corenlp_client.ensure_alive()
assert corenlp_client.is_active
assert corenlp_client.is_alive()
def test_context_manager(self):
with corenlp.CoreNLPClient(annotators="tokenize,ssplit",
endpoint="http://localhost:9001") as context_client:
ann = context_client.annotate(TEXT)
assert corenlp.to_text(ann.sentence[0]) == TEXT[:-1]
def test_no_duplicate_servers(self):
"""We expect a second server on the same port to fail"""
with pytest.raises(corenlp.PermanentlyFailedException):
with corenlp.CoreNLPClient(annotators="tokenize,ssplit") as duplicate_server:
raise RuntimeError("This should have failed")
def test_annotate(self, corenlp_client):
ann = corenlp_client.annotate(TEXT)
assert corenlp.to_text(ann.sentence[0]) == TEXT[:-1]
def test_update(self, corenlp_client):
ann = corenlp_client.annotate(TEXT)
ann = corenlp_client.update(ann)
assert corenlp.to_text(ann.sentence[0]) == TEXT[:-1]
def test_tokensregex(self, corenlp_client):
pattern = '([ner: PERSON]+) /wrote/ /an?/ []{0,3} /sentence|article/'
matches = corenlp_client.tokensregex(TEXT, pattern)
assert len(matches["sentences"]) == 1
assert matches["sentences"][0]["length"] == 1
assert matches == {
"sentences": [{
"0": {
"text": "Chris wrote a simple sentence",
"begin": 0,
"end": 5,
"1": {
"text": "Chris",
"begin": 0,
"end": 1
}},
"length": 1
},]}
def test_semgrex(self, corenlp_client):
pattern = '{word:wrote} >nsubj {}=subject >obj {}=object'
matches = corenlp_client.semgrex(TEXT, pattern, to_words=True)
assert matches == [
{
"text": "wrote",
"begin": 1,
"end": 2,
"$subject": {
"text": "Chris",
"begin": 0,
"end": 1
},
"$object": {
"text": "sentence",
"begin": 4,
"end": 5
},
"sentence": 0,}]
def test_tregex(self, corenlp_client):
# the PP should be easy to parse
pattern = 'PP < NP'
matches = corenlp_client.tregex(TEXT, pattern)
print(matches)
assert matches == {
'sentences': [
{'0': {'sentIndex': 0, 'characterOffsetBegin': 45, 'codepointOffsetBegin': 45, 'characterOffsetEnd': 66, 'codepointOffsetEnd': 66,
'match': '(PP (IN with)\n (NP (NNP Stanford) (NNP CoreNLP)))\n',
'spanString': 'with Stanford CoreNLP', 'namedNodes': []}}
]
}
def ztest_tregex_trees(self, corenlp_client):
"""
Test the results of tregex run on trees w/o parsing
TODO: this needs a CoreNLP more recent than 4.5.1
"""
trees = tree_reader.read_trees("(ROOT (S (NP (NNP Jennifer)) (VP (VBZ has) (NP (JJ blue) (NN skin))))) (ROOT (S (NP (PRP I)) (VP (VBP like) (NP (PRP$ her) (NNS antennae)))))")
pattern = "VP < NP"
matches = corenlp_client.tregex(pattern=pattern, trees=trees)
assert matches == {
'sentences': [
{'0': {'sentIndex': 0, 'match': '(VP (VBZ has)\n (NP (JJ blue) (NN skin)))\n', 'spanString': 'has blue skin', 'namedNodes': []}},
{'0': {'sentIndex': 1, 'match': '(VP (VBP like)\n (NP (PRP$ her) (NNS antennae)))\n', 'spanString': 'like her antennae', 'namedNodes': []}}
]
}
def test_external_server_legacy_start_server(self):
""" Test starting up an external server and accessing with a client with start_server=False """
corenlp_home = client.resolve_classpath(None)
start_cmd = f'java -Xmx5g -cp "{corenlp_home}" edu.stanford.nlp.pipeline.StanfordCoreNLPServer -port 9001 ' \
f'-timeout 60000 -server_id stanza_external_server -serverProperties {SERVER_TEST_PROPS}'
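        # shlex.split tokenises the command like a shell would; the "and" guard leaves an empty string unchanged.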
start_cmd = start_cmd and shlex.split(start_cmd)
external_server_process = subprocess.Popen(start_cmd)
with corenlp.CoreNLPClient(start_server=False, endpoint="http://localhost:9001") as external_server_client:
ann = external_server_client.annotate(TEXT, annotators='tokenize,ssplit,pos', output_format='text')
assert external_server_process
external_server_process.terminate()
external_server_process.wait(5)
assert ann.strip() == EN_GOLD
def test_external_server_available(self):
""" Test starting up an external available server and accessing with a client with start_server=StartServer.DONT_START """
corenlp_home = os.getenv('CORENLP_HOME')
start_cmd = f'java -Xmx5g -cp "{corenlp_home}/*" edu.stanford.nlp.pipeline.StanfordCoreNLPServer -port 9001 ' \
f'-timeout 60000 -server_id stanza_external_server -serverProperties {SERVER_TEST_PROPS}'
start_cmd = start_cmd and shlex.split(start_cmd)
external_server_process = subprocess.Popen(start_cmd)
time.sleep(5) # wait and make sure the external CoreNLP server is up and running
with corenlp.CoreNLPClient(start_server=corenlp.StartServer.DONT_START, endpoint="http://localhost:9001") as external_server_client:
ann = external_server_client.annotate(TEXT, annotators='tokenize,ssplit,pos', output_format='text')
assert external_server_process
external_server_process.terminate()
external_server_process.wait(5)
assert ann.strip() == EN_GOLD
def test_external_server_unavailable(self):
""" Test accessing with a client with start_server=StartServer.DONT_START to an external unavailable server """
with pytest.raises(corenlp.AnnotationException):
with corenlp.CoreNLPClient(start_server=corenlp.StartServer.DONT_START, endpoint="http://localhost:9001") as external_server_client:
ann = external_server_client.annotate(TEXT, annotators='tokenize,ssplit,pos', output_format='text')
def test_external_server_timeout(self):
""" Test starting up an external server with long response time (20 seconds) and accessing with a client with start_server=StartServer.DONT_START and timeout=5000"""
with HTTPMockServerTimeoutContext(9001, 20):
time.sleep(5) # wait and make sure the external HTTPMockServer server is up and running
with pytest.raises(corenlp.TimeoutException):
with corenlp.CoreNLPClient(start_server=corenlp.StartServer.DONT_START, endpoint="http://localhost:9001", timeout=5000) as external_server_client:
ann = external_server_client.annotate(TEXT, annotators='tokenize,ssplit,pos', output_format='text')
def test_external_server_try_start_with_external(self):
""" Test starting up an external server and accessing with a client with start_server=StartServer.TRY_START """
corenlp_home = os.getenv('CORENLP_HOME')
start_cmd = f'java -Xmx5g -cp "{corenlp_home}/*" edu.stanford.nlp.pipeline.StanfordCoreNLPServer -port 9001 ' \
f'-timeout 60000 -server_id stanza_external_server -serverProperties {SERVER_TEST_PROPS}'
start_cmd = start_cmd and shlex.split(start_cmd)
external_server_process = subprocess.Popen(start_cmd)
with corenlp.CoreNLPClient(start_server=corenlp.StartServer.TRY_START,
annotators='tokenize,ssplit,pos',
endpoint="http://localhost:9001") as external_server_client:
ann = external_server_client.annotate(TEXT, annotators='tokenize,ssplit,pos', output_format='text')
assert external_server_process
external_server_process.terminate()
external_server_process.wait(5)
assert ann.strip() == EN_GOLD
def test_external_server_try_start(self):
""" Test starting up a server with a client with start_server=StartServer.TRY_START """
corenlp_home = os.getenv('CORENLP_HOME')
with corenlp.CoreNLPClient(start_server=corenlp.StartServer.TRY_START,
annotators='tokenize,ssplit,pos',
endpoint="http://localhost:9001") as external_server_client:
ann = external_server_client.annotate(TEXT, annotators='tokenize,ssplit,pos', output_format='text')
assert ann.strip() == EN_GOLD
def test_external_server_force_start(self):
""" Test starting up an external server and accessing with a client with start_server=StartServer.FORCE_START """
corenlp_home = os.getenv('CORENLP_HOME')
start_cmd = f'java -Xmx5g -cp "{corenlp_home}/*" edu.stanford.nlp.pipeline.StanfordCoreNLPServer -port 9001 ' \
f'-timeout 60000 -server_id stanza_external_server -serverProperties {SERVER_TEST_PROPS}'
start_cmd = start_cmd and shlex.split(start_cmd)
external_server_process = subprocess.Popen(start_cmd)
time.sleep(5) # wait and make sure the external CoreNLP server is up and running
with pytest.raises(corenlp.PermanentlyFailedException):
with corenlp.CoreNLPClient(start_server=corenlp.StartServer.FORCE_START, endpoint="http://localhost:9001") as external_server_client:
ann = external_server_client.annotate(TEXT, annotators='tokenize,ssplit,pos', output_format='text')
assert external_server_process
external_server_process.terminate()
external_server_process.wait(5)
|
0bf02ae65da9cc6ae03942e6ff4c44e5cfaaeeab
|
753cd066a9bd26b6c37c8d53a86c7a9c659ec18c
|
/tutorials/tutorials/pytorch/basics/walkthrough_code_only.py
|
5da62764b84013dd1a08bd6468680ba0d3d3157d
|
[
"MIT",
"CC-BY-4.0"
] |
permissive
|
graphcore/examples
|
ac872015808ed2a913d4d7bf0d63202ce15ebbae
|
e2f834dd60e7939672c1795b4ac62e89ad0bca49
|
refs/heads/master
| 2023-08-05T02:08:12.341836
| 2023-07-27T11:13:10
| 2023-07-27T11:13:10
| 143,977,106
| 311
| 80
|
MIT
| 2023-09-11T16:42:56
| 2018-08-08T07:29:17
|
Python
|
UTF-8
|
Python
| false
| false
| 4,876
|
py
|
walkthrough_code_only.py
|
# Copyright (c) 2020 Graphcore Ltd. All rights reserved.
# THIS FILE IS AUTOGENERATED. Rerun SST after editing source file: walkthrough.py
import torch
import poptorch
import torchvision
import torch.nn as nn
import matplotlib.pyplot as plt
from tqdm import tqdm
# Set torch random seed for reproducibility
torch.manual_seed(42)
transform = torchvision.transforms.Compose(
[
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize((0.5,), (0.5,)),
]
)
train_dataset = torchvision.datasets.FashionMNIST("~/.torch/datasets", transform=transform, download=True, train=True)
test_dataset = torchvision.datasets.FashionMNIST("~/.torch/datasets", transform=transform, download=True, train=False)
classes = (
"T-shirt",
"Trouser",
"Pullover",
"Dress",
"Coat",
"Sandal",
"Shirt",
"Sneaker",
"Bag",
"Ankle boot",
)
plt.figure(figsize=(30, 15))
for i, (image, label) in enumerate(train_dataset):
if i == 15:
break
image = (image / 2 + 0.5).numpy() # reverse transformation
ax = plt.subplot(5, 5, i + 1)
ax.set_title(classes[label])
plt.imshow(image[0])
plt.savefig("sample_images.png")
class ClassificationModel(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 5, 3)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(5, 12, 5)
self.norm = nn.GroupNorm(3, 12)
self.fc1 = nn.Linear(972, 100)
self.relu = nn.ReLU()
self.fc2 = nn.Linear(100, 10)
self.log_softmax = nn.LogSoftmax(dim=1)
self.loss = nn.NLLLoss()
def forward(self, x, labels=None):
x = self.pool(self.relu(self.conv1(x)))
x = self.norm(self.relu(self.conv2(x)))
x = torch.flatten(x, start_dim=1)
x = self.relu(self.fc1(x))
x = self.log_softmax(self.fc2(x))
# The model is responsible for the calculation
# of the loss when using an IPU. We do it this way:
if self.training:
return x, self.loss(x, labels)
return x
model = ClassificationModel()
model.train()
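# poptorch.Options() holds IPU execution options (the defaults target a single IPU);
# poptorch.DataLoader wraps torch's DataLoader with IPU-aware batching.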
opts = poptorch.Options()
train_dataloader = poptorch.DataLoader(opts, train_dataset, batch_size=16, shuffle=True, num_workers=20)
optimizer = poptorch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
poptorch_model = poptorch.trainingModel(model, options=opts, optimizer=optimizer)
epochs = 5
for epoch in tqdm(range(epochs), desc="epochs"):
total_loss = 0.0
for data, labels in tqdm(train_dataloader, desc="batches", leave=False):
output, loss = poptorch_model(data, labels)
total_loss += loss
poptorch_model.detachFromDevice()
torch.save(model.state_dict(), "classifier.pth")
model = model.eval()
poptorch_model_inf = poptorch.inferenceModel(model, options=opts)
test_dataloader = poptorch.DataLoader(opts, test_dataset, batch_size=32, num_workers=10)
predictions, labels = [], []
for data, label in test_dataloader:
predictions += poptorch_model_inf(data).data.max(dim=1).indices
labels += label
poptorch_model_inf.detachFromDevice()
from sklearn.metrics import accuracy_score, confusion_matrix, ConfusionMatrixDisplay
print(f"Eval accuracy: {100 * accuracy_score(labels, predictions):.2f}%")
cm = confusion_matrix(labels, predictions)
cm_plot = ConfusionMatrixDisplay(cm, display_labels=classes).plot(xticks_rotation="vertical")
cm_plot.figure_.savefig("confusion_matrix.png")
model = ClassificationModel()
model.load_state_dict(torch.load("classifier.pth"))
model.eval()
poptorch_model = poptorch.inferenceModel(model, options=poptorch.Options())
from PIL import Image, ImageOps
img = Image.open("images/trousers.jpg").resize((28, 28))
img = ImageOps.grayscale(img)
if img.getpixel((1, 1)) > 200:
img = ImageOps.invert(img)
transform = torchvision.transforms.Compose(
[
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize((0.5,), (0.5,)),
]
)
img_tensor = transform(img)
f, ax = plt.subplots(1, 2)
ax[0].imshow(Image.open("images/trousers.jpg"))
ax[0].set_title("Original image")
img_transform = torchvision.transforms.ToPILImage()
ax[1].imshow((img_transform(img_tensor)).convert("RGBA"))
ax[1].set_title("Transformed image")
plt.savefig("image_before_after.png")
img_tensor = img_tensor.unsqueeze(0)
output = poptorch_model(img_tensor)
print(output)
prediction_idx = int(output.argmax())
poptorch_model.detachFromDevice()
print("IPU predicted class:", classes[prediction_idx])
model = ClassificationModel()
model.load_state_dict(torch.load("classifier.pth"))
model.eval()
output = model(img_tensor)
print("CPU predicted class:", classes[int(output.argmax())])
opts = poptorch.Options().deviceIterations(20).replicationFactor(2).randomSeed(123).useIpuModel(True)
# Generated:2022-09-27T15:26 Source:walkthrough.py SST:0.0.8
|
f44b089b497fb0e6dee31644ceb80ec1b0b64680
|
e7aad0b1c5d8907dbb52000c482c396d1b801751
|
/test/functional/test-framework/log/presentation_policy.py
|
5409d084c17173883d10d47a0da9afc8b27ed15b
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
Open-CAS/open-cas-linux
|
c57d60f860702d7bc380c5d85cf502c0bf5e1bae
|
93334b4675afee8815f8ea12bb7297e0fd2a4195
|
refs/heads/master
| 2023-07-12T10:32:26.921455
| 2023-07-03T12:24:47
| 2023-07-03T12:24:47
| 178,356,155
| 202
| 84
|
BSD-3-Clause
| 2023-07-03T12:24:49
| 2019-03-29T07:37:15
|
Python
|
UTF-8
|
Python
| false
| false
| 464
|
py
|
presentation_policy.py
|
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
class PresentationPolicy:
def __init__(self, standard_log, group_begin_func):
self.standard = standard_log
self.group_begin = group_begin_func
def std_log_entry(msg_id, msg, log_result, html_node):
pass
def group_log_begin(msg_id, msg, html_node):
return html_node, html_node
null_policy = PresentationPolicy(std_log_entry, group_log_begin)
|
7448e0f43c44f8bf1bc9ea37c2969944f24e9f4f
|
ffb0b623455f22af81a03eb52889bd1bfed50566
|
/src/bandersnatch/tests/plugins/test_allowlist_name.py
|
ae5383fb4311c61616baca7f62310f5b7d6aa8a9
|
[
"AFL-3.0"
] |
permissive
|
pypa/bandersnatch
|
c5ba356caae55e4edb80005da625b04e7fb70500
|
bf19ea547086c1b9dd997d1dc00081109b5cd626
|
refs/heads/main
| 2023-09-03T03:27:19.538217
| 2023-08-28T23:55:04
| 2023-08-28T23:55:04
| 133,377,409
| 405
| 157
|
AFL-3.0
| 2023-09-13T10:46:33
| 2018-05-14T14:52:22
|
Python
|
UTF-8
|
Python
| false
| false
| 13,808
|
py
|
test_allowlist_name.py
|
import os
from collections import defaultdict
from pathlib import Path
from tempfile import TemporaryDirectory
from unittest import TestCase
import bandersnatch.filter
import bandersnatch.storage
from bandersnatch.master import Master
from bandersnatch.mirror import BandersnatchMirror
from bandersnatch.package import Package
from bandersnatch.tests.mock_config import mock_config
class TestAllowListProject(TestCase):
"""
Tests for the bandersnatch filtering classes
"""
def setUp(self) -> None:
self.cwd = os.getcwd()
self.tempdir = TemporaryDirectory()
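        # Reset the module-level storage-plugin cache so each test loads plugins afresh.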
bandersnatch.storage.loaded_storage_plugins = defaultdict(list)
os.chdir(self.tempdir.name)
def tearDown(self) -> None:
if self.tempdir:
assert self.cwd
os.chdir(self.cwd)
self.tempdir.cleanup()
def test__plugin__loads__explicitly_enabled(self) -> None:
mock_config(contents="""\
[mirror]
storage-backend = filesystem
workers = 2
[plugins]
enabled =
allowlist_project
""")
plugins = bandersnatch.filter.LoadedFilters().filter_project_plugins()
names = [plugin.name for plugin in plugins]
self.assertListEqual(names, ["allowlist_project"])
self.assertEqual(len(plugins), 1)
def test__plugin__loads__default(self) -> None:
mock_config("""\
[mirror]
storage-backend = filesystem
workers = 2
[plugins]
""")
plugins = bandersnatch.filter.LoadedFilters().filter_project_plugins()
names = [plugin.name for plugin in plugins]
self.assertNotIn("allowlist_project", names)
def test__filter__matches__package(self) -> None:
mock_config("""\
[mirror]
storage-backend = filesystem
workers = 2
[plugins]
enabled =
allowlist_project
[allowlist]
packages =
foo
""")
mirror = BandersnatchMirror(Path("."), Master(url="https://foo.bar.com"))
mirror.packages_to_sync = {"foo": ""}
mirror._filter_packages()
self.assertIn("foo", mirror.packages_to_sync.keys())
def test__filter__nomatch_package(self) -> None:
mock_config("""\
[mirror]
storage-backend = filesystem
workers = 2
[plugins]
enabled =
allowlist_project
[allowlist]
packages =
foo
""")
mirror = BandersnatchMirror(Path("."), Master(url="https://foo.bar.com"))
mirror.packages_to_sync = {"foo": "", "foo2": ""}
mirror._filter_packages()
self.assertIn("foo", mirror.packages_to_sync.keys())
self.assertNotIn("foo2", mirror.packages_to_sync.keys())
def test__filter__name_only(self) -> None:
mock_config("""\
[mirror]
storage-backend = filesystem
workers = 2
[plugins]
enabled =
allowlist_project
[allowlist]
packages =
foo==1.2.3
""")
mirror = BandersnatchMirror(Path("."), Master(url="https://foo.bar.com"))
mirror.packages_to_sync = {"foo": "", "foo2": ""}
mirror._filter_packages()
self.assertIn("foo", mirror.packages_to_sync.keys())
self.assertNotIn("foo2", mirror.packages_to_sync.keys())
def test__filter__varying__specifiers(self) -> None:
mock_config("""\
[mirror]
storage-backend = filesystem
workers = 2
[plugins]
enabled =
allowlist_project
[allowlist]
packages =
foo==1.2.3
bar~=3.0,<=1.5
""")
mirror = BandersnatchMirror(Path("."), Master(url="https://foo.bar.com"))
mirror.packages_to_sync = {
"foo": "",
"bar": "",
"snu": "",
}
mirror._filter_packages()
self.assertEqual({"foo": "", "bar": ""}, mirror.packages_to_sync)
def test__filter__commented__out(self) -> None:
mock_config("""\
[mirror]
storage-backend = filesystem
workers = 2
[plugins]
enabled =
allowlist_project
[allowlist]
packages =
foo==1.2.3 # inline comment
# bar
""")
mirror = BandersnatchMirror(Path("."), Master(url="https://foo.bar.com"))
mirror.packages_to_sync = {
"foo": "",
"bar": "",
"snu": "",
}
mirror._filter_packages()
self.assertEqual({"foo": ""}, mirror.packages_to_sync)
class TestAllowlistRelease(TestCase):
"""
Tests for the bandersnatch filtering classes
"""
def setUp(self) -> None:
self.cwd = os.getcwd()
self.tempdir = TemporaryDirectory()
os.chdir(self.tempdir.name)
def tearDown(self) -> None:
if self.tempdir:
assert self.cwd
os.chdir(self.cwd)
self.tempdir.cleanup()
def test__plugin__loads__explicitly_enabled(self) -> None:
mock_config("""\
[plugins]
enabled =
allowlist_release
""")
plugins = bandersnatch.filter.LoadedFilters().filter_release_plugins()
names = [plugin.name for plugin in plugins]
self.assertListEqual(names, ["allowlist_release"])
self.assertEqual(len(plugins), 1)
def test__plugin__doesnt_load__explicitly__disabled(self) -> None:
mock_config("""\
[mirror]
storage-backend = filesystem
workers = 2
[plugins]
enabled =
allowlist_package
""")
plugins = bandersnatch.filter.LoadedFilters().filter_release_plugins()
names = [plugin.name for plugin in plugins]
self.assertNotIn("allowlist_release", names)
def test__filter__matches__release(self) -> None:
mock_config("""\
[mirror]
storage-backend = filesystem
workers = 2
[plugins]
enabled =
allowlist_release
[allowlist]
packages =
foo==1.2.0
""")
mirror = BandersnatchMirror(Path("."), Master(url="https://foo.bar.com"))
pkg = Package("foo", 1)
pkg._metadata = {
"info": {"name": "foo"},
"releases": {"1.2.0": {}, "1.2.1": {}},
}
pkg.filter_all_releases(mirror.filters.filter_release_plugins())
self.assertEqual(pkg.releases, {"1.2.0": {}})
def test__filter__matches__release__commented__inline(self) -> None:
mock_config("""\
[mirror]
storage-backend = filesystem
workers = 2
[plugins]
enabled =
allowlist_release
[allowlist]
packages =
foo==1.2.0 # some inline comment
""")
mirror = BandersnatchMirror(Path("."), Master(url="https://foo.bar.com"))
pkg = Package("foo", 1)
pkg._metadata = {
"info": {"name": "foo"},
"releases": {"1.2.0": {}, "1.2.1": {}},
}
pkg.filter_all_releases(mirror.filters.filter_release_plugins())
self.assertEqual(pkg.releases, {"1.2.0": {}})
def test__dont__filter__prereleases(self) -> None:
mock_config("""\
[mirror]
storage-backend = filesystem
workers = 2
[plugins]
enabled =
allowlist_release
[allowlist]
packages =
foo<=1.2.0
""")
mirror = BandersnatchMirror(Path("."), Master(url="https://foo.bar.com"))
pkg = Package("foo", 1)
pkg._metadata = {
"info": {"name": "foo"},
"releases": {
"1.1.0a2": {},
"1.1.1beta1": {},
"1.2.0": {},
"1.2.1": {},
"1.2.2alpha3": {},
"1.2.3rc1": {},
},
}
pkg.filter_all_releases(mirror.filters.filter_release_plugins())
self.assertEqual(pkg.releases, {"1.1.0a2": {}, "1.1.1beta1": {}, "1.2.0": {}})
def test__casing__no__affect(self) -> None:
mock_config("""\
[mirror]
storage-backend = filesystem
workers = 2
[plugins]
enabled =
allowlist_release
[allowlist]
packages =
Foo<=1.2.0
""")
mirror = BandersnatchMirror(Path("."), Master(url="https://foo.bar.com"))
pkg = Package("foo", 1)
pkg._metadata = {
"info": {"name": "foo"},
"releases": {"1.2.0": {}, "1.2.1": {}},
}
pkg.filter_all_releases(mirror.filters.filter_release_plugins())
self.assertEqual(pkg.releases, {"1.2.0": {}})
class TestAllowlistRequirements(TestCase):
"""
Tests for the bandersnatch filtering by requirements
"""
def setUp(self) -> None:
self.cwd = os.getcwd()
self.tempdir = TemporaryDirectory()
os.chdir(self.tempdir.name)
def tearDown(self) -> None:
if self.tempdir:
assert self.cwd
os.chdir(self.cwd)
self.tempdir.cleanup()
def test__plugin__loads__explicitly_enabled(self) -> None:
mock_config("""\
[mirror]
storage-backend = filesystem
workers = 2
[plugins]
enabled =
project_requirements_pinned
""")
plugins = bandersnatch.filter.LoadedFilters().filter_release_plugins()
names = [plugin.name for plugin in plugins]
self.assertListEqual(names, ["project_requirements_pinned"])
self.assertEqual(len(plugins), 1)
def test__plugin__doesnt_load__explicitly__disabled(self) -> None:
mock_config("""\
[mirror]
storage-backend = filesystem
workers = 2
[plugins]
enabled =
allowlist_package
""")
plugins = bandersnatch.filter.LoadedFilters().filter_release_plugins()
names = [plugin.name for plugin in plugins]
self.assertNotIn("project_requirements", names)
def test__filter__matches__release(self) -> None:
with open(Path(self.tempdir.name) / "requirements.txt", "w") as fh:
fh.write("""\
# This is needed for workshop 1
#
foo==1.2.0 # via -r requirements.in
""")
mock_config(f"""\
[mirror]
storage-backend = filesystem
workers = 2
[plugins]
enabled =
project_requirements
project_requirements_pinned
[allowlist]
requirements_path = {self.tempdir.name}
requirements =
requirements.txt
""")
mirror = BandersnatchMirror(Path("."), Master(url="https://foo.bar.com"))
pkg = Package("foo", 1)
pkg._metadata = {
"info": {"name": "foo"},
"releases": {"1.2.0": {}, "1.2.1": {}},
}
pkg.filter_all_releases(mirror.filters.filter_release_plugins())
self.assertEqual({"1.2.0": {}}, pkg.releases)
def test__filter__find_files(self) -> None:
absolute_file_path = Path(self.tempdir.name) / "requirements.txt"
with open(absolute_file_path, "w") as fh:
fh.write("""\
# This is needed for workshop 1
#
foo==1.2.0 # via -r requirements.in
""")
mock_config(f"""\
[mirror]
storage-backend = filesystem
workers = 2
[plugins]
enabled =
project_requirements
[allowlist]
requirements =
{absolute_file_path}
""")
mirror = BandersnatchMirror(Path("."), Master(url="https://foo.bar.com"))
mirror.packages_to_sync = {
"foo": "",
"bar": "",
"baz": "",
}
mirror._filter_packages()
self.assertEqual({"foo": ""}, mirror.packages_to_sync)
def test__filter__requirements__pip__options(self) -> None:
absolute_file_path = Path(self.tempdir.name) / "requirements.txt"
with open(absolute_file_path, "w") as fh:
fh.write("""\
--extra-index-url https://self-hosted-foo.netname/simple
--trusted-host self-hosted-foo.netname
foo==1.2.0 # via -r requirements.in
""")
mock_config(f"""\
[mirror]
storage-backend = filesystem
workers = 2
[plugins]
enabled =
project_requirements
[allowlist]
requirements =
{absolute_file_path}
""")
mirror = BandersnatchMirror(Path("."), Master(url="https://foo.bar.com"))
mirror.packages_to_sync = {
"foo": "",
"bar": "",
"baz": "",
}
mirror._filter_packages()
self.assertEqual({"foo": ""}, mirror.packages_to_sync)
def test__filter__find__glob__files(self) -> None:
with open(Path(self.tempdir.name) / "requirements-project1.txt", "w") as fh:
fh.write("""\
#
foo==1.2.0 # via -r requirements.in
""")
with open(Path(self.tempdir.name) / "requirements-project2.txt", "w") as fh:
fh.write("""\
#
bar==2.3.0 # via -r requirements.in
""")
with open(Path(self.tempdir.name) / "project3.txt", "w") as fh:
fh.write("""\
#
baz==4.5.1 # via -r requirements.in
""")
mock_config(f"""\
[mirror]
storage-backend = filesystem
workers = 2
[plugins]
enabled =
project_requirements
[allowlist]
requirements_path = {self.tempdir.name}
requirements =
# Importing all the requirements-*.txt from the chosen folder
requirements-*.txt
""")
mirror = BandersnatchMirror(Path("."), Master(url="https://foo.bar.com"))
mirror.packages_to_sync = {
"foo": "",
"bar": "",
"baz": "",
}
mirror._filter_packages()
# Check that the packages in the two allowed files starting
# for requirements- are being considered
self.assertIn("foo", mirror.packages_to_sync)
self.assertIn("bar", mirror.packages_to_sync)
# Check that the package in the last file, excluded
# from the glob is not considered
self.assertNotIn("baz", mirror.packages_to_sync)
def test__filter__requirements__utf16__encoding(self) -> None:
absolute_file_path = Path(self.tempdir.name) / "requirements.txt"
with open(absolute_file_path, "w", encoding="UTF-16") as fh:
fh.write("""\
foo==1.2.0 # via -r requirements.in
""")
mock_config(f"""\
[mirror]
storage-backend = filesystem
workers = 2
[plugins]
enabled =
project_requirements
[allowlist]
requirements =
{absolute_file_path}
""")
mirror = BandersnatchMirror(Path("."), Master(url="https://foo.bar.com"))
mirror.packages_to_sync = {
"foo": "",
"bar": "",
"baz": "",
}
mirror._filter_packages()
self.assertEqual({"foo": ""}, mirror.packages_to_sync)
|
5ea38004cf355eea757a3da053acfe1b18062f81
|
c07b338d12f694069336da69379f60d04d0084c2
|
/example_project/example_project/settings.py
|
c251e675b23c9887a7f2efa4716ea31042636057
|
[
"BSD-3-Clause"
] |
permissive
|
richardbarran/django-photologue
|
2dfc74e6d01ebd4195149fef200a82d84caacfe4
|
95c81122e35fce373914449637c08cebbfcff600
|
refs/heads/master
| 2023-08-12T12:25:09.321683
| 2023-07-28T18:45:54
| 2023-07-28T18:45:54
| 5,278,549
| 188
| 68
|
BSD-3-Clause
| 2022-08-08T20:18:21
| 2012-08-02T22:16:54
|
Python
|
UTF-8
|
Python
| false
| false
| 4,547
|
py
|
settings.py
|
# Global settings for photologue example project.
import os
import sys
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = '=_v6sfp8u2uuhdncdz9t1_nu8(#8q4=40$f$4rorj4q3)f-nlc'
DEBUG = True
ALLOWED_HOSTS = []
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Note: added sitemaps to the INSTALLED_APPS just so that unit tests run,
# but not actually added a sitemap in urls.py.
'django.contrib.sitemaps',
'photologue',
'sortedm2m',
'example_project',
)
MIDDLEWARE = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'example_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'example_project/templates'), ],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
],
'debug': True,
},
},
]
WSGI_APPLICATION = 'example_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-gb'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'public', 'static')
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'public', 'media')
MEDIA_URL = '/media/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'example_project/static'),
)
SITE_ID = 1
# LOGGING CONFIGURATION
# A logging configuration that writes log messages to the console.
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
# Formatting of messages.
'formatters': {
# Don't need to show the time when logging to console.
'console': {
'format': '%(levelname)s %(name)s.%(funcName)s (%(lineno)d) %(message)s'
}
},
# The handlers decide what we should do with a logging message - do we email
# it, ditch it, or write it to a file?
'handlers': {
# Writing to console. Use only in dev.
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'console'
},
# Send logs to /dev/null.
'null': {
'level': 'DEBUG',
'class': 'logging.NullHandler',
},
},
# Loggers decide what is logged.
'loggers': {
'': {
# Default (suitable for dev) is to log to console.
'handlers': ['console'],
'level': 'INFO',
'propagate': False,
},
'photologue': {
# Default (suitable for dev) is to log to console.
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
# logging of SQL statements. Default is to ditch them (send them to
# null). Note that this logger only works if DEBUG = True.
'django.db.backends': {
'handlers': ['null'],
'level': 'DEBUG',
'propagate': False,
},
}
}
# Don't display logging messages to console during unit test runs.
if len(sys.argv) > 1 and sys.argv[1] == 'test':
LOGGING['loggers']['']['handlers'] = ['null']
LOGGING['loggers']['photologue']['handlers'] = ['null']
# Uncomment this for Amazon S3 file storage
# from example_storages.settings_s3boto import *
|
592877e28424be97195961189601cc62b6fbc755
|
ef2c1a0ae0f1746e58fcc160844788ab92a8d488
|
/tests/trainers/nlp/test_hf_trainer.py
|
25818558b61b619588b2089615f9645a626dd270
|
[
"MIT",
"LicenseRef-scancode-free-unknown",
"LGPL-2.1-or-later",
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
microsoft/archai
|
4d04476ef6a434148638ef91df0ef3bf2c948422
|
95d6e19a1523a701b3fbc249dd1a7d1e7ba44aee
|
refs/heads/main
| 2023-09-03T13:23:48.576626
| 2023-07-27T01:30:01
| 2023-07-27T01:30:01
| 245,036,506
| 439
| 97
|
MIT
| 2023-05-09T21:10:10
| 2020-03-05T00:54:29
|
Python
|
UTF-8
|
Python
| false
| false
| 2,081
|
py
|
test_hf_trainer.py
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import tempfile
import torch
from transformers import TrainerState, TrainingArguments
from archai.trainers.nlp.hf_trainer import HfTrainer
def test_hf_trainer_rotate_checkpoints():
model = torch.nn.Linear(10, 5)
args = TrainingArguments("tmp", save_total_limit=2, load_best_model_at_end=False)
trainer = HfTrainer(model, args=args)
state = TrainerState(best_model_checkpoint=None)
trainer.state = state
with tempfile.TemporaryDirectory() as temp_dir:
checkpoint_1 = os.path.join(temp_dir, "checkpoint-1")
os.mkdir(checkpoint_1)
checkpoint_2 = os.path.join(temp_dir, "checkpoint-2")
os.mkdir(checkpoint_2)
checkpoint_3 = os.path.join(temp_dir, "checkpoint-3")
os.mkdir(checkpoint_3)
# Assert that nothing happens when `save_total_limit` is None or 0
trainer.args.save_total_limit = None
trainer._rotate_checkpoints(output_dir=temp_dir)
assert os.path.exists(checkpoint_1)
assert os.path.exists(checkpoint_2)
assert os.path.exists(checkpoint_3)
trainer.args.save_total_limit = 0
trainer._rotate_checkpoints(output_dir=temp_dir)
assert os.path.exists(checkpoint_1)
assert os.path.exists(checkpoint_2)
assert os.path.exists(checkpoint_3)
# Assert that only the oldest checkpoint is deleted
trainer.args.save_total_limit = 2
trainer._rotate_checkpoints(output_dir=temp_dir)
assert not os.path.exists(checkpoint_1)
assert os.path.exists(checkpoint_2)
assert os.path.exists(checkpoint_3)
# Assert that the last checkpoint is not deleted when `load_best_model_at_end` is True
trainer.args.load_best_model_at_end = True
trainer.state.best_model_checkpoint = checkpoint_3
trainer._rotate_checkpoints(output_dir=temp_dir)
assert not os.path.exists(checkpoint_1)
assert os.path.exists(checkpoint_2)
assert os.path.exists(checkpoint_3)
|
5e7d7edf7a7c1507bcf85994f699306536db28a7
|
902abf2c8a0ae6147975864802575b5e543ef1e4
|
/src/ZODB/scripts/fsstats.py
|
dddd6b82fe5515c193a1f7679f23bb47f7b7bcdc
|
[
"ZPL-2.1"
] |
permissive
|
zopefoundation/ZODB
|
b4d37dfe232a60dccf226f82276d630148fe43db
|
0632974df76e90a3f00b45b995bdff0209dd2def
|
refs/heads/master
| 2023-09-04T05:26:06.885522
| 2023-08-01T17:16:24
| 2023-08-01T17:16:24
| 7,357,595
| 629
| 104
|
NOASSERTION
| 2023-08-01T17:16:26
| 2012-12-28T17:47:39
|
Python
|
UTF-8
|
Python
| false
| false
| 5,865
|
py
|
fsstats.py
|
#!/usr/bin/env python3
"""Print detailed statistics from fsdump output."""
import re
import sys
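# Patterns matching the "Trans ..." and "  data ..." lines emitted by fsdump.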
rx_txn = re.compile(r"tid=([0-9a-f]+).*size=(\d+)")
rx_data = re.compile(r"oid=([0-9a-f]+) size=(\d+) class=(\S+)")
def sort_byhsize(seq, reverse=False):
L = [(v.size(), k, v) for k, v in seq]
L.sort()
if reverse:
L.reverse()
return [(k, v) for n, k, v in L]
class Histogram(dict):
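    """Map a value (e.g. a size or a revision count) to the number of times it was seen."""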
def add(self, size):
self[size] = self.get(size, 0) + 1
def size(self):
return sum(self.values())
def mean(self):
product = sum([k * v for k, v in self.items()])
return product / self.size()
def median(self):
# close enough?
n = self.size() / 2
L = sorted(self.keys())
L.reverse()
while 1:
k = L.pop()
if self[k] > n:
return k
n -= self[k]
def mode(self):
mode = 0
value = 0
for k, v in self.items():
if v > value:
value = v
mode = k
return mode
def make_bins(self, binsize):
try:
maxkey = max(self.keys())
except ValueError:
maxkey = 0
self.binsize = binsize
self.bins = [0] * (1 + maxkey // binsize)
for k, v in self.items():
b = k // binsize
self.bins[b] += v
def report(self, name, binsize=50, usebins=False, gaps=True, skip=True):
if usebins:
# Use existing bins with whatever size they have
binsize = self.binsize
else:
# Make new bins
self.make_bins(binsize)
maxval = max(self.bins)
# Print up to 40 dots for a value
        dot = max(maxval // 40, 1)  # keep dot an integer so "*" * (n // dot) below stays valid
tot = sum(self.bins)
print(name)
print("Total", tot, end=' ')
print("Median", self.median(), end=' ')
print("Mean", self.mean(), end=' ')
print("Mode", self.mode(), end=' ')
print("Max", max(self))
print("One * represents", dot)
gap = False
cum = 0
for i, n in enumerate(self.bins):
if gaps and (not n or (skip and not n / dot)):
if not gap:
print(" ...")
gap = True
continue
gap = False
p = 100 * n / tot
cum += n
pc = 100 * cum / tot
print("%6d %6d %3d%% %3d%% %s" % (
i * binsize, n, p, pc, "*" * (n // dot)))
print()
def class_detail(class_size):
# summary of classes
fmt = "%5s %6s %6s %6s %-50.50s"
labels = ["num", "median", "mean", "mode", "class"]
print(fmt % tuple(labels))
print(fmt % tuple(["-" * len(s) for s in labels]))
for klass, h in sort_byhsize(class_size.items()):
print(fmt % (h.size(), h.median(), h.mean(), h.mode(), klass))
print()
# per class details
for klass, h in sort_byhsize(class_size.items(), reverse=True):
h.make_bins(50)
if len(tuple(filter(None, h.bins))) == 1:
continue
h.report("Object size for %s" % klass, usebins=True)
def revision_detail(lifetimes, classes):
# Report per-class details for any object modified more than once
for name, oids in classes.items():
h = Histogram()
keep = False
for oid in dict.fromkeys(oids, 1):
L = lifetimes.get(oid)
n = len(L)
h.add(n)
if n > 1:
keep = True
if keep:
h.report("Number of revisions for %s" % name, binsize=10)
def main(path=None):
if path is None:
path = sys.argv[1]
txn_objects = Histogram() # histogram of txn size in objects
txn_bytes = Histogram() # histogram of txn size in bytes
obj_size = Histogram() # histogram of object size
n_updates = Histogram() # oid -> num updates
n_classes = Histogram() # class -> num objects
lifetimes = {} # oid -> list of tids
class_size = {} # class -> histogram of object size
classes = {} # class -> list of oids
MAX = 0
objects = 0
tid = None
f = open(path)
for i, line in enumerate(f):
if MAX and i > MAX:
break
if line.startswith(" data"):
m = rx_data.search(line)
if not m:
continue
oid, size, klass = m.groups()
size = int(size)
obj_size.add(size)
n_updates.add(oid)
n_classes.add(klass)
h = class_size.get(klass)
if h is None:
h = class_size[klass] = Histogram()
h.add(size)
L = lifetimes.setdefault(oid, [])
L.append(tid)
L = classes.setdefault(klass, [])
L.append(oid)
objects += 1
elif line.startswith("Trans"):
if tid is not None:
txn_objects.add(objects)
m = rx_txn.search(line)
if not m:
continue
tid, size = m.groups()
size = int(size)
objects = 0
txn_bytes.add(size)
if objects:
txn_objects.add(objects)
f.close()
print("Summary: %d txns, %d objects, %d revisions" % (
txn_objects.size(), len(n_updates), n_updates.size()))
print()
txn_bytes.report("Transaction size (bytes)", binsize=1024)
txn_objects.report("Transaction size (objects)", binsize=10)
obj_size.report("Object size", binsize=128)
# object lifetime info
h = Histogram()
for k, v in lifetimes.items():
h.add(len(v))
h.report("Number of revisions", binsize=10, skip=False)
# details about revisions
revision_detail(lifetimes, classes)
class_detail(class_size)
if __name__ == "__main__":
main()
|
cff0868de1e6f101dad30c7e276d0faf1f1f8835
|
8b5d61f17ab2e4c158270cf6dda79f9a47870df1
|
/sknetwork/embedding/svd.py
|
d8fd7dfdad04df02ef08e20e2d16503f929eaec6
|
[
"BSD-3-Clause"
] |
permissive
|
sknetwork-team/scikit-network
|
55a5ecbbbd2dfc78095aa74f3953c770357cadbb
|
95cec38d56b086b95616d2f1d13a9b98c6c8b534
|
refs/heads/master
| 2023-09-03T21:56:42.345214
| 2023-05-22T14:12:57
| 2023-05-22T14:12:57
| 135,287,970
| 581
| 73
|
NOASSERTION
| 2023-07-21T05:42:25
| 2018-05-29T11:45:42
|
Python
|
UTF-8
|
Python
| false
| false
| 14,734
|
py
|
svd.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created in May 2018
@author: Nathan de Lara <nathan.delara@polytechnique.org>
@author: Thomas Bonald <bonald@enst.fr>
"""
from typing import Union
import numpy as np
from scipy import sparse
from sknetwork.embedding.base import BaseEmbedding
from sknetwork.linalg import (SVDSolver, LanczosSVD, safe_sparse_dot, diagonal_pseudo_inverse, normalize,
                              Regularizer, SparseLR)
from sknetwork.utils.check import check_format, check_adjacency_vector, check_nonnegative, check_n_components
class GSVD(BaseEmbedding):
"""Graph embedding by Generalized Singular Value Decomposition of the adjacency or biadjacency matrix :math:`A`.
This is equivalent to the Singular Value Decomposition of the matrix :math:`D_1^{- \\alpha_1}AD_2^{- \\alpha_2}`
where :math:`D_1, D_2` are the diagonal matrices of row weights and columns weights, respectively, and
:math:`\\alpha_1, \\alpha_2` are parameters.
Parameters
-----------
n_components : int
Dimension of the embedding.
regularization : ``None`` or float (default = ``None``)
Regularization factor :math:`\\alpha` so that the matrix is :math:`A + \\alpha \\frac{11^T}{n}`.
factor_row : float (default = 0.5)
Power factor :math:`\\alpha_1` applied to the diagonal matrix of row weights.
factor_col : float (default = 0.5)
Power factor :math:`\\alpha_2` applied to the diagonal matrix of column weights.
factor_singular : float (default = 0.)
Parameter :math:`\\alpha` applied to the singular values on right singular vectors.
The embedding of rows and columns are respectively :math:`D_1^{- \\alpha_1}U \\Sigma^{1-\\alpha}` and
:math:`D_2^{- \\alpha_2}V \\Sigma^\\alpha` where:
* :math:`U` is the matrix of left singular vectors, shape (n_row, n_components)
* :math:`V` is the matrix of right singular vectors, shape (n_col, n_components)
* :math:`\\Sigma` is the diagonal matrix of singular values, shape (n_components, n_components)
normalized : bool (default = ``True``)
        If ``True``, normalize the embedding so that each vector has norm 1 in the embedding space, i.e.,
each vector lies on the unit sphere.
solver : ``'lanczos'`` (Lanczos algorithm, default) or :class:`SVDSolver` (custom solver)
Which solver to use.
Attributes
----------
embedding_ : array, shape = (n, n_components)
Embedding of the nodes.
embedding_row_ : array, shape = (n_row, n_components)
Embedding of the rows, for bipartite graphs.
embedding_col_ : array, shape = (n_col, n_components)
Embedding of the columns, for bipartite graphs.
singular_values_ : np.ndarray, shape = (n_components)
Singular values.
singular_vectors_left_ : np.ndarray, shape = (n_row, n_components)
Left singular vectors.
singular_vectors_right_ : np.ndarray, shape = (n_col, n_components)
Right singular vectors.
    weights_col_ : np.ndarray, shape = (n_col)
Weights applied to columns.
Example
-------
>>> from sknetwork.embedding import GSVD
>>> from sknetwork.data import karate_club
>>> gsvd = GSVD()
>>> adjacency = karate_club()
>>> embedding = gsvd.fit_transform(adjacency)
>>> embedding.shape
(34, 2)
References
----------
Abdi, H. (2007).
`Singular value decomposition (SVD) and generalized singular value decomposition.
<https://www.cs.cornell.edu/cv/ResearchPDF/Generalizing%20The%20Singular%20Value%20Decomposition.pdf>`_
Encyclopedia of measurement and statistics, 907-912.
"""
def __init__(self, n_components=2, regularization: Union[None, float] = None,
factor_row: float = 0.5, factor_col: float = 0.5, factor_singular: float = 0., normalized: bool = True,
solver: Union[str, SVDSolver] = 'lanczos'):
super(GSVD, self).__init__()
self.n_components = n_components
if regularization == 0:
self.regularization = None
else:
self.regularization = regularization
self.factor_row = factor_row
self.factor_col = factor_col
self.factor_singular = factor_singular
self.normalized = normalized
self.solver = solver
self.singular_values_ = None
self.singular_vectors_left_ = None
self.singular_vectors_right_ = None
self.regularization_ = None
self.weights_col_ = None
def fit(self, input_matrix: Union[sparse.csr_matrix, np.ndarray]) -> 'GSVD':
"""Compute the embedding of the graph.
Parameters
----------
input_matrix :
Adjacency matrix or biadjacency matrix of the graph.
Returns
-------
self: :class:`GSVD`
"""
self._init_vars()
adjacency = check_format(input_matrix).asfptype()
n_row, n_col = adjacency.shape
n_components = check_n_components(self.n_components, min(n_row, n_col) - 1)
if isinstance(self.solver, str):
self.solver = LanczosSVD()
regularization = self.regularization
if regularization:
adjacency_reg = Regularizer(adjacency, regularization)
else:
adjacency_reg = adjacency
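        # Row and column weights are the degrees (row/column sums) of the regularized matrix.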
weights_row = adjacency_reg.dot(np.ones(n_col))
weights_col = adjacency_reg.T.dot(np.ones(n_row))
diag_row = diagonal_pseudo_inverse(np.power(weights_row, self.factor_row))
diag_col = diagonal_pseudo_inverse(np.power(weights_col, self.factor_col))
self.solver.fit(safe_sparse_dot(diag_row, safe_sparse_dot(adjacency_reg, diag_col)), n_components)
singular_values = self.solver.singular_values_
index = np.argsort(-singular_values)
singular_values = singular_values[index]
singular_vectors_left = self.solver.singular_vectors_left_[:, index]
singular_vectors_right = self.solver.singular_vectors_right_[:, index]
singular_left_diag = sparse.diags(np.power(singular_values, 1 - self.factor_singular))
singular_right_diag = sparse.diags(np.power(singular_values, self.factor_singular))
embedding_row = diag_row.dot(singular_vectors_left)
embedding_col = diag_col.dot(singular_vectors_right)
embedding_row = singular_left_diag.dot(embedding_row.T).T
embedding_col = singular_right_diag.dot(embedding_col.T).T
if self.normalized:
embedding_row = normalize(embedding_row, p=2)
embedding_col = normalize(embedding_col, p=2)
self.embedding_row_ = embedding_row
self.embedding_col_ = embedding_col
self.embedding_ = embedding_row
self.singular_values_ = singular_values
self.singular_vectors_left_ = singular_vectors_left
self.singular_vectors_right_ = singular_vectors_right
self.weights_col_ = weights_col
return self
@staticmethod
def _check_adj_vector(adjacency_vectors):
check_nonnegative(adjacency_vectors)
def predict(self, adjacency_vectors: Union[sparse.csr_matrix, np.ndarray]) -> np.ndarray:
"""Predict the embedding of new rows, defined by their adjacency vectors.
Parameters
----------
adjacency_vectors :
Adjacency vectors of nodes.
Array of shape (n_col,) (single vector) or (n_vectors, n_col)
Returns
-------
embedding_vectors : np.ndarray
Embedding of the nodes.
"""
self._check_fitted()
singular_vectors_right = self.singular_vectors_right_
singular_values = self.singular_values_
n_row, _ = self.embedding_row_.shape
n_col, _ = self.embedding_col_.shape
adjacency_vectors = check_adjacency_vector(adjacency_vectors, n_col)
self._check_adj_vector(adjacency_vectors)
# regularization
if self.regularization:
adjacency_vectors = Regularizer(adjacency_vectors, self.regularization)
# weighting
weights_row = adjacency_vectors.dot(np.ones(n_col))
diag_row = diagonal_pseudo_inverse(np.power(weights_row, self.factor_row))
diag_col = diagonal_pseudo_inverse(np.power(self.weights_col_, self.factor_col))
adjacency_vectors = safe_sparse_dot(diag_row, safe_sparse_dot(adjacency_vectors, diag_col))
# projection in the embedding space
averaging = adjacency_vectors
embedding_vectors = diag_row.dot(averaging.dot(singular_vectors_right))
# scaling
embedding_vectors /= np.power(singular_values, self.factor_singular)
if self.normalized:
embedding_vectors = normalize(embedding_vectors, p=2)
if len(embedding_vectors) == 1:
embedding_vectors = embedding_vectors.ravel()
return embedding_vectors
class SVD(GSVD):
"""Graph embedding by Singular Value Decomposition of the adjacency or biadjacency matrix of the graph.
Parameters
----------
n_components : int
Dimension of the embedding.
regularization : ``None`` or float (default = ``None``)
Regularization factor :math:`\\alpha` so that the matrix is :math:`A + \\alpha \\frac{11^T}{n}`.
factor_singular : float (default = 0.)
Power factor :math:`\\alpha` applied to the singular values on right singular vectors.
The embedding of rows and columns are respectively :math:`U \\Sigma^{1-\\alpha}` and
:math:`V \\Sigma^\\alpha` where:
* :math:`U` is the matrix of left singular vectors, shape (n_row, n_components)
* :math:`V` is the matrix of right singular vectors, shape (n_col, n_components)
* :math:`\\Sigma` is the diagonal matrix of singular values, shape (n_components, n_components)
normalized : bool (default = ``False``)
If ``True``, normalize the embedding so that each vector has norm 1 in the embedding space, i.e.,
each vector lies on the unit sphere.
solver : ``'lanczos'`` (Lanczos algorithm, default) or :class:`SVDSolver` (custom solver)
Which solver to use.
Attributes
----------
embedding_ : array, shape = (n, n_components)
Embedding of the nodes.
embedding_row_ : array, shape = (n_row, n_components)
Embedding of the rows, for bipartite graphs.
embedding_col_ : array, shape = (n_col, n_components)
Embedding of the columns, for bipartite graphs.
singular_values_ : np.ndarray, shape = (n_components)
Singular values.
singular_vectors_left_ : np.ndarray, shape = (n_row, n_components)
Left singular vectors.
singular_vectors_right_ : np.ndarray, shape = (n_col, n_components)
Right singular vectors.
Example
-------
>>> from sknetwork.embedding import SVD
>>> from sknetwork.data import karate_club
>>> svd = SVD()
>>> adjacency = karate_club()
>>> embedding = svd.fit_transform(adjacency)
>>> embedding.shape
(34, 2)
References
----------
Abdi, H. (2007).
`Singular value decomposition (SVD) and generalized singular value decomposition.
<https://www.cs.cornell.edu/cv/ResearchPDF/Generalizing%20The%20Singular%20Value%20Decomposition.pdf>`_
Encyclopedia of measurement and statistics.
"""
def __init__(self, n_components=2, regularization: Union[None, float] = None, factor_singular: float = 0.,
normalized: bool = False, solver: Union[str, SVDSolver] = 'lanczos'):
super(SVD, self).__init__(n_components=n_components, regularization=regularization,
factor_singular=factor_singular, factor_row=0., factor_col=0., normalized=normalized,
solver=solver)
@staticmethod
def _check_adj_vector(adjacency_vectors: np.ndarray):
return
class PCA(SVD):
"""Graph embedding by Principal Component Analysis of the adjacency or biadjacency matrix.
Parameters
----------
n_components : int
Dimension of the embedding.
normalized : bool (default = ``False``)
If ``True``, normalize the embedding so that each vector has norm 1 in the embedding space, i.e.,
each vector lies on the unit sphere.
solver : ``'lanczos'`` (Lanczos algorithm, default) or :class:`SVDSolver` (custom solver)
Which solver to use.
Attributes
----------
embedding_ : array, shape = (n, n_components)
Embedding of the nodes.
embedding_row_ : array, shape = (n_row, n_components)
Embedding of the rows, for bipartite graphs.
embedding_col_ : array, shape = (n_col, n_components)
Embedding of the columns, for bipartite graphs.
singular_values_ : np.ndarray, shape = (n_components)
Singular values.
singular_vectors_left_ : np.ndarray, shape = (n_row, n_components)
Left singular vectors.
singular_vectors_right_ : np.ndarray, shape = (n_col, n_components)
Right singular vectors.
Example
-------
>>> from sknetwork.embedding import PCA
>>> from sknetwork.data import karate_club
>>> pca = PCA()
>>> adjacency = karate_club()
>>> embedding = pca.fit_transform(adjacency)
>>> embedding.shape
(34, 2)
References
----------
Jolliffe, I.T. (2002).
`Principal Component Analysis`
Series: Springer Series in Statistics.
"""
def __init__(self, n_components=2, normalized: bool = False, solver: Union[str, SVDSolver] = 'lanczos'):
super(PCA, self).__init__()
self.n_components = n_components
self.normalized = normalized
if isinstance(solver, str):
self.solver = LanczosSVD()
else:
self.solver = solver
def fit(self, adjacency: Union[sparse.csr_matrix, np.ndarray]) -> 'PCA':
"""Compute the embedding of the graph.
Parameters
----------
adjacency :
Adjacency or biadjacency matrix of the graph.
Returns
-------
self: :class:`PCA`
"""
adjacency = check_format(adjacency).asfptype()
n_row, n_col = adjacency.shape
adjacency_centered = SparseLR(adjacency, (-np.ones(n_row), adjacency.T.dot(np.ones(n_row)) / n_row))
svd = self.solver
svd.fit(adjacency_centered, self.n_components)
self.embedding_row_ = svd.singular_vectors_left_
self.embedding_col_ = svd.singular_vectors_right_
self.embedding_ = svd.singular_vectors_left_
self.singular_values_ = svd.singular_values_
self.singular_vectors_left_ = svd.singular_vectors_left_
self.singular_vectors_right_ = svd.singular_vectors_right_
return self
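# Illustrative sketch, not from the original module: a minimal fit/predict
# flow for the GSVD class above. The toy 3x4 biadjacency matrix is made up,
# and the exact embedding values depend on the Lanczos solver.
if __name__ == '__main__':
    toy_biadjacency = sparse.csr_matrix(np.array([[1, 0, 1, 0],
                                                  [0, 1, 1, 0],
                                                  [1, 1, 0, 1]], dtype=float))
    gsvd = GSVD(n_components=2)
    gsvd.fit(toy_biadjacency)                 # fills embedding_row_ / embedding_col_
    new_row = np.array([1.0, 0.0, 0.0, 1.0])  # adjacency vector of an unseen row
    new_vector = gsvd.predict(new_row)        # projection into the fitted space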
| 460e849e39941f56e83fa224ebb3b86ab1c32d43 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-eip/huaweicloudsdkeip/v3/model/list_publicips_request.py | 808a3e353c5f17330a5b5bdd473a59e1e3d653f3 | ["Apache-2.0"] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 45,962 | py | list_publicips_request.py |
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ListPublicipsRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'marker': 'str',
'offset': 'int',
'limit': 'int',
'fields': 'list[str]',
'sort_key': 'str',
'sort_dir': 'str',
'id': 'list[str]',
'ip_version': 'list[int]',
'public_ip_address': 'list[str]',
'public_ip_address_like': 'str',
'public_ipv6_address': 'list[str]',
'public_ipv6_address_like': 'str',
'type': 'list[str]',
'network_type': 'list[str]',
'publicip_pool_name': 'list[str]',
'status': 'list[str]',
'alias_like': 'str',
'alias': 'list[str]',
'description': 'list[str]',
'vnic_private_ip_address': 'list[str]',
'vnic_private_ip_address_like': 'str',
'vnic_device_id': 'list[str]',
'vnic_device_owner': 'list[str]',
'vnic_vpc_id': 'list[str]',
'vnic_port_id': 'list[str]',
'vnic_device_owner_prefixlike': 'str',
'vnic_instance_type': 'list[str]',
'vnic_instance_id': 'list[str]',
'bandwidth_id': 'list[str]',
'bandwidth_name': 'list[str]',
'bandwidth_name_like': 'list[str]',
'bandwidth_size': 'list[int]',
'bandwidth_share_type': 'list[str]',
'bandwidth_charge_mode': 'list[str]',
'billing_info': 'list[str]',
'billing_mode': 'str',
'associate_instance_type': 'list[str]',
'associate_instance_id': 'list[str]',
'enterprise_project_id': 'list[str]',
'public_border_group': 'list[str]',
'allow_share_bandwidth_type_any': 'list[str]'
}
attribute_map = {
'marker': 'marker',
'offset': 'offset',
'limit': 'limit',
'fields': 'fields',
'sort_key': 'sort_key',
'sort_dir': 'sort_dir',
'id': 'id',
'ip_version': 'ip_version',
'public_ip_address': 'public_ip_address',
'public_ip_address_like': 'public_ip_address_like',
'public_ipv6_address': 'public_ipv6_address',
'public_ipv6_address_like': 'public_ipv6_address_like',
'type': 'type',
'network_type': 'network_type',
'publicip_pool_name': 'publicip_pool_name',
'status': 'status',
'alias_like': 'alias_like',
'alias': 'alias',
'description': 'description',
'vnic_private_ip_address': 'vnic.private_ip_address',
'vnic_private_ip_address_like': 'vnic.private_ip_address_like',
'vnic_device_id': 'vnic.device_id',
'vnic_device_owner': 'vnic.device_owner',
'vnic_vpc_id': 'vnic.vpc_id',
'vnic_port_id': 'vnic.port_id',
'vnic_device_owner_prefixlike': 'vnic.device_owner_prefixlike',
'vnic_instance_type': 'vnic.instance_type',
'vnic_instance_id': 'vnic.instance_id',
'bandwidth_id': 'bandwidth.id',
'bandwidth_name': 'bandwidth.name',
'bandwidth_name_like': 'bandwidth.name_like',
'bandwidth_size': 'bandwidth.size',
'bandwidth_share_type': 'bandwidth.share_type',
'bandwidth_charge_mode': 'bandwidth.charge_mode',
'billing_info': 'billing_info',
'billing_mode': 'billing_mode',
'associate_instance_type': 'associate_instance_type',
'associate_instance_id': 'associate_instance_id',
'enterprise_project_id': 'enterprise_project_id',
'public_border_group': 'public_border_group',
'allow_share_bandwidth_type_any': 'allow_share_bandwidth_type_any'
}
def __init__(self, marker=None, offset=None, limit=None, fields=None, sort_key=None, sort_dir=None, id=None, ip_version=None, public_ip_address=None, public_ip_address_like=None, public_ipv6_address=None, public_ipv6_address_like=None, type=None, network_type=None, publicip_pool_name=None, status=None, alias_like=None, alias=None, description=None, vnic_private_ip_address=None, vnic_private_ip_address_like=None, vnic_device_id=None, vnic_device_owner=None, vnic_vpc_id=None, vnic_port_id=None, vnic_device_owner_prefixlike=None, vnic_instance_type=None, vnic_instance_id=None, bandwidth_id=None, bandwidth_name=None, bandwidth_name_like=None, bandwidth_size=None, bandwidth_share_type=None, bandwidth_charge_mode=None, billing_info=None, billing_mode=None, associate_instance_type=None, associate_instance_id=None, enterprise_project_id=None, public_border_group=None, allow_share_bandwidth_type_any=None):
"""ListPublicipsRequest
The model defined in huaweicloud sdk
:param marker: Resource ID from which pagination starts; if empty, the first page is queried
:type marker: str
:param offset: Index of the resource from which pagination starts
:type offset: int
:param limit: Number of records returned per page. Value range: 0~[2000], where the upper bound 2000 varies by site and the exact value is determined by the site
:type limit: int
:param fields: Fields to display, in the form \"fields=id&fields=owner&...\" Supported fields: id/project_id/ip_version/type/public_ip_address/public_ipv6_address/network_type/status/description/created_at/updated_at/vnic/bandwidth/associate_instance_type/associate_instance_id/lock_status/billing_info/tags/enterprise_project_id/allow_share_bandwidth_types/public_border_group/alias/publicip_pool_name/publicip_pool_id
:type fields: list[str]
:param sort_key: Sort key, in the form \"sort_key=id\" Supported fields: id/public_ip_address/public_ipv6_address/ip_version/created_at/updated_at/public_border_group
:type sort_key: str
:param sort_dir: Sort direction. Value range: asc, desc
:type sort_dir: str
:param id: Filter by id
:type id: list[str]
:param ip_version: Filter by ip_version. Value range: 4, 6
:type ip_version: list[int]
:param public_ip_address: Filter by public_ip_address
:type public_ip_address: list[str]
:param public_ip_address_like: Filter by public_ip_address, fuzzy match
:type public_ip_address_like: str
:param public_ipv6_address: Filter by public_ipv6_address
:type public_ipv6_address: list[str]
:param public_ipv6_address_like: Filter by public_ipv6_address, fuzzy match
:type public_ipv6_address_like: str
:param type: Filter by type. Value range: EIP, DUALSTACK, DUALSTACK_SUBNET. EIP: elastic public IP; DUALSTACK: dual-stack IPv6; DUALSTACK_SUBNET: dual-stack subnet
:type type: list[str]
:param network_type: Filter by network_type. Value range: 5_telcom, 5_union, 5_bgp, 5_sbgp, 5_ipv6, 5_graybgp
:type network_type: list[str]
:param publicip_pool_name: Filter by publicip_pool_name. Value range: 5_telcom, 5_union, 5_bgp, 5_sbgp, 5_ipv6, 5_graybgp, dedicated pool names, etc.
:type publicip_pool_name: list[str]
:param status: Filter by status. Value range: FREEZED, DOWN, ACTIVE, ERROR
:type status: list[str]
:param alias_like: Fuzzy search by alias
:type alias_like: str
:param alias: Filter by alias
:type alias: list[str]
:param description: Filter by description
:type description: list[str]
:param vnic_private_ip_address: Filter by private_ip_address
:type vnic_private_ip_address: list[str]
:param vnic_private_ip_address_like: Fuzzy search by private_ip_address
:type vnic_private_ip_address_like: str
:param vnic_device_id: Filter by device_id
:type vnic_device_id: list[str]
:param vnic_device_owner: Filter by device_owner
:type vnic_device_owner: list[str]
:param vnic_vpc_id: Filter by vpc_id
:type vnic_vpc_id: list[str]
:param vnic_port_id: Filter by port_id
:type vnic_port_id: list[str]
:param vnic_device_owner_prefixlike: Fuzzy search by device_owner prefix
:type vnic_device_owner_prefixlike: str
:param vnic_instance_type: Filter by instance_type
:type vnic_instance_type: list[str]
:param vnic_instance_id: Filter by instance_id
:type vnic_instance_id: list[str]
:param bandwidth_id: Filter by id
:type bandwidth_id: list[str]
:param bandwidth_name: Filter by name
:type bandwidth_name: list[str]
:param bandwidth_name_like: Fuzzy filter by name
:type bandwidth_name_like: list[str]
:param bandwidth_size: Filter by size
:type bandwidth_size: list[int]
:param bandwidth_share_type: Filter by share_type
:type bandwidth_share_type: list[str]
:param bandwidth_charge_mode: Filter by charge_mode
:type bandwidth_charge_mode: list[str]
:param billing_info: Filter by billing_info
:type billing_info: list[str]
:param billing_mode: Filter by billing mode. Value range: YEARLY_MONTHLY, PAY_PER_USE
:type billing_mode: str
:param associate_instance_type: Filter by associate_instance_type. Value range: PORT, NATGW, ELB, VPN, ELBV1
:type associate_instance_type: list[str]
:param associate_instance_id: Filter by associate_instance_id
:type associate_instance_id: list[str]
:param enterprise_project_id: Filter by enterprise_project_id
:type enterprise_project_id: list[str]
:param public_border_group: Filter by public_border_group
:type public_border_group: list[str]
:param allow_share_bandwidth_type_any: Shared bandwidth type: filters the EIP list by any of the given shared bandwidth types. Multiple bandwidth types may be specified, separated by commas.
:type allow_share_bandwidth_type_any: list[str]
"""
self._marker = None
self._offset = None
self._limit = None
self._fields = None
self._sort_key = None
self._sort_dir = None
self._id = None
self._ip_version = None
self._public_ip_address = None
self._public_ip_address_like = None
self._public_ipv6_address = None
self._public_ipv6_address_like = None
self._type = None
self._network_type = None
self._publicip_pool_name = None
self._status = None
self._alias_like = None
self._alias = None
self._description = None
self._vnic_private_ip_address = None
self._vnic_private_ip_address_like = None
self._vnic_device_id = None
self._vnic_device_owner = None
self._vnic_vpc_id = None
self._vnic_port_id = None
self._vnic_device_owner_prefixlike = None
self._vnic_instance_type = None
self._vnic_instance_id = None
self._bandwidth_id = None
self._bandwidth_name = None
self._bandwidth_name_like = None
self._bandwidth_size = None
self._bandwidth_share_type = None
self._bandwidth_charge_mode = None
self._billing_info = None
self._billing_mode = None
self._associate_instance_type = None
self._associate_instance_id = None
self._enterprise_project_id = None
self._public_border_group = None
self._allow_share_bandwidth_type_any = None
self.discriminator = None
if marker is not None:
self.marker = marker
if offset is not None:
self.offset = offset
if limit is not None:
self.limit = limit
if fields is not None:
self.fields = fields
if sort_key is not None:
self.sort_key = sort_key
if sort_dir is not None:
self.sort_dir = sort_dir
if id is not None:
self.id = id
if ip_version is not None:
self.ip_version = ip_version
if public_ip_address is not None:
self.public_ip_address = public_ip_address
if public_ip_address_like is not None:
self.public_ip_address_like = public_ip_address_like
if public_ipv6_address is not None:
self.public_ipv6_address = public_ipv6_address
if public_ipv6_address_like is not None:
self.public_ipv6_address_like = public_ipv6_address_like
if type is not None:
self.type = type
if network_type is not None:
self.network_type = network_type
if publicip_pool_name is not None:
self.publicip_pool_name = publicip_pool_name
if status is not None:
self.status = status
if alias_like is not None:
self.alias_like = alias_like
if alias is not None:
self.alias = alias
if description is not None:
self.description = description
if vnic_private_ip_address is not None:
self.vnic_private_ip_address = vnic_private_ip_address
if vnic_private_ip_address_like is not None:
self.vnic_private_ip_address_like = vnic_private_ip_address_like
if vnic_device_id is not None:
self.vnic_device_id = vnic_device_id
if vnic_device_owner is not None:
self.vnic_device_owner = vnic_device_owner
if vnic_vpc_id is not None:
self.vnic_vpc_id = vnic_vpc_id
if vnic_port_id is not None:
self.vnic_port_id = vnic_port_id
if vnic_device_owner_prefixlike is not None:
self.vnic_device_owner_prefixlike = vnic_device_owner_prefixlike
if vnic_instance_type is not None:
self.vnic_instance_type = vnic_instance_type
if vnic_instance_id is not None:
self.vnic_instance_id = vnic_instance_id
if bandwidth_id is not None:
self.bandwidth_id = bandwidth_id
if bandwidth_name is not None:
self.bandwidth_name = bandwidth_name
if bandwidth_name_like is not None:
self.bandwidth_name_like = bandwidth_name_like
if bandwidth_size is not None:
self.bandwidth_size = bandwidth_size
if bandwidth_share_type is not None:
self.bandwidth_share_type = bandwidth_share_type
if bandwidth_charge_mode is not None:
self.bandwidth_charge_mode = bandwidth_charge_mode
if billing_info is not None:
self.billing_info = billing_info
if billing_mode is not None:
self.billing_mode = billing_mode
if associate_instance_type is not None:
self.associate_instance_type = associate_instance_type
if associate_instance_id is not None:
self.associate_instance_id = associate_instance_id
if enterprise_project_id is not None:
self.enterprise_project_id = enterprise_project_id
if public_border_group is not None:
self.public_border_group = public_border_group
if allow_share_bandwidth_type_any is not None:
self.allow_share_bandwidth_type_any = allow_share_bandwidth_type_any
@property
def marker(self):
"""Gets the marker of this ListPublicipsRequest.
Resource ID from which pagination starts; if empty, the first page is queried
:return: The marker of this ListPublicipsRequest.
:rtype: str
"""
return self._marker
@marker.setter
def marker(self, marker):
"""Sets the marker of this ListPublicipsRequest.
Resource ID from which pagination starts; if empty, the first page is queried
:param marker: The marker of this ListPublicipsRequest.
:type marker: str
"""
self._marker = marker
@property
def offset(self):
"""Gets the offset of this ListPublicipsRequest.
Index of the resource from which pagination starts
:return: The offset of this ListPublicipsRequest.
:rtype: int
"""
return self._offset
@offset.setter
def offset(self, offset):
"""Sets the offset of this ListPublicipsRequest.
Index of the resource from which pagination starts
:param offset: The offset of this ListPublicipsRequest.
:type offset: int
"""
self._offset = offset
@property
def limit(self):
"""Gets the limit of this ListPublicipsRequest.
Number of records returned per page. Value range: 0~[2000], where the upper bound 2000 varies by site and the exact value is determined by the site
:return: The limit of this ListPublicipsRequest.
:rtype: int
"""
return self._limit
@limit.setter
def limit(self, limit):
"""Sets the limit of this ListPublicipsRequest.
Number of records returned per page. Value range: 0~[2000], where the upper bound 2000 varies by site and the exact value is determined by the site
:param limit: The limit of this ListPublicipsRequest.
:type limit: int
"""
self._limit = limit
@property
def fields(self):
"""Gets the fields of this ListPublicipsRequest.
Fields to display, in the form \"fields=id&fields=owner&...\" Supported fields: id/project_id/ip_version/type/public_ip_address/public_ipv6_address/network_type/status/description/created_at/updated_at/vnic/bandwidth/associate_instance_type/associate_instance_id/lock_status/billing_info/tags/enterprise_project_id/allow_share_bandwidth_types/public_border_group/alias/publicip_pool_name/publicip_pool_id
:return: The fields of this ListPublicipsRequest.
:rtype: list[str]
"""
return self._fields
@fields.setter
def fields(self, fields):
"""Sets the fields of this ListPublicipsRequest.
Fields to display, in the form \"fields=id&fields=owner&...\" Supported fields: id/project_id/ip_version/type/public_ip_address/public_ipv6_address/network_type/status/description/created_at/updated_at/vnic/bandwidth/associate_instance_type/associate_instance_id/lock_status/billing_info/tags/enterprise_project_id/allow_share_bandwidth_types/public_border_group/alias/publicip_pool_name/publicip_pool_id
:param fields: The fields of this ListPublicipsRequest.
:type fields: list[str]
"""
self._fields = fields
@property
def sort_key(self):
"""Gets the sort_key of this ListPublicipsRequest.
Sort key, in the form \"sort_key=id\" Supported fields: id/public_ip_address/public_ipv6_address/ip_version/created_at/updated_at/public_border_group
:return: The sort_key of this ListPublicipsRequest.
:rtype: str
"""
return self._sort_key
@sort_key.setter
def sort_key(self, sort_key):
"""Sets the sort_key of this ListPublicipsRequest.
Sort key, in the form \"sort_key=id\" Supported fields: id/public_ip_address/public_ipv6_address/ip_version/created_at/updated_at/public_border_group
:param sort_key: The sort_key of this ListPublicipsRequest.
:type sort_key: str
"""
self._sort_key = sort_key
@property
def sort_dir(self):
"""Gets the sort_dir of this ListPublicipsRequest.
Sort direction. Value range: asc, desc
:return: The sort_dir of this ListPublicipsRequest.
:rtype: str
"""
return self._sort_dir
@sort_dir.setter
def sort_dir(self, sort_dir):
"""Sets the sort_dir of this ListPublicipsRequest.
Sort direction. Value range: asc, desc
:param sort_dir: The sort_dir of this ListPublicipsRequest.
:type sort_dir: str
"""
self._sort_dir = sort_dir
@property
def id(self):
"""Gets the id of this ListPublicipsRequest.
Filter by id
:return: The id of this ListPublicipsRequest.
:rtype: list[str]
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this ListPublicipsRequest.
Filter by id
:param id: The id of this ListPublicipsRequest.
:type id: list[str]
"""
self._id = id
@property
def ip_version(self):
"""Gets the ip_version of this ListPublicipsRequest.
Filter by ip_version. Value range: 4, 6
:return: The ip_version of this ListPublicipsRequest.
:rtype: list[int]
"""
return self._ip_version
@ip_version.setter
def ip_version(self, ip_version):
"""Sets the ip_version of this ListPublicipsRequest.
Filter by ip_version. Value range: 4, 6
:param ip_version: The ip_version of this ListPublicipsRequest.
:type ip_version: list[int]
"""
self._ip_version = ip_version
@property
def public_ip_address(self):
"""Gets the public_ip_address of this ListPublicipsRequest.
Filter by public_ip_address
:return: The public_ip_address of this ListPublicipsRequest.
:rtype: list[str]
"""
return self._public_ip_address
@public_ip_address.setter
def public_ip_address(self, public_ip_address):
"""Sets the public_ip_address of this ListPublicipsRequest.
Filter by public_ip_address
:param public_ip_address: The public_ip_address of this ListPublicipsRequest.
:type public_ip_address: list[str]
"""
self._public_ip_address = public_ip_address
@property
def public_ip_address_like(self):
"""Gets the public_ip_address_like of this ListPublicipsRequest.
Filter by public_ip_address, fuzzy match
:return: The public_ip_address_like of this ListPublicipsRequest.
:rtype: str
"""
return self._public_ip_address_like
@public_ip_address_like.setter
def public_ip_address_like(self, public_ip_address_like):
"""Sets the public_ip_address_like of this ListPublicipsRequest.
Filter by public_ip_address, fuzzy match
:param public_ip_address_like: The public_ip_address_like of this ListPublicipsRequest.
:type public_ip_address_like: str
"""
self._public_ip_address_like = public_ip_address_like
@property
def public_ipv6_address(self):
"""Gets the public_ipv6_address of this ListPublicipsRequest.
Filter by public_ipv6_address
:return: The public_ipv6_address of this ListPublicipsRequest.
:rtype: list[str]
"""
return self._public_ipv6_address
@public_ipv6_address.setter
def public_ipv6_address(self, public_ipv6_address):
"""Sets the public_ipv6_address of this ListPublicipsRequest.
Filter by public_ipv6_address
:param public_ipv6_address: The public_ipv6_address of this ListPublicipsRequest.
:type public_ipv6_address: list[str]
"""
self._public_ipv6_address = public_ipv6_address
@property
def public_ipv6_address_like(self):
"""Gets the public_ipv6_address_like of this ListPublicipsRequest.
Filter by public_ipv6_address, fuzzy match
:return: The public_ipv6_address_like of this ListPublicipsRequest.
:rtype: str
"""
return self._public_ipv6_address_like
@public_ipv6_address_like.setter
def public_ipv6_address_like(self, public_ipv6_address_like):
"""Sets the public_ipv6_address_like of this ListPublicipsRequest.
Filter by public_ipv6_address, fuzzy match
:param public_ipv6_address_like: The public_ipv6_address_like of this ListPublicipsRequest.
:type public_ipv6_address_like: str
"""
self._public_ipv6_address_like = public_ipv6_address_like
@property
def type(self):
"""Gets the type of this ListPublicipsRequest.
Filter by type. Value range: EIP, DUALSTACK, DUALSTACK_SUBNET. EIP: elastic public IP; DUALSTACK: dual-stack IPv6; DUALSTACK_SUBNET: dual-stack subnet
:return: The type of this ListPublicipsRequest.
:rtype: list[str]
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this ListPublicipsRequest.
Filter by type. Value range: EIP, DUALSTACK, DUALSTACK_SUBNET. EIP: elastic public IP; DUALSTACK: dual-stack IPv6; DUALSTACK_SUBNET: dual-stack subnet
:param type: The type of this ListPublicipsRequest.
:type type: list[str]
"""
self._type = type
@property
def network_type(self):
"""Gets the network_type of this ListPublicipsRequest.
Filter by network_type. Value range: 5_telcom, 5_union, 5_bgp, 5_sbgp, 5_ipv6, 5_graybgp
:return: The network_type of this ListPublicipsRequest.
:rtype: list[str]
"""
return self._network_type
@network_type.setter
def network_type(self, network_type):
"""Sets the network_type of this ListPublicipsRequest.
Filter by network_type. Value range: 5_telcom, 5_union, 5_bgp, 5_sbgp, 5_ipv6, 5_graybgp
:param network_type: The network_type of this ListPublicipsRequest.
:type network_type: list[str]
"""
self._network_type = network_type
@property
def publicip_pool_name(self):
"""Gets the publicip_pool_name of this ListPublicipsRequest.
Filter by publicip_pool_name. Value range: 5_telcom, 5_union, 5_bgp, 5_sbgp, 5_ipv6, 5_graybgp, dedicated pool names, etc.
:return: The publicip_pool_name of this ListPublicipsRequest.
:rtype: list[str]
"""
return self._publicip_pool_name
@publicip_pool_name.setter
def publicip_pool_name(self, publicip_pool_name):
"""Sets the publicip_pool_name of this ListPublicipsRequest.
Filter by publicip_pool_name. Value range: 5_telcom, 5_union, 5_bgp, 5_sbgp, 5_ipv6, 5_graybgp, dedicated pool names, etc.
:param publicip_pool_name: The publicip_pool_name of this ListPublicipsRequest.
:type publicip_pool_name: list[str]
"""
self._publicip_pool_name = publicip_pool_name
@property
def status(self):
"""Gets the status of this ListPublicipsRequest.
Filter by status. Value range: FREEZED, DOWN, ACTIVE, ERROR
:return: The status of this ListPublicipsRequest.
:rtype: list[str]
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this ListPublicipsRequest.
Filter by status. Value range: FREEZED, DOWN, ACTIVE, ERROR
:param status: The status of this ListPublicipsRequest.
:type status: list[str]
"""
self._status = status
@property
def alias_like(self):
"""Gets the alias_like of this ListPublicipsRequest.
Fuzzy search by alias
:return: The alias_like of this ListPublicipsRequest.
:rtype: str
"""
return self._alias_like
@alias_like.setter
def alias_like(self, alias_like):
"""Sets the alias_like of this ListPublicipsRequest.
Fuzzy search by alias
:param alias_like: The alias_like of this ListPublicipsRequest.
:type alias_like: str
"""
self._alias_like = alias_like
@property
def alias(self):
"""Gets the alias of this ListPublicipsRequest.
Filter by alias
:return: The alias of this ListPublicipsRequest.
:rtype: list[str]
"""
return self._alias
@alias.setter
def alias(self, alias):
"""Sets the alias of this ListPublicipsRequest.
Filter by alias
:param alias: The alias of this ListPublicipsRequest.
:type alias: list[str]
"""
self._alias = alias
@property
def description(self):
"""Gets the description of this ListPublicipsRequest.
Filter by description
:return: The description of this ListPublicipsRequest.
:rtype: list[str]
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this ListPublicipsRequest.
Filter by description
:param description: The description of this ListPublicipsRequest.
:type description: list[str]
"""
self._description = description
@property
def vnic_private_ip_address(self):
"""Gets the vnic_private_ip_address of this ListPublicipsRequest.
Filter by private_ip_address
:return: The vnic_private_ip_address of this ListPublicipsRequest.
:rtype: list[str]
"""
return self._vnic_private_ip_address
@vnic_private_ip_address.setter
def vnic_private_ip_address(self, vnic_private_ip_address):
"""Sets the vnic_private_ip_address of this ListPublicipsRequest.
Filter by private_ip_address
:param vnic_private_ip_address: The vnic_private_ip_address of this ListPublicipsRequest.
:type vnic_private_ip_address: list[str]
"""
self._vnic_private_ip_address = vnic_private_ip_address
@property
def vnic_private_ip_address_like(self):
"""Gets the vnic_private_ip_address_like of this ListPublicipsRequest.
Fuzzy search by private_ip_address
:return: The vnic_private_ip_address_like of this ListPublicipsRequest.
:rtype: str
"""
return self._vnic_private_ip_address_like
@vnic_private_ip_address_like.setter
def vnic_private_ip_address_like(self, vnic_private_ip_address_like):
"""Sets the vnic_private_ip_address_like of this ListPublicipsRequest.
Fuzzy search by private_ip_address
:param vnic_private_ip_address_like: The vnic_private_ip_address_like of this ListPublicipsRequest.
:type vnic_private_ip_address_like: str
"""
self._vnic_private_ip_address_like = vnic_private_ip_address_like
@property
def vnic_device_id(self):
"""Gets the vnic_device_id of this ListPublicipsRequest.
Filter by device_id
:return: The vnic_device_id of this ListPublicipsRequest.
:rtype: list[str]
"""
return self._vnic_device_id
@vnic_device_id.setter
def vnic_device_id(self, vnic_device_id):
"""Sets the vnic_device_id of this ListPublicipsRequest.
Filter by device_id
:param vnic_device_id: The vnic_device_id of this ListPublicipsRequest.
:type vnic_device_id: list[str]
"""
self._vnic_device_id = vnic_device_id
@property
def vnic_device_owner(self):
"""Gets the vnic_device_owner of this ListPublicipsRequest.
Filter by device_owner
:return: The vnic_device_owner of this ListPublicipsRequest.
:rtype: list[str]
"""
return self._vnic_device_owner
@vnic_device_owner.setter
def vnic_device_owner(self, vnic_device_owner):
"""Sets the vnic_device_owner of this ListPublicipsRequest.
Filter by device_owner
:param vnic_device_owner: The vnic_device_owner of this ListPublicipsRequest.
:type vnic_device_owner: list[str]
"""
self._vnic_device_owner = vnic_device_owner
@property
def vnic_vpc_id(self):
"""Gets the vnic_vpc_id of this ListPublicipsRequest.
Filter by vpc_id
:return: The vnic_vpc_id of this ListPublicipsRequest.
:rtype: list[str]
"""
return self._vnic_vpc_id
@vnic_vpc_id.setter
def vnic_vpc_id(self, vnic_vpc_id):
"""Sets the vnic_vpc_id of this ListPublicipsRequest.
Filter by vpc_id
:param vnic_vpc_id: The vnic_vpc_id of this ListPublicipsRequest.
:type vnic_vpc_id: list[str]
"""
self._vnic_vpc_id = vnic_vpc_id
@property
def vnic_port_id(self):
"""Gets the vnic_port_id of this ListPublicipsRequest.
Filter by port_id
:return: The vnic_port_id of this ListPublicipsRequest.
:rtype: list[str]
"""
return self._vnic_port_id
@vnic_port_id.setter
def vnic_port_id(self, vnic_port_id):
"""Sets the vnic_port_id of this ListPublicipsRequest.
Filter by port_id
:param vnic_port_id: The vnic_port_id of this ListPublicipsRequest.
:type vnic_port_id: list[str]
"""
self._vnic_port_id = vnic_port_id
@property
def vnic_device_owner_prefixlike(self):
"""Gets the vnic_device_owner_prefixlike of this ListPublicipsRequest.
Fuzzy search by device_owner prefix
:return: The vnic_device_owner_prefixlike of this ListPublicipsRequest.
:rtype: str
"""
return self._vnic_device_owner_prefixlike
@vnic_device_owner_prefixlike.setter
def vnic_device_owner_prefixlike(self, vnic_device_owner_prefixlike):
"""Sets the vnic_device_owner_prefixlike of this ListPublicipsRequest.
Fuzzy search by device_owner prefix
:param vnic_device_owner_prefixlike: The vnic_device_owner_prefixlike of this ListPublicipsRequest.
:type vnic_device_owner_prefixlike: str
"""
self._vnic_device_owner_prefixlike = vnic_device_owner_prefixlike
@property
def vnic_instance_type(self):
"""Gets the vnic_instance_type of this ListPublicipsRequest.
Filter by instance_type
:return: The vnic_instance_type of this ListPublicipsRequest.
:rtype: list[str]
"""
return self._vnic_instance_type
@vnic_instance_type.setter
def vnic_instance_type(self, vnic_instance_type):
"""Sets the vnic_instance_type of this ListPublicipsRequest.
Filter by instance_type
:param vnic_instance_type: The vnic_instance_type of this ListPublicipsRequest.
:type vnic_instance_type: list[str]
"""
self._vnic_instance_type = vnic_instance_type
@property
def vnic_instance_id(self):
"""Gets the vnic_instance_id of this ListPublicipsRequest.
Filter by instance_id
:return: The vnic_instance_id of this ListPublicipsRequest.
:rtype: list[str]
"""
return self._vnic_instance_id
@vnic_instance_id.setter
def vnic_instance_id(self, vnic_instance_id):
"""Sets the vnic_instance_id of this ListPublicipsRequest.
Filter by instance_id
:param vnic_instance_id: The vnic_instance_id of this ListPublicipsRequest.
:type vnic_instance_id: list[str]
"""
self._vnic_instance_id = vnic_instance_id
@property
def bandwidth_id(self):
"""Gets the bandwidth_id of this ListPublicipsRequest.
Filter by id
:return: The bandwidth_id of this ListPublicipsRequest.
:rtype: list[str]
"""
return self._bandwidth_id
@bandwidth_id.setter
def bandwidth_id(self, bandwidth_id):
"""Sets the bandwidth_id of this ListPublicipsRequest.
Filter by id
:param bandwidth_id: The bandwidth_id of this ListPublicipsRequest.
:type bandwidth_id: list[str]
"""
self._bandwidth_id = bandwidth_id
@property
def bandwidth_name(self):
"""Gets the bandwidth_name of this ListPublicipsRequest.
Filter by name
:return: The bandwidth_name of this ListPublicipsRequest.
:rtype: list[str]
"""
return self._bandwidth_name
@bandwidth_name.setter
def bandwidth_name(self, bandwidth_name):
"""Sets the bandwidth_name of this ListPublicipsRequest.
Filter by name
:param bandwidth_name: The bandwidth_name of this ListPublicipsRequest.
:type bandwidth_name: list[str]
"""
self._bandwidth_name = bandwidth_name
@property
def bandwidth_name_like(self):
"""Gets the bandwidth_name_like of this ListPublicipsRequest.
Fuzzy filter by name
:return: The bandwidth_name_like of this ListPublicipsRequest.
:rtype: list[str]
"""
return self._bandwidth_name_like
@bandwidth_name_like.setter
def bandwidth_name_like(self, bandwidth_name_like):
"""Sets the bandwidth_name_like of this ListPublicipsRequest.
Fuzzy filter by name
:param bandwidth_name_like: The bandwidth_name_like of this ListPublicipsRequest.
:type bandwidth_name_like: list[str]
"""
self._bandwidth_name_like = bandwidth_name_like
@property
def bandwidth_size(self):
"""Gets the bandwidth_size of this ListPublicipsRequest.
Filter by size
:return: The bandwidth_size of this ListPublicipsRequest.
:rtype: list[int]
"""
return self._bandwidth_size
@bandwidth_size.setter
def bandwidth_size(self, bandwidth_size):
"""Sets the bandwidth_size of this ListPublicipsRequest.
Filter by size
:param bandwidth_size: The bandwidth_size of this ListPublicipsRequest.
:type bandwidth_size: list[int]
"""
self._bandwidth_size = bandwidth_size
@property
def bandwidth_share_type(self):
"""Gets the bandwidth_share_type of this ListPublicipsRequest.
Filter by share_type
:return: The bandwidth_share_type of this ListPublicipsRequest.
:rtype: list[str]
"""
return self._bandwidth_share_type
@bandwidth_share_type.setter
def bandwidth_share_type(self, bandwidth_share_type):
"""Sets the bandwidth_share_type of this ListPublicipsRequest.
Filter by share_type
:param bandwidth_share_type: The bandwidth_share_type of this ListPublicipsRequest.
:type bandwidth_share_type: list[str]
"""
self._bandwidth_share_type = bandwidth_share_type
@property
def bandwidth_charge_mode(self):
"""Gets the bandwidth_charge_mode of this ListPublicipsRequest.
Filter by charge_mode
:return: The bandwidth_charge_mode of this ListPublicipsRequest.
:rtype: list[str]
"""
return self._bandwidth_charge_mode
@bandwidth_charge_mode.setter
def bandwidth_charge_mode(self, bandwidth_charge_mode):
"""Sets the bandwidth_charge_mode of this ListPublicipsRequest.
Filter by charge_mode
:param bandwidth_charge_mode: The bandwidth_charge_mode of this ListPublicipsRequest.
:type bandwidth_charge_mode: list[str]
"""
self._bandwidth_charge_mode = bandwidth_charge_mode
@property
def billing_info(self):
"""Gets the billing_info of this ListPublicipsRequest.
Filter by billing_info
:return: The billing_info of this ListPublicipsRequest.
:rtype: list[str]
"""
return self._billing_info
@billing_info.setter
def billing_info(self, billing_info):
"""Sets the billing_info of this ListPublicipsRequest.
Filter by billing_info
:param billing_info: The billing_info of this ListPublicipsRequest.
:type billing_info: list[str]
"""
self._billing_info = billing_info
@property
def billing_mode(self):
"""Gets the billing_mode of this ListPublicipsRequest.
Filter by billing mode. Value range: YEARLY_MONTHLY, PAY_PER_USE
:return: The billing_mode of this ListPublicipsRequest.
:rtype: str
"""
return self._billing_mode
@billing_mode.setter
def billing_mode(self, billing_mode):
"""Sets the billing_mode of this ListPublicipsRequest.
Filter by billing mode. Value range: YEARLY_MONTHLY, PAY_PER_USE
:param billing_mode: The billing_mode of this ListPublicipsRequest.
:type billing_mode: str
"""
self._billing_mode = billing_mode
@property
def associate_instance_type(self):
"""Gets the associate_instance_type of this ListPublicipsRequest.
Filter by associate_instance_type. Value range: PORT, NATGW, ELB, VPN, ELBV1
:return: The associate_instance_type of this ListPublicipsRequest.
:rtype: list[str]
"""
return self._associate_instance_type
@associate_instance_type.setter
def associate_instance_type(self, associate_instance_type):
"""Sets the associate_instance_type of this ListPublicipsRequest.
Filter by associate_instance_type. Value range: PORT, NATGW, ELB, VPN, ELBV1
:param associate_instance_type: The associate_instance_type of this ListPublicipsRequest.
:type associate_instance_type: list[str]
"""
self._associate_instance_type = associate_instance_type
@property
def associate_instance_id(self):
"""Gets the associate_instance_id of this ListPublicipsRequest.
Filter by associate_instance_id
:return: The associate_instance_id of this ListPublicipsRequest.
:rtype: list[str]
"""
return self._associate_instance_id
@associate_instance_id.setter
def associate_instance_id(self, associate_instance_id):
"""Sets the associate_instance_id of this ListPublicipsRequest.
Filter by associate_instance_id
:param associate_instance_id: The associate_instance_id of this ListPublicipsRequest.
:type associate_instance_id: list[str]
"""
self._associate_instance_id = associate_instance_id
@property
def enterprise_project_id(self):
"""Gets the enterprise_project_id of this ListPublicipsRequest.
Filter by enterprise_project_id
:return: The enterprise_project_id of this ListPublicipsRequest.
:rtype: list[str]
"""
return self._enterprise_project_id
@enterprise_project_id.setter
def enterprise_project_id(self, enterprise_project_id):
"""Sets the enterprise_project_id of this ListPublicipsRequest.
Filter by enterprise_project_id
:param enterprise_project_id: The enterprise_project_id of this ListPublicipsRequest.
:type enterprise_project_id: list[str]
"""
self._enterprise_project_id = enterprise_project_id
@property
def public_border_group(self):
"""Gets the public_border_group of this ListPublicipsRequest.
Filter by public_border_group
:return: The public_border_group of this ListPublicipsRequest.
:rtype: list[str]
"""
return self._public_border_group
@public_border_group.setter
def public_border_group(self, public_border_group):
"""Sets the public_border_group of this ListPublicipsRequest.
Filter by public_border_group
:param public_border_group: The public_border_group of this ListPublicipsRequest.
:type public_border_group: list[str]
"""
self._public_border_group = public_border_group
@property
def allow_share_bandwidth_type_any(self):
"""Gets the allow_share_bandwidth_type_any of this ListPublicipsRequest.
Shared bandwidth type: filters the EIP list by any of the given shared bandwidth types. Multiple bandwidth types may be specified, separated by commas.
:return: The allow_share_bandwidth_type_any of this ListPublicipsRequest.
:rtype: list[str]
"""
return self._allow_share_bandwidth_type_any
@allow_share_bandwidth_type_any.setter
def allow_share_bandwidth_type_any(self, allow_share_bandwidth_type_any):
"""Sets the allow_share_bandwidth_type_any of this ListPublicipsRequest.
Shared bandwidth type: filters the EIP list by any of the given shared bandwidth types. Multiple bandwidth types may be specified, separated by commas.
:param allow_share_bandwidth_type_any: The allow_share_bandwidth_type_any of this ListPublicipsRequest.
:type allow_share_bandwidth_type_any: list[str]
"""
self._allow_share_bandwidth_type_any = allow_share_bandwidth_type_any
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListPublicipsRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
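# Illustrative sketch, not part of the generated SDK file: building the
# request with made-up filter values and serializing it.
if __name__ == '__main__':
    req = ListPublicipsRequest(status=["ACTIVE"], ip_version=[4], limit=100)
    payload = req.to_dict()   # plain dict; fields in sensitive_list would be masked
    print(req)                # __repr__ serializes via to_str() / simplejson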
| e6946a12eedfd9d91bbd8675a722055fc184a5f7 | 160f08e768d7271f9522ad2597ac4ee79c04477a | /src/c3nav/mesh/messages.py | 235f10f810eb496f293197818684fe23b83bcef9 | ["Apache-2.0"] | permissive | c3nav/c3nav | 6254724dfc8589ee03c6028577befd7c65b05857 | 1a4ef5caa06ddacc8d9370b5adcee248fd4f55f7 | refs/heads/main | 2023-08-04T08:36:18.431458 | 2023-07-24T09:57:18 | 2023-07-24T09:57:18 | 56,852,994 | 140 | 47 | Apache-2.0 | 2023-07-05T22:55:27 | 2016-04-22T12:13:51 | Python | UTF-8 | Python | false | false | 5,192 | py | messages.py |
from dataclasses import asdict, dataclass, field, fields, is_dataclass
from enum import IntEnum, unique
from typing import TypeVar
from c3nav.mesh.dataformats import (BoolFormat, FixedStrFormat, HexFormat, LedConfig, LedConfigFormat,
MacAddressesListFormat, MacAddressFormat, SimpleFormat, VarStrFormat)
ROOT_ADDRESS = '00:00:00:00:00:00'
PARENT_ADDRESS = '00:00:00:ff:ff:ff'
NO_LAYER = 0xFF
@unique
class MessageType(IntEnum):
ECHO_REQUEST = 0x01
ECHO_RESPONSE = 0x02
MESH_SIGNIN = 0x03
MESH_LAYER_ANNOUNCE = 0x04
MESH_ADD_DESTINATIONS = 0x05
MESH_REMOVE_DESTINATIONS = 0x06
CONFIG_DUMP = 0x10
CONFIG_FIRMWARE = 0x11
CONFIG_POSITION = 0x12
CONFIG_LED = 0x13
CONFIG_UPLINK = 0x14
M = TypeVar('M', bound='Message')
@dataclass
class Message:
dst: str = field(metadata={'format': MacAddressFormat()})
src: str = field(metadata={'format': MacAddressFormat()})
msg_id: int = field(metadata={'format': SimpleFormat('B')}, init=False, repr=False)
msg_types = {}
# noinspection PyMethodOverriding
def __init_subclass__(cls, /, msg_id=None, **kwargs):
super().__init_subclass__(**kwargs)
if msg_id:
cls.msg_id = msg_id
if msg_id in Message.msg_types:
raise TypeError('duplicate use of msg_id %d' % msg_id)
Message.msg_types[msg_id] = cls
def encode(self):
data = bytes()
for field_ in fields(self):
data += field_.metadata['format'].encode(getattr(self, field_.name))
return data
@classmethod
def decode(cls, data: bytes) -> M:
# print('decode', data.hex(' '))
klass = cls.msg_types[data[12]]
values = {}
for field_ in fields(klass):
values[field_.name], data = field_.metadata['format'].decode(data)
values.pop('msg_id')
return klass(**values)
def tojson(self):
return asdict(self)
@classmethod
def fromjson(cls, data) -> M:
kwargs = data.copy()
klass = cls.msg_types[kwargs.pop('msg_id')]
for field_ in fields(klass):
if is_dataclass(field_.type):
kwargs[field_.name] = field_.type.fromjson(kwargs[field_.name])
return klass(**kwargs)
@dataclass
class EchoRequestMessage(Message, msg_id=MessageType.ECHO_REQUEST):
content: str = field(default='', metadata={'format': VarStrFormat()})
@dataclass
class EchoResponseMessage(Message, msg_id=MessageType.ECHO_RESPONSE):
content: str = field(default='', metadata={'format': VarStrFormat()})
@dataclass
class MeshSigninMessage(Message, msg_id=MessageType.MESH_SIGNIN):
pass
@dataclass
class MeshLayerAnnounceMessage(Message, msg_id=MessageType.MESH_LAYER_ANNOUNCE):
layer: int = field(metadata={'format': SimpleFormat('B')})
@dataclass
class MeshAddDestinationsMessage(Message, msg_id=MessageType.MESH_ADD_DESTINATIONS):
mac_addresses: list[str] = field(default_factory=list, metadata={'format': MacAddressesListFormat()})
@dataclass
class MeshRemoveDestinationsMessage(Message, msg_id=MessageType.MESH_REMOVE_DESTINATIONS):
mac_addresses: list[str] = field(default_factory=list, metadata={'format': MacAddressesListFormat()})
@dataclass
class ConfigDumpMessage(Message, msg_id=MessageType.CONFIG_DUMP):
pass
@dataclass
class ConfigFirmwareMessage(Message, msg_id=MessageType.CONFIG_FIRMWARE):
magic_word: int = field(metadata={'format': SimpleFormat('I')}, repr=False)
secure_version: int = field(metadata={'format': SimpleFormat('I')})
reserv1: list[int] = field(metadata={'format': SimpleFormat('2I')}, repr=False)
version: str = field(metadata={'format': FixedStrFormat(32)})
project_name: str = field(metadata={'format': FixedStrFormat(32)})
compile_time: str = field(metadata={'format': FixedStrFormat(16)})
compile_date: str = field(metadata={'format': FixedStrFormat(16)})
idf_version: str = field(metadata={'format': FixedStrFormat(32)})
app_elf_sha256: str = field(metadata={'format': HexFormat(32)})
reserv2: list[int] = field(metadata={'format': SimpleFormat('20I')}, repr=False)
@dataclass
class ConfigPositionMessage(Message, msg_id=MessageType.CONFIG_POSITION):
x_pos: int = field(metadata={'format': SimpleFormat('I')})
y_pos: int = field(metadata={'format': SimpleFormat('I')})
z_pos: int = field(metadata={'format': SimpleFormat('H')})
@dataclass
class ConfigLedMessage(Message, msg_id=MessageType.CONFIG_LED):
led_config: LedConfig = field(metadata={'format': LedConfigFormat()})
@dataclass
class ConfigUplinkMessage(Message, msg_id=MessageType.CONFIG_UPLINK):
enabled: bool = field(metadata={'format': BoolFormat()})
ssid: str = field(metadata={'format': FixedStrFormat(32)})
password: str = field(metadata={'format': FixedStrFormat(64)})
channel: int = field(metadata={'format': SimpleFormat('B')})
udp: bool = field(metadata={'format': BoolFormat()})
ssl: bool = field(metadata={'format': BoolFormat()})
host: str = field(metadata={'format': FixedStrFormat(64)})
port: int = field(metadata={'format': SimpleFormat('H')})
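# Illustrative sketch, not from the original module: an encode/decode round
# trip, assuming the formats in c3nav.mesh.dataformats pack values as their
# names suggest (6-byte MAC addresses, then the one-byte msg_id at offset 12).
if __name__ == '__main__':
    msg = MeshLayerAnnounceMessage(dst=PARENT_ADDRESS, src='24:6f:28:aa:bb:cc', layer=2)
    data = msg.encode()              # dst (6 bytes) + src (6 bytes) + msg_id + payload
    decoded = Message.decode(data)   # dispatches on data[12], the msg_id byte
    assert decoded == msg            # holds if the MAC format round-trips exactly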
| 0fa1cc7acd73316c19a66a674dab13e872aa0f57 | eb9f655206c43c12b497c667ba56a0d358b6bc3a | /python/testData/refactoring/move/cleanupImportsAfterMove/before/src/use2.py | 83a79a1d21e7a5e014ebf95ac5989ab53d24026e | ["Apache-2.0"] | permissive | JetBrains/intellij-community | 2ed226e200ecc17c037dcddd4a006de56cd43941 | 05dbd4575d01a213f3f4d69aa4968473f2536142 | refs/heads/master | 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 | Apache-2.0 | 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null | UTF-8 | Python | false | false | 85 | py | use2.py |
from main import *
from lib import B
from lib import A, C
print(C1, C2, C3, A, B, C)
| c1f5796a5d5bdf52b0114c5e2b72dd3de59e1864 | 1d91dc7db54526508a61d78ad915192ad076ddcf | /pymzml/file_classes/bytesMzml.py | 61e8c5c14cfb186d64ded6af87ebeb8ad417b95d | ["MIT"] | permissive | pymzml/pymzML | 1ef331c435fa616a2f558ea0b49784a9f33880ba | 5138390bc7e4b7590243b0e9e5f2f5cbb7e26bde | refs/heads/dev | 2023-09-06T05:22:55.077586 | 2023-03-20T16:05:59 | 2023-03-20T16:05:59 | 3,152,006 | 142 | 89 | MIT | 2023-09-07T08:43:47 | 2012-01-11T07:45:43 | Python | UTF-8 | Python | false | false | 1,158 | py | bytesMzml.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Interface for binary streams of uncompressed mzML.
@author: Sylvain Le Bon
"""
from io import TextIOWrapper
from .. import regex_patterns
from .standardMzml import StandardMzml
class BytesMzml(StandardMzml):
def __init__(self, binary, encoding, build_index_from_scratch=False):
"""
Initialize wrapper object for binary streams of uncompressed mzML.
Arguments:
binary (bytes stream) : binary stream with the mzML content
encoding (str) : encoding of the file
"""
self.binary = binary
self.file_handler = self.get_file_handler(encoding)
self.offset_dict = dict()
self.spec_open = regex_patterns.SPECTRUM_OPEN_PATTERN
self.spec_close = regex_patterns.SPECTRUM_CLOSE_PATTERN
if build_index_from_scratch is True:
seeker = self.get_binary_file_handler()
self._build_index_from_scratch(seeker)
seeker.close()
def get_binary_file_handler(self):
self.binary.seek(0)
return self.binary
def get_file_handler(self, encoding):
return TextIOWrapper(self.binary, encoding=encoding)
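# Illustrative sketch, not from the original module: wrapping an in-memory
# buffer; `raw` is a placeholder for uncompressed mzML content.
if __name__ == '__main__':
    import io
    raw = b'<mzML ...>'  # placeholder bytes
    mzml = BytesMzml(io.BytesIO(raw), encoding='utf-8')
    seeker = mzml.get_binary_file_handler()  # same BytesIO, rewound to offset 0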
| 97aa0c11f92b31c02e29972a25a4a5366c4ca495 | 2481cde6506743565dff2b405a2396daf208ab3e | /src/events/migrations/0032_event_standings_urls.py | 154ac7358df5a08b70476beb765856e6e4abb8b6 | ["Apache-2.0"] | permissive | aropan/clist | 4819a3036d179595e4df8c646aff2ed593b9dad3 | 5c805b2af71acee97f993f19d8d4e229f7f5b411 | refs/heads/master | 2023-08-31T11:15:17.987776 | 2023-08-27T21:51:14 | 2023-08-27T21:52:16 | 187,111,853 | 276 | 35 | Apache-2.0 | 2023-09-06T18:42:53 | 2019-05-16T22:57:03 | Python | UTF-8 | Python | false | false | 451 | py | 0032_event_standings_urls.py |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-10-20 23:53
from django.db import migrations
import jsonfield.fields
class Migration(migrations.Migration):
dependencies = [
('events', '0031_event_information'),
]
operations = [
migrations.AddField(
model_name='event',
name='standings_urls',
field=jsonfield.fields.JSONField(blank=True, default={}),
),
]
| 2ec3dfa79b060a4a7423f11281f8b6d94434431a | 605886d19259af074c8b8aa8637f8dc17d1be959 | /test-suite/exonum-py-tests/exonum_tests/freeze.py | 01402c8c6b2f04534cd1bf87594db214fdcbac70 | ["Apache-2.0"] | permissive | exonum/exonum | 6a412a37517d1352e613597f0186290e268599f7 | cf42a554b29bd1f036f39aab67ff68f7dfbadf51 | refs/heads/master | 2023-03-11T06:09:11.074153 | 2023-03-10T12:11:41 | 2023-03-10T12:11:41 | 80,729,805 | 1,334 | 332 | Apache-2.0 | 2023-05-25T16:02:20 | 2017-02-02T13:56:57 | Rust | UTF-8 | Python | false | false | 5,672 | py | freeze.py |
import unittest
from exonum_client import ExonumClient
from exonum_client.crypto import KeyPair
from exonum_launcher.configuration import Configuration
from exonum_launcher.launcher import Launcher
from suite import (
assert_processes_exited_successfully,
run_4_nodes,
wait_network_to_start,
ExonumCryptoAdvancedClient,
generate_config,
)
class FreezeTests(unittest.TestCase):
"""Tests for a checking service freezing mechanism."""
def setUp(self):
self.network = run_4_nodes("exonum-cryptocurrency-advanced")
self.addCleanup(self._tear_down, False)
wait_network_to_start(self.network)
def test_freeze_service(self):
host, public_port, private_port = self.network.api_address(0)
client = ExonumClient(host, public_port, private_port)
# Create wallet
alice_keys = KeyPair.generate()
with ExonumCryptoAdvancedClient(client) as crypto_client:
crypto_client.create_wallet(alice_keys, "Alice")
with client.create_subscriber("transactions") as subscriber:
subscriber.wait_for_new_event()
alice_balance = crypto_client.get_balance(alice_keys)
self.assertEqual(alice_balance, 100)
# Freeze the service
instances = {"crypto": {"artifact": "cryptocurrency", "action": "freeze"}}
cryptocurrency_advanced_config_dict = generate_config(self.network, instances=instances, artifact_action="none")
cryptocurrency_advanced_config = Configuration(cryptocurrency_advanced_config_dict)
with Launcher(cryptocurrency_advanced_config) as launcher:
launcher.deploy_all()
launcher.wait_for_deploy()
launcher.start_all()
launcher.wait_for_start()
# Check that the service status has been changed to `frozen`.
for service in client.public_api.available_services().json()["services"]:
if service["spec"]["name"] == "crypto":
self.assertEqual(service["status"]["type"], "frozen")
# Try to create a new wallet. The operation should fail.
with ExonumCryptoAdvancedClient(client) as crypto_client:
bob_keys = KeyPair.generate()
response = crypto_client.create_wallet(bob_keys, "Bob")
self.assertEqual(response.status_code, 400)
# Because the service is frozen, the transaction should be rejected.
self.assertEqual(response.json()["title"], "Failed to add transaction to memory pool")
# Check that we can use service endpoints for data retrieving. Check wallet once again.
with ExonumCryptoAdvancedClient(client) as crypto_client:
alice_balance = crypto_client.get_balance(alice_keys)
self.assertEqual(alice_balance, 100)
def test_resume_after_freeze_service(self):
host, public_port, private_port = self.network.api_address(0)
client = ExonumClient(host, public_port, private_port)
# Create wallet
with ExonumCryptoAdvancedClient(client) as crypto_client:
alice_keys = KeyPair.generate()
crypto_client.create_wallet(alice_keys, "Alice")
with client.create_subscriber("transactions") as subscriber:
subscriber.wait_for_new_event()
alice_balance = crypto_client.get_balance(alice_keys)
self.assertEqual(alice_balance, 100)
# Freeze the service
instances = {"crypto": {"artifact": "cryptocurrency", "action": "freeze"}}
cryptocurrency_advanced_config_dict = generate_config(self.network, instances=instances, artifact_action="none")
cryptocurrency_advanced_config = Configuration(cryptocurrency_advanced_config_dict)
with Launcher(cryptocurrency_advanced_config) as launcher:
launcher.deploy_all()
launcher.wait_for_deploy()
launcher.start_all()
launcher.wait_for_start()
# Resume the service
instances = {"crypto": {"artifact": "cryptocurrency", "action": "resume"}}
cryptocurrency_advanced_config_dict = generate_config(self.network, instances=instances, artifact_action="none")
cryptocurrency_advanced_config = Configuration(cryptocurrency_advanced_config_dict)
with Launcher(cryptocurrency_advanced_config) as launcher:
launcher.deploy_all()
launcher.wait_for_deploy()
launcher.start_all()
launcher.wait_for_start()
# Check that the service status has been changed to `active`.
for service in client.public_api.available_services().json()["services"]:
if service["spec"]["name"] == "crypto":
self.assertEqual(service["status"]["type"], "active")
# Check that an ability to create wallets has been restored.
with ExonumCryptoAdvancedClient(client) as crypto_client:
bob_keys = KeyPair.generate()
crypto_client.create_wallet(bob_keys, "Bob")
with client.create_subscriber("transactions") as subscriber:
subscriber.wait_for_new_event()
bob_balance = crypto_client.get_balance(bob_keys)
self.assertEqual(bob_balance, 100)
def _tear_down(self, check_exit_codes=True):
"""Performs cleanup, removing network files."""
if self.network is not None:
outputs = self.network.stop()
self.network.deinitialize()
self.network = None
if check_exit_codes:
assert_processes_exited_successfully(self, outputs)
def tearDown(self):
self._tear_down()
| c382a9447b3704e994ae7ea07a6511c46b42c91d | f2d4c0eac2b12a64c499b533f3fe0883262f6293 | /examples/mandelbrot.py | b2fef571ec7d88e7e5998a6198487b7c8abe5904 | ["MIT"] | permissive | mozman/svgwrite | 716ef150734b42f42474fdaecc11eb646ac29bab | cd10a7ed1982de77ba85ff4556e154187d7f14bc | refs/heads/master | 2022-08-10T03:19:14.453560 | 2022-07-14T13:59:39 | 2022-07-14T13:59:39 | 79,704,670 | 512 | 109 | NOASSERTION | 2022-07-14T13:34:27 | 2017-01-22T08:29:23 | Python | UTF-8 | Python | false | false | 1,665 | py | mandelbrot.py |
#!/usr/bin/env python
#coding:utf-8
# Author: mozman
# Purpose: svg examples
# Created: 08.09.2010
# Copyright (C) 2010, Manfred Moitzi
# License: MIT License
try:
import svgwrite
except ImportError:
# if svgwrite is not 'installed' append parent dir of __file__ to sys.path
import sys
from pathlib import Path
sys.path.insert(0, str(Path(__file__).resolve().parent.parent))
import svgwrite
from svgwrite import rgb
def mandelbrot(name):
## {{{ http://code.activestate.com/recipes/577111/ (r2)
# Mandelbrot fractal
# FB - 201003254
def putpixel(pos, color):
mandelbrot_group.add(dwg.circle(center=pos, r=.5, fill=color))
# image size
imgx = 160
imgy = 100
# drawing defines the output size
dwg = svgwrite.Drawing(name, ('32cm', '20cm'), debug=True)
# define a user coordinate system with viewbox()
dwg.viewbox(0, 0, imgx, imgy)
mandelbrot_group = dwg.add(dwg.g(stroke_width=0, stroke='none'))
# drawing area
xa = -2.0
xb = 1.0
ya = -1.5
yb = 1.5
maxIt = 255 # max iterations allowed
for y in range(imgy):
zy = y * (yb - ya) / (imgy - 1) + ya
for x in range(imgx):
zx = x * (xb - xa) / (imgx - 1) + xa
z = zx + zy * 1j
c = z
for i in range(maxIt):
if abs(z) > 2: break
z = z * z + c
putpixel((x, y), rgb(i % 4 * 64, i % 8 * 32, i % 16 * 16))
dwg.save()
## end of http://code.activestate.com/recipes/577111/ }}}
if __name__ == '__main__':
mandelbrot("mandelbrot.svg")
|
b7bb383571c9f523fedb2430a0e5aac437878401
|
360ae1188ad79e71ccc72da0b9ae709bda678f91
|
/ryu/services/protocols/bgp/utils/bgp.py
|
9f28802c52827b492953bf2f1d3510516238f7a0
|
[
"Apache-2.0"
] |
permissive
|
faucetsdn/ryu
|
47b3523e7ccb381f3bdf2877a3f9f01cb1876054
|
d6cda4f427ff8de82b94c58aa826824a106014c2
|
refs/heads/master
| 2023-09-05T06:37:21.991029
| 2022-06-09T23:09:40
| 2022-06-09T23:09:40
| 2,945,007
| 385
| 215
|
Apache-2.0
| 2022-11-13T10:50:25
| 2011-12-09T03:43:50
|
Python
|
UTF-8
|
Python
| false
| false
| 10,693
|
py
|
bgp.py
|
# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities related to bgp data types and models.
"""
import logging
import netaddr
from ryu.lib import ip
from ryu.lib.packet.bgp import (
BGPUpdate,
RF_IPv4_UC,
RF_IPv6_UC,
RF_IPv4_VPN,
RF_IPv6_VPN,
RF_L2_EVPN,
RF_IPv4_FLOWSPEC,
RF_IPv6_FLOWSPEC,
RF_VPNv4_FLOWSPEC,
RF_VPNv6_FLOWSPEC,
RF_L2VPN_FLOWSPEC,
RF_RTC_UC,
RouteTargetMembershipNLRI,
BGP_ATTR_TYPE_MULTI_EXIT_DISC,
BGPPathAttributeMultiExitDisc,
BGPPathAttributeMpUnreachNLRI,
BGPPathAttributeAs4Path,
BGPPathAttributeAs4Aggregator,
BGPPathAttributeUnknown,
BGP_ATTR_FLAG_OPTIONAL,
BGP_ATTR_FLAG_TRANSITIVE,
BGPTwoOctetAsSpecificExtendedCommunity,
BGPIPv4AddressSpecificExtendedCommunity,
BGPFourOctetAsSpecificExtendedCommunity,
BGPFlowSpecTrafficRateCommunity,
BGPFlowSpecTrafficActionCommunity,
BGPFlowSpecRedirectCommunity,
BGPFlowSpecTrafficMarkingCommunity,
BGPFlowSpecVlanActionCommunity,
BGPFlowSpecTPIDActionCommunity,
)
from ryu.services.protocols.bgp.info_base.rtc import RtcPath
from ryu.services.protocols.bgp.info_base.ipv4 import Ipv4Path
from ryu.services.protocols.bgp.info_base.ipv6 import Ipv6Path
from ryu.services.protocols.bgp.info_base.vpnv4 import Vpnv4Path
from ryu.services.protocols.bgp.info_base.vpnv6 import Vpnv6Path
from ryu.services.protocols.bgp.info_base.evpn import EvpnPath
from ryu.services.protocols.bgp.info_base.ipv4fs import IPv4FlowSpecPath
from ryu.services.protocols.bgp.info_base.ipv6fs import IPv6FlowSpecPath
from ryu.services.protocols.bgp.info_base.vpnv4fs import VPNv4FlowSpecPath
from ryu.services.protocols.bgp.info_base.vpnv6fs import VPNv6FlowSpecPath
from ryu.services.protocols.bgp.info_base.l2vpnfs import L2VPNFlowSpecPath
LOG = logging.getLogger('utils.bgp')
# RouteFamily to path sub-class mapping.
_ROUTE_FAMILY_TO_PATH_MAP = {RF_IPv4_UC: Ipv4Path,
RF_IPv6_UC: Ipv6Path,
RF_IPv4_VPN: Vpnv4Path,
RF_IPv6_VPN: Vpnv6Path,
RF_L2_EVPN: EvpnPath,
RF_IPv4_FLOWSPEC: IPv4FlowSpecPath,
RF_IPv6_FLOWSPEC: IPv6FlowSpecPath,
RF_VPNv4_FLOWSPEC: VPNv4FlowSpecPath,
RF_VPNv6_FLOWSPEC: VPNv6FlowSpecPath,
RF_L2VPN_FLOWSPEC: L2VPNFlowSpecPath,
RF_RTC_UC: RtcPath}
def create_path(src_peer, nlri, **kwargs):
route_family = nlri.ROUTE_FAMILY
    assert route_family in _ROUTE_FAMILY_TO_PATH_MAP
path_cls = _ROUTE_FAMILY_TO_PATH_MAP.get(route_family)
return path_cls(src_peer, nlri, src_peer.version_num, **kwargs)
def clone_path_and_update_med_for_target_neighbor(path, med):
assert path and med
route_family = path.route_family
    if route_family not in _ROUTE_FAMILY_TO_PATH_MAP:
raise ValueError('Clone is not supported for address-family %s' %
route_family)
path_cls = _ROUTE_FAMILY_TO_PATH_MAP.get(route_family)
pattrs = path.pathattr_map
pattrs[BGP_ATTR_TYPE_MULTI_EXIT_DISC] = BGPPathAttributeMultiExitDisc(med)
return path_cls(
path.source, path.nlri, path.source_version_num,
pattrs=pattrs, nexthop=path.nexthop,
is_withdraw=path.is_withdraw,
med_set_by_target_neighbor=True
)
def clone_rtcpath_update_rt_as(path, new_rt_as):
"""Clones given RT NLRI `path`, and updates it with new RT_NLRI AS.
Parameters:
- `path`: (Path) RT_NLRI path
- `new_rt_as`: AS value of cloned paths' RT_NLRI
"""
assert path and new_rt_as
if not path or path.route_family != RF_RTC_UC:
raise ValueError('Expected RT_NLRI path')
old_nlri = path.nlri
new_rt_nlri = RouteTargetMembershipNLRI(new_rt_as, old_nlri.route_target)
return RtcPath(path.source, new_rt_nlri, path.source_version_num,
pattrs=path.pathattr_map, nexthop=path.nexthop,
is_withdraw=path.is_withdraw)
def from_inet_ptoi(bgp_id):
"""Convert an IPv4 address string format to a four byte long.
"""
four_byte_id = None
try:
four_byte_id = ip.ipv4_to_int(bgp_id)
except ValueError:
LOG.debug('Invalid bgp id given for conversion to integer value %s',
bgp_id)
return four_byte_id
def get_unknown_opttrans_attr(path):
"""Utility method that gives a `dict` of unknown and unsupported optional
transitive path attributes of `path`.
Returns dict: <key> - attribute type code, <value> - unknown path-attr.
"""
path_attrs = path.pathattr_map
unknown_opt_tran_attrs = {}
for _, attr in path_attrs.items():
if (isinstance(attr, BGPPathAttributeUnknown) and
attr.flags & (BGP_ATTR_FLAG_OPTIONAL |
BGP_ATTR_FLAG_TRANSITIVE)) or \
isinstance(attr, BGPPathAttributeAs4Path) or \
isinstance(attr, BGPPathAttributeAs4Aggregator):
unknown_opt_tran_attrs[attr.type] = attr
return unknown_opt_tran_attrs
def create_end_of_rib_update():
"""Construct end-of-rib (EOR) Update instance."""
mpunreach_attr = BGPPathAttributeMpUnreachNLRI(RF_IPv4_VPN.afi,
RF_IPv4_VPN.safi,
[])
eor = BGPUpdate(path_attributes=[mpunreach_attr])
return eor
# BGP update message instance that can be used as an End-of-RIB marker.
UPDATE_EOR = create_end_of_rib_update()
def create_rt_extended_community(value, subtype=2):
"""
Creates an instance of the BGP Route Target Community (if "subtype=2")
or Route Origin Community ("subtype=3").
:param value: String of Route Target or Route Origin value.
:param subtype: Subtype of Extended Community.
:return: An instance of Route Target or Route Origin Community.
"""
global_admin, local_admin = value.split(':')
local_admin = int(local_admin)
if global_admin.isdigit() and 0 <= int(global_admin) <= 0xffff:
ext_com = BGPTwoOctetAsSpecificExtendedCommunity(
subtype=subtype,
as_number=int(global_admin),
local_administrator=local_admin)
elif global_admin.isdigit() and 0xffff < int(global_admin) <= 0xffffffff:
ext_com = BGPFourOctetAsSpecificExtendedCommunity(
subtype=subtype,
as_number=int(global_admin),
local_administrator=local_admin)
elif ip.valid_ipv4(global_admin):
ext_com = BGPIPv4AddressSpecificExtendedCommunity(
subtype=subtype,
ipv4_address=global_admin,
local_administrator=local_admin)
else:
raise ValueError(
'Invalid Route Target or Route Origin value: %s' % value)
return ext_com
def create_v4flowspec_actions(actions=None):
"""
Create list of traffic filtering actions
for Ipv4 Flow Specification and VPNv4 Flow Specification.
    ``actions`` specifies Traffic Filtering Actions of
Flow Specification as a dictionary type value.
Returns a list of extended community values.
"""
from ryu.services.protocols.bgp.api.prefix import (
FLOWSPEC_ACTION_TRAFFIC_RATE,
FLOWSPEC_ACTION_TRAFFIC_ACTION,
FLOWSPEC_ACTION_REDIRECT,
FLOWSPEC_ACTION_TRAFFIC_MARKING,
)
# Supported action type for IPv4 and VPNv4.
action_types = {
FLOWSPEC_ACTION_TRAFFIC_RATE: BGPFlowSpecTrafficRateCommunity,
FLOWSPEC_ACTION_TRAFFIC_ACTION: BGPFlowSpecTrafficActionCommunity,
FLOWSPEC_ACTION_REDIRECT: BGPFlowSpecRedirectCommunity,
FLOWSPEC_ACTION_TRAFFIC_MARKING: BGPFlowSpecTrafficMarkingCommunity,
}
return _create_actions(actions, action_types)
def create_v6flowspec_actions(actions=None):
"""
Create list of traffic filtering actions
for Ipv6 Flow Specification and VPNv6 Flow Specification.
"FLOWSPEC_ACTION_REDIRECT_IPV6" is not implemented yet.
"""
from ryu.services.protocols.bgp.api.prefix import (
FLOWSPEC_ACTION_TRAFFIC_RATE,
FLOWSPEC_ACTION_TRAFFIC_ACTION,
FLOWSPEC_ACTION_REDIRECT,
FLOWSPEC_ACTION_TRAFFIC_MARKING,
)
# Supported action type for IPv6 and VPNv6.
action_types = {
FLOWSPEC_ACTION_TRAFFIC_RATE: BGPFlowSpecTrafficRateCommunity,
FLOWSPEC_ACTION_TRAFFIC_ACTION: BGPFlowSpecTrafficActionCommunity,
FLOWSPEC_ACTION_REDIRECT: BGPFlowSpecRedirectCommunity,
FLOWSPEC_ACTION_TRAFFIC_MARKING: BGPFlowSpecTrafficMarkingCommunity,
}
return _create_actions(actions, action_types)
def create_l2vpnflowspec_actions(actions=None):
"""
Create list of traffic filtering actions for L2VPN Flow Specification.
"""
from ryu.services.protocols.bgp.api.prefix import (
FLOWSPEC_ACTION_TRAFFIC_RATE,
FLOWSPEC_ACTION_TRAFFIC_ACTION,
FLOWSPEC_ACTION_REDIRECT,
FLOWSPEC_ACTION_TRAFFIC_MARKING,
FLOWSPEC_ACTION_VLAN,
FLOWSPEC_ACTION_TPID,
)
# Supported action type for L2VPN.
action_types = {
FLOWSPEC_ACTION_TRAFFIC_RATE: BGPFlowSpecTrafficRateCommunity,
FLOWSPEC_ACTION_TRAFFIC_ACTION: BGPFlowSpecTrafficActionCommunity,
FLOWSPEC_ACTION_REDIRECT: BGPFlowSpecRedirectCommunity,
FLOWSPEC_ACTION_TRAFFIC_MARKING: BGPFlowSpecTrafficMarkingCommunity,
FLOWSPEC_ACTION_VLAN: BGPFlowSpecVlanActionCommunity,
FLOWSPEC_ACTION_TPID: BGPFlowSpecTPIDActionCommunity,
}
return _create_actions(actions, action_types)
def _create_actions(actions, action_types):
communities = []
if actions is None:
return communities
for name, action in actions.items():
cls_ = action_types.get(name, None)
if cls_:
communities.append(cls_(**action))
else:
raise ValueError(
'Unsupported flowspec action %s' % name)
return communities
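# A hedged usage sketch of create_rt_extended_community(); the values below are
# illustrative and simply exercise the three dispatch branches above:
#
#     rt2 = create_rt_extended_community('65000:100')              # two-octet AS admin
#     rt4 = create_rt_extended_community('4200000000:1')           # four-octet AS admin
#     ro = create_rt_extended_community('192.0.2.1:1', subtype=3)  # IPv4 admin, Route Origin
#     create_rt_extended_community('bad-value:1')                  # raises ValueError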
|
fa99e2e07b57ab4b33fe610795bd8f2a0bd7fe7e
|
7f3967e139e1ecb6c17b63d7f6ca907caea4928a
|
/examples/tenant_multi_types/tenant_multi_types_tutorial/urls_type2.py
|
0c75d9c98675b66ae376f60493e4e64dbb27d5f1
|
[
"MIT"
] |
permissive
|
django-tenants/django-tenants
|
e1a274d6b74093b00f07bb1c5e6eb5c8849bc468
|
bfba21005e2cbfcae5789f0b60bec49f864845a8
|
refs/heads/master
| 2023-08-28T13:10:46.712377
| 2023-08-14T08:15:18
| 2023-08-14T08:15:18
| 37,154,540
| 773
| 205
|
MIT
| 2023-09-12T11:45:41
| 2015-06-09T19:48:59
|
Python
|
UTF-8
|
Python
| false
| false
| 236
|
py
|
urls_type2.py
|
from tenant_type_two_only.views import TenantTypeTwoView
from django.urls import path
from django.contrib import admin
urlpatterns = [
path('', TenantTypeTwoView.as_view(), name="index"),
path('admin/', admin.site.urls),
]
|
cf6c5d237428b719e029c2a6540e8ff3585e44c2
|
759bcef1302721fc24819510c2f4fd38a6cf7dad
|
/app/base.py
|
032a5ee7c5ae4d9a73276b78a6680bf224bdf868
|
[
"BSD-2-Clause"
] |
permissive
|
Crapworks/ceph-dash
|
68adfd460615cec238a9adadd561ef9b58d9371d
|
00d354beeb9ae92ac75d789c598621048a550a96
|
refs/heads/master
| 2023-08-25T07:58:53.532941
| 2023-08-11T09:33:33
| 2023-08-11T09:33:33
| 17,280,701
| 435
| 156
|
BSD-2-Clause
| 2023-08-11T09:33:34
| 2014-02-28T08:57:18
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 583
|
py
|
base.py
|
from flask import Blueprint
from flask.views import MethodView
class ApiResource(MethodView):
endpoint = None
url_prefix = None
url_rules = {}
@classmethod
def as_blueprint(cls, name=None):
name = name or cls.endpoint
bp = Blueprint(name, cls.__module__, url_prefix=cls.url_prefix)
for endpoint, options in cls.url_rules.items():
url_rule = options.get('rule', '')
defaults = options.get('defaults', {})
bp.add_url_rule(url_rule, defaults=defaults, view_func=cls.as_view(endpoint))
return bp
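# A minimal usage sketch under the conventions this class establishes; the
# resource below is made up for illustration:
#
#     from flask import Flask, jsonify
#
#     class HealthResource(ApiResource):
#         endpoint = 'health'
#         url_prefix = '/health'
#         url_rules = {'index': {'rule': '/'}}
#
#         def get(self):
#             return jsonify(status='ok')
#
#     app = Flask(__name__)
#     app.register_blueprint(HealthResource.as_blueprint())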
|
9fe9bd9b3c2654df1b41eda9308f8987a008a884
|
36437b397a855f3986325f1bfe41d7ced00b703a
|
/tests/persistence/test_outcomes.py
|
46e1185e45d108bb5f8d21d40976a5ff878ed5a7
|
[
"MIT"
] |
permissive
|
nolar/kopf
|
090cd21550e3a86e512a4c9150dfcf5f59ac14e4
|
538df59b88d1aab7b985d703483497f73c6c4783
|
refs/heads/main
| 2023-08-29T20:39:07.128912
| 2023-08-24T15:47:40
| 2023-08-24T15:47:40
| 288,234,242
| 1,627
| 154
|
MIT
| 2023-09-14T12:31:33
| 2020-08-17T16:45:35
|
Python
|
UTF-8
|
Python
| false
| false
| 1,289
|
py
|
test_outcomes.py
|
from kopf._core.actions.execution import Outcome, Result
def test_creation_for_ignored_handlers():
outcome = Outcome(final=True)
assert outcome.final
assert outcome.delay is None
assert outcome.result is None
assert outcome.exception is None
assert not outcome.subrefs
def test_creation_for_results():
result = Result(object())
outcome = Outcome(final=True, result=result)
assert outcome.final
assert outcome.delay is None
assert outcome.result is result
assert outcome.exception is None
assert not outcome.subrefs
def test_creation_for_permanent_errors():
error = Exception()
outcome = Outcome(final=True, exception=error)
assert outcome.final
assert outcome.delay is None
assert outcome.result is None
assert outcome.exception is error
assert not outcome.subrefs
def test_creation_for_temporary_errors():
error = Exception()
outcome = Outcome(final=False, exception=error, delay=123)
assert not outcome.final
assert outcome.delay == 123
assert outcome.result is None
assert outcome.exception is error
assert not outcome.subrefs
def test_creation_with_subrefs():
outcome = Outcome(final=True, subrefs=['sub1', 'sub2'])
assert outcome.subrefs == ['sub1', 'sub2']
|
e5594f4c9487efba1d3411e84c3e818966878d4a
|
c544b4a171b55fb98e2a17aa3068b599301585ac
|
/unittests/Source.py
|
ea2de5d1680002082f56625ca6849799ba1a29b2
|
[
"MIT"
] |
permissive
|
plastex/plastex
|
3c9dbbfd47a2cb83ef919bcfbb1b57723a8a6979
|
a882a62b81e6ae7b8c9454ae2b222ef5c2c14bb1
|
refs/heads/master
| 2023-07-20T04:53:20.325023
| 2023-07-03T10:03:34
| 2023-07-03T10:03:34
| 17,483,722
| 129
| 41
|
NOASSERTION
| 2023-09-04T10:49:23
| 2014-03-06T16:10:23
|
Python
|
UTF-8
|
Python
| false
| false
| 3,545
|
py
|
Source.py
|
import unittest, re
from unittest import TestCase
from plasTeX.TeX import TeX
from plasTeX import Macro
def normalize(s):
return re.sub(r'\s+', r' ', s).strip()
class Source(TestCase):
def testXYMatrix(self):
input = r'\xymatrix{A \ar[d]^b \ar[r]^a &B\ar[d]^c\\ C \ar[r]^d &D}'
s = TeX()
s.input(input)
output = s.parse()
source = normalize(output.source)
assert input.replace(" ", "") == source.replace(" ", ""), '"%s" != "%s"' % (input, source)
def testXYMatrix2(self):
input = r'\xymatrix{A \ar@{->>}[rd] \ar@{^{(}->}[r]&B \ar@{.>}[d]&C \ar@{_{(}->}[l]\ar@{->>}[ld]\\&D}'
s = TeX()
s.input(input)
output = s.parse()
source = normalize(output.source)
assert input.replace(" ", "") == source.replace(" ", ""), '"%s" != "%s"' % (input, source)
def testXYMatrix3(self):
input = r'\xymatrix{A \ar[r]^u_d \ar[rd]^u_d &B \ar[d]^u_d &C \ar[l]^u_d \ar[ld]^u_d\\&D}'
s = TeX()
s.input(input)
output = s.parse()
source = normalize(output.source)
assert input.replace(" ", "") == source.replace(" ", ""), '"%s" != "%s"' % (input, source)
def testList(self):
input = r'\begin{enumerate} \item one \item two \item three \end{enumerate}'
s = TeX()
s.input(input)
output = s.parse()
source = normalize(output.source)
assert input == source, '"%s" != "%s"' % (input, source)
input = r'\item one'
item = output[0].firstChild
source = normalize(item.source)
assert input == source, '"%s" != "%s"' % (input, source)
def testMath(self):
input = r'a $ x^{y_3} $ b'
s = TeX()
s.input(input)
output = s.parse()
source = normalize(output.source)
assert input == source, '"%s" != "%s"' % (input, source)
def testMathCal(self):
input = r'a $ \mathcal A $ b'
s = TeX()
s.input(input)
output = s.parse()
source = normalize(output.source)
assert input == source, '"%s" != "%s"' % (input, source)
def testDisplayMath(self):
input = r'a \[ x^{y_3} \]b'
s = TeX()
s.input(input)
output = s.parse()
source = normalize(output.source)
assert input == source, '"%s" != "%s"' % (input, source)
# \begin{displaymath} ... \end{displaymath} is transformed
# into \[ ...\]
input2 = r'a \begin{displaymath} x^{y_3} \end{displaymath}b'
s = TeX()
s.input(input2)
output = s.parse()
source = normalize(output.source)
assert input == source, '"%s" != "%s"' % (input, source)
def testSection(self):
input = r'\section{Heading 1} foo one \subsection{Heading 2} bar two'
s = TeX()
s.input(input)
output = s.parse()
source = normalize(output.source)
assert input == source, '"%s" != "%s"' % (input, source)
input = r'\subsection{Heading 2} bar two'
item = output[0].lastChild
source = normalize(item.source)
assert input == source, '"%s" != "%s"' % (input, source)
def testTabular(self):
input = r'\begin{tabular}{lll} \hline a & b & c \\[0.4in] 1 & 2 & 3 \end{tabular}'
s = TeX()
s.input(input)
output = s.parse()
source = normalize(output.source)
assert input == source, '"%s" != "%s"' % (input, source)
if __name__ == '__main__':
unittest.main()
|
63b7b609cad5029432de4149f7d7ea5050b3f3f5
|
5de6854181ce3fd2544d43644b3bb9abc923969b
|
/custom_components/samsungtv_custom/samsungctl_080b/art_mode.py
|
7009b34f87e2e0029edcceeea29895873ad32b8a
|
[
"Apache-2.0"
] |
permissive
|
roberodin/ha-samsungtv-custom
|
4b2b8a8f7f7a0c3da2725bb2d310029dd9595d5c
|
d28fa56707fdafde898eefc9afb2f9fdafdfe175
|
refs/heads/master
| 2023-08-03T04:17:37.238629
| 2023-07-30T00:12:32
| 2023-07-30T00:12:32
| 213,551,635
| 127
| 63
|
Apache-2.0
| 2023-07-30T00:12:33
| 2019-10-08T04:53:14
|
Python
|
UTF-8
|
Python
| false
| false
| 21,212
|
py
|
art_mode.py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import threading
import logging
import json
from .utils import LogIt
from .websocket_base import AuxWebsocketBase
logger = logging.getLogger(__name__)
URL_FORMAT = "ws://{}:{}/api/v2/channels/com.samsung.art-app?name={}"
SSL_URL_FORMAT = "wss://{}:{}/api/v2/channels/com.samsung.art-app?name={}"
class ArtMode(AuxWebsocketBase):
def __init__(self, config):
AuxWebsocketBase.__init__(self, config, URL_FORMAT, SSL_URL_FORMAT)
def is_supported(self):
return self.sock is not None
@LogIt
def on_message(self, message):
response = json.loads(message)
        logger.debug(
            self.config.host +
            ' <---- art_mode: ' +
            json.dumps(response, indent=4)
        )
for callback, key, data in self._registered_callbacks[:]:
if key in response and (data is None or response[key] == data):
self._registered_callbacks.remove([callback, key, data])
callback(response)
break
else:
if 'params' in response and 'event' in response['params']:
event = response['params']['event']
if event == 'd2d_service_message':
data = json.loads(response['params']['data'])
if 'event' in data:
if data['event'] == 'go_to_standby':
print(json.dumps(data, indent=4))
elif data['event'] == 'wakeup':
print(json.dumps(data, indent=4))
elif data['event'] == 'art_mode_changed':
print(json.dumps(data, indent=4))
for callback, key, _ in self._registered_callbacks[:]:
if key == data['event']:
self._registered_callbacks.remove(
[callback, key, None]
)
callback(data)
break
def _build_art_app_request(self, request, value=None):
if value is None:
data = dict(
request=request,
id=self._id
)
else:
data = dict(
request=request,
value=value,
id=self._id
)
return dict(
clientIp=self._client_ip,
data=json.dumps(data),
deviceName=self._device_name,
event='art_app_request',
to='host'
)
@property
def motion_timer(self):
"""
{
"method":"ms.channel.emit",
"params":{
"clientIp":"192.168.1.20",
"data":"{
\"request\":\"get_motion_timer\",
\"id\":\"30852acd-1b7d-4496-8bef-53e1178fa839\"
}",
"deviceName":"W1Bob25lXWlQaG9uZQ==",
"event":"art_app_request",
"to":"host"
}
}
"""
params = self._build_art_app_request('get_motion_timer')
response = []
event = threading.Event()
def motion_timer_callback(data):
"""
{
"method":"ms.channel.emit",
"params":{
"clientIp":"127.0.0.1",
"data":"{
\"id\":\"259320d8-f368-48a4-bf03-789f24a22c0f\",
\"event\":\"motion_timer\",
\"value\":\"30\",
\"valid_values\":\"[\\\"off\\\",\\\"15\\\",\\\"30\\\",\\\"60\\\",\\\"120\\\",\\\"240\\\"]\\n\",
\"target_client_id\":\"84b12082-5f28-461e-8e81-b98ad1c1ffa\"
}",
"deviceName":"Smart Device",
"event":"d2d_service_message",
"to":"84b12082-5f28-461e-8e81-b98ad1c1ffa"
}
}
"""
            # Per the docstring above, ``valid_values`` arrives as a JSON
            # encoded list, so decode it before iterating.
            valid_values = []
            for item in json.loads(data['valid_values']):
                if isinstance(item, str) and item.isdigit():
                    item = int(item)
                valid_values += [item]
            if data['value'].isdigit():
                data['value'] = int(data['value'])
            response.append(
                dict(
                    # ``value`` may be the string 'off', so no forced int().
                    value=data['value'],
                    valid_values=valid_values[:]
                )
            )
event.set()
self.register_receive_callback(
motion_timer_callback,
'motion_timer',
None
)
sent = self.send('ms.channel.emit', **params)
if sent:
event.wait(2.0)
self.unregister_receive_callback(
motion_timer_callback,
'motion_timer',
None
)
if sent:
            if not event.is_set():
                logger.debug(
                    self.config.host +
                    ' -- (get_motion_timer) timed out'
                )
else:
return response[0]
@motion_timer.setter
def motion_timer(self, value):
"""
{
"method":"ms.channel.emit",
"params":{
"clientIp":"192.168.1.20",
"data":"{
\"id\":\"545fc0c1-bd9b-48f5-8444-02f9c519aaec\",
\"value\":\"off\",
\"request\":\"set_motion_timer\"
}",
"deviceName":"W1Bob25lXWlQaG9uZQ==",
"event":"art_app_request",
"to":"host"
}
}
"""
if value != 'off':
value = int(value)
res = self.motion_timer
if res and value in res['valid_values']:
params = self._build_art_app_request(
'set_motion_timer',
str(value)
)
self.send('ms.channel.emit', **params)
@property
def motion_sensitivity(self):
"""
{
"method":"ms.channel.emit",
"params":{
"clientIp":"192.168.1.20",
"data":"{
\"request\":\"get_motion_sensitivity\",
\"id\":\"30852acd-1b7d-4496-8bef-53e1178fa839\"
}",
"deviceName":"W1Bob25lXWlQaG9uZQ==",
"event":"art_app_request",
"to":"host"
}
}
"""
params = self._build_art_app_request('get_motion_sensitivity')
response = []
event = threading.Event()
def motion_sensitivity_callback(data):
"""
{
"method":"ms.channel.emit",
"params":{
"clientIp":"127.0.0.1",
"data":"{
\"id\":\"259320d8-f368-48a4-bf03-789f24a22c0f\",
\"event\":\"motion_sensitivity\",
\"value\":\"2\",
\"min\":\"1\",
\"max\":\"3\",
\"target_client_id\":\"84b12082-5f28-461e-8e81-b98ad1c1ffa\"
}",
"deviceName":"Smart Device",
"event":"d2d_service_message",
"to":"84b12082-5f28-461e-8e81-b98ad1c1ffa"
}
}
"""
response.append(
dict(
value=int(data['value']),
min=int(data['min']),
max=int(data['max'])
)
)
event.set()
self.register_receive_callback(
motion_sensitivity_callback,
'motion_sensitivity',
None
)
sent = self.send('ms.channel.emit', **params)
if sent:
event.wait(2.0)
self.unregister_receive_callback(
motion_sensitivity_callback,
'motion_sensitivity',
None
)
if sent:
            if not event.is_set():
                logger.debug(
                    self.config.host +
                    ' -- (get_motion_sensitivity) timed out'
                )
else:
return response[0]
@motion_sensitivity.setter
def motion_sensitivity(self, value):
"""
{
"method":"ms.channel.emit",
"params":{
"clientIp":"192.168.1.20",
"data":"{
\"id\":\"545fc0c1-bd9b-48f5-8444-02f9c519aaec\",
\"value\":\"2\",
\"request\":\"set_motion_sensitivity\"
}",
"deviceName":"W1Bob25lXWlQaG9uZQ==",
"event":"art_app_request",
"to":"host"
}
}
"""
value = int(value)
res = self.motion_sensitivity
if res and res['min'] <= value <= res['max']:
params = self._build_art_app_request(
'set_motion_sensitivity',
str(value)
)
self.send('ms.channel.emit', **params)
@property
def color_temperature(self):
"""
{
"method":"ms.channel.emit",
"params":{
"clientIp":"192.168.1.20",
"data":"{
\"request\":\"get_color_temperature\",
\"id\":\"30852acd-1b7d-4496-8bef-53e1178fa839\"
}",
"deviceName":"W1Bob25lXWlQaG9uZQ==",
"event":"art_app_request",
"to":"host"
}
}
"""
params = self._build_art_app_request('get_color_temperature')
response = []
event = threading.Event()
def color_temperature_callback(data):
"""
{
"method":"ms.channel.emit",
"params":{
"clientIp":"127.0.0.1",
"data":"{
\"id\":\"259320d8-f368-48a4-bf03-789f24a22c0f\",
\"event\":\"color_temperature\",
\"value\":\"2\",
\"min\":\"1\",
\"max\":\"3\",
\"target_client_id\":\"84b12082-5f28-461e-8e81-b98ad1c1ffa\"
}",
"deviceName":"Smart Device",
"event":"d2d_service_message",
"to":"84b12082-5f28-461e-8e81-b98ad1c1ffa"
}
}
"""
response.append(
dict(
value=int(data['value']),
min=int(data['min']),
max=int(data['max'])
)
)
event.set()
self.register_receive_callback(
color_temperature_callback,
'color_temperature',
None
)
sent = self.send('ms.channel.emit', **params)
if sent:
event.wait(2.0)
self.unregister_receive_callback(
color_temperature_callback,
'color_temperature',
None
)
if sent:
            if not event.is_set():
                logger.debug(
                    self.config.host +
                    ' -- (get_color_temperature) timed out'
                )
else:
return response[0]
@color_temperature.setter
def color_temperature(self, value):
"""
{
"method":"ms.channel.emit",
"params":{
"clientIp":"192.168.1.20",
"data":"{
\"id\":\"545fc0c1-bd9b-48f5-8444-02f9c519aaec\",
\"value\":\"2\",
\"request\":\"set_color_temperature\"
}",
"deviceName":"W1Bob25lXWlQaG9uZQ==",
"event":"art_app_request",
"to":"host"
}
}
"""
value = int(value)
res = self.color_temperature
if res and res['min'] <= value <= res['max']:
params = self._build_art_app_request(
'set_color_temperature',
str(value)
)
self.send('ms.channel.emit', **params)
@property
def brightness(self):
"""
{
"method":"ms.channel.emit",
"params":{
"clientIp":"192.168.1.20",
"data":"{
\"request\":\"get_brightness\",
\"id\":\"30852acd-1b7d-4496-8bef-53e1178fa839\"
}",
"deviceName":"W1Bob25lXWlQaG9uZQ==",
"event":"art_app_request",
"to":"host"
}
}
"""
params = self._build_art_app_request('get_brightness')
response = []
event = threading.Event()
def brightness_callback(data):
"""
{
"method":"ms.channel.emit",
"params":{
"clientIp":"127.0.0.1",
"data":"{
\"id\":\"259320d8-f368-48a4-bf03-789f24a22c0f\",
\"event\":\"brightness\",
\"value\":\"2\",
\"min\":\"1\",
\"max\":\"3\",
\"target_client_id\":\"84b12082-5f28-461e-8e81-b98ad1c1ffa\"
}",
"deviceName":"Smart Device",
"event":"d2d_service_message",
"to":"84b12082-5f28-461e-8e81-b98ad1c1ffa"
}
}
"""
response.append(
dict(
value=int(data['value']),
min=int(data['min']),
max=int(data['max'])
)
)
event.set()
self.register_receive_callback(
brightness_callback,
'brightness',
None
)
sent = self.send('ms.channel.emit', **params)
if sent:
event.wait(2.0)
self.unregister_receive_callback(
brightness_callback,
'brightness',
None
)
if sent:
            if not event.is_set():
                logger.debug(
                    self.config.host +
                    ' -- (get_brightness) timed out'
                )
else:
return response[0]
@brightness.setter
def brightness(self, value):
"""
{
"method":"ms.channel.emit",
"params":{
"clientIp":"192.168.1.20",
"data":"{
\"id\":\"545fc0c1-bd9b-48f5-8444-02f9c519aaec\",
\"value\":\"2\",
\"request\":\"set_brightness\"
}",
"deviceName":"W1Bob25lXWlQaG9uZQ==",
"event":"art_app_request",
"to":"host"
}
}
"""
value = int(value)
res = self.brightness
if res and res['min'] <= value <= res['max']:
params = self._build_art_app_request(
'set_brightness',
str(value)
)
self.send('ms.channel.emit', **params)
@property
def brightness_sensor(self):
"""
{
"method":"ms.channel.emit",
"params":{
"clientIp":"192.168.1.20",
"data":"{
\"request\":\"get_brightness_sensor_setting\",
\"id\":\"713fe2f1-2848-4161-b04c-18dd6753ecaf\"
}",
"deviceName":"W1Bob25lXWlQaG9uZQ==",
"event":"art_app_request",
"to":"host"
}
}
"""
params = self._build_art_app_request('get_brightness_sensor_setting')
response = []
event = threading.Event()
def brightness_sensor_callback(data):
"""
{
"method":"ms.channel.emit",
"params":{
"clientIp":"127.0.0.1",
"data":"{
\"id\":\"713fe2f1-2848-4161-b04c-18dd6753ecaf\",
\"event\":\"brightness_sensor_setting\",
\"value\":\"off\",
\"target_client_id\":\"de34a6-2b5f-46a0-ad19-f1a3d56167\"
}",
"deviceName":"Smart Device",
"event":"d2d_service_message",
"to":"de34a6-2b5f-46a0-ad19-f1a3d56167"
}
}
"""
if data['value'] == 'on':
response.append(True)
else:
response.append(False)
event.set()
self.register_receive_callback(
brightness_sensor_callback,
'brightness_sensor_setting',
None
)
sent = self.send('ms.channel.emit', **params)
if sent:
event.wait(2.0)
self.unregister_receive_callback(
brightness_sensor_callback,
'brightness_sensor_setting',
None
)
if sent:
            if not event.is_set():
                logger.debug(
                    self.config.host +
                    ' -- (get_brightness_sensor_setting) timed out'
                )
else:
return response[0]
@brightness_sensor.setter
def brightness_sensor(self, value):
"""
{
"method":"ms.channel.emit",
"params":{
"clientIp":"192.168.1.20",
"data":"{
\"id\":\"545fc0c1-bd9b-48f5-8444-02f9c519aaec\",
\"value\":\"on\",
\"request\":\"set_brightness_sensor_setting\"
}",
"deviceName":"W1Bob25lXWlQaG9uZQ==",
"event":"art_app_request",
"to":"host"
}
}
"""
params = self._build_art_app_request(
'set_brightness_sensor_setting',
'on' if value else 'off'
)
self.send('ms.channel.emit', **params)
@property
def artmode(self):
"""
{
"method":"ms.channel.emit",
"params":{
"clientIp":"192.168.1.20",
"data":"{
\"request\":
\"get_artmode_status\",
\"id\":\"30852acd-1b7d-4496-8bef-53e1178fa839\"
}",
"deviceName":"W1Bob25lXWlQaG9uZQ==",
"event":"art_app_request",
"to":"host"
}
}
"""
params = self._build_art_app_request('get_artmode_status')
response = []
event = threading.Event()
def artmode_callback(data):
"""
{
"method":"ms.channel.emit",
"params":{
"clientIp":"127.0.0.1",
"data":"{
\"id\":\"259320d8-f368-48a4-bf03-789f24a22c0f\",
\"event\":\"artmode_status\",
\"value\":\"off\",
\"target_client_id\":\"84b12082-5f28-461e-8e81-b98ad1c1ffa\"
}",
"deviceName":"Smart Device",
"event":"d2d_service_message",
"to":"84b12082-5f28-461e-8e81-b98ad1c1ffa"
}
}
"""
if data['value'] == 'on':
response.append(True)
else:
response.append(False)
event.set()
self.register_receive_callback(
artmode_callback,
'artmode_status',
None
)
sent = self.send('ms.channel.emit', **params)
if sent:
event.wait(3.0)
self.unregister_receive_callback(
artmode_callback,
'artmode_status',
None
)
if sent:
            if not event.is_set():
logger.debug(
self.config.host +
' -- (get_artmode_status) timed out'
)
else:
return response[0]
@artmode.setter
def artmode(self, value):
"""
{
"method":"ms.channel.emit",
"params":{
"clientIp":"192.168.1.20",
"data":"{
\"id\":\"545fc0c1-bd9b-48f5-8444-02f9c519aaec\",
\"value\":\"on\",
\"request\":\"set_artmode_status\"
}",
"deviceName":"W1Bob25lXWlQaG9uZQ==",
"event":"art_app_request",
"to":"host"
}
}
"""
params = self._build_art_app_request(
'set_artmode_status',
'on' if value else 'off'
)
self.send('ms.channel.emit', **params)
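# A brief, hedged usage sketch; ``config`` stands for the same configuration
# object used by the rest of this package (host, ports, device name), so the
# lines below are illustrative rather than exact:
#
#     art = ArtMode(config)
#     if art.is_supported():
#         print(art.artmode)   # queries get_artmode_status over the socket
#         art.brightness = 5   # validated against the reported min/max first
#         art.artmode = True   # emits set_artmode_status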
|
9af12935b318146af2c6c39b741307612df1486d
|
ced3f2cdcfe770f2d018a4837c42326fb6e0d27d
|
/deeprobust/image/defense/LIDclassifier.py
|
aeebd68163bd08e4d55ef3460122e5df103ffc91
|
[
"MIT",
"CC0-1.0",
"GPL-1.0-or-later"
] |
permissive
|
DSE-MSU/DeepRobust
|
308a4c03887eb1394a6d68b64ac3d7837b32f395
|
d25d95b33724af9ab0385d5171c989f9b4ff2359
|
refs/heads/master
| 2023-08-11T00:42:08.091214
| 2023-06-29T13:24:19
| 2023-06-29T13:24:19
| 210,014,892
| 978
| 200
|
MIT
| 2023-09-11T02:56:14
| 2019-09-21T16:09:07
|
Python
|
UTF-8
|
Python
| false
| false
| 4,829
|
py
|
LIDclassifier.py
|
"""
This is an implementation of LID detector.
Currently this implementation is under testing.
References
----------
.. [1] Ma, Xingjun, Bo Li, Yisen Wang, Sarah M. Erfani, Sudanthi Wijewickrema, Grant Schoenebeck, Dawn Song, Michael E. Houle, and James Bailey. "Characterizing adversarial subspaces using local intrinsic dimensionality." arXiv preprint arXiv:1801.02613 (2018).
.. [2] Original code: https://github.com/xingjunm/lid_adversarial_subspace_detection
Copyright (c) 2018 Xingjun Ma
"""
from deeprobust.image.netmodels.CNN_multilayer import Net
import numpy as np
from tqdm import tqdm
from keras import backend as K
# get_layer_wise_activations, mle_batch and merge_and_generate_labels are
# expected to come from the reference implementation's util module [2].
# NOTE: ``train`` takes ``self``; it is written as a method to be bound to a
# detector class once this implementation leaves testing.
def train(self, device, train_loader, optimizer, epoch):
"""train process.
Parameters
----------
device :
device(option:'cpu', 'cuda')
train_loader :
train data loader
optimizer :
optimizer
epoch :
epoch
"""
self.model.train()
correct = 0
bs = train_loader.batch_size
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
data, target = data.to(device), target.to(device)
data_adv, output = self.adv_data(data, target, ep = self.epsilon, num_steps = self.num_steps)
loss = self.calculate_loss(output, target)
loss.backward()
optimizer.step()
pred = output.argmax(dim = 1, keepdim = True)
correct += pred.eq(target.view_as(pred)).sum().item()
        # print progress every 10 batches
if batch_idx % 10 == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tAccuracy:{:.2f}%'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item(), 100 * correct/(10*bs)))
correct = 0
def get_lid(model, X_test, X_test_noisy, X_test_adv, k, batch_size):
"""get_lid.
Parameters
----------
model :
model
X_test :
clean data
X_test_noisy :
noisy data
X_test_adv :
adversarial data
k :
k
batch_size :
batch_size
"""
    # Alias the parameters to the short names used throughout the body below.
    X, X_noisy, X_adv = X_test, X_test_noisy, X_test_adv
    # ``dataset`` is the dataset identifier ('mnist', ...) expected by the
    # reference helper and must be defined by the surrounding code.
    funcs = [K.function([model.layers[0].input, K.learning_phase()], [out])
             for out in get_layer_wise_activations(model, dataset)]
lid_dim = len(funcs)
print("Number of layers to estimate: ", lid_dim)
def estimate(i_batch):
start = i_batch * batch_size
end = np.minimum(len(X), (i_batch + 1) * batch_size)
n_feed = end - start
lid_batch = np.zeros(shape=(n_feed, lid_dim))
lid_batch_adv = np.zeros(shape=(n_feed, lid_dim))
lid_batch_noisy = np.zeros(shape=(n_feed, lid_dim))
for i, func in enumerate(funcs):
X_act = func([X[start:end], 0])[0]
X_act = np.asarray(X_act, dtype=np.float32).reshape((n_feed, -1))
# print("X_act: ", X_act.shape)
X_adv_act = func([X_adv[start:end], 0])[0]
X_adv_act = np.asarray(X_adv_act, dtype=np.float32).reshape((n_feed, -1))
# print("X_adv_act: ", X_adv_act.shape)
X_noisy_act = func([X_noisy[start:end], 0])[0]
X_noisy_act = np.asarray(X_noisy_act, dtype=np.float32).reshape((n_feed, -1))
# print("X_noisy_act: ", X_noisy_act.shape)
# random clean samples
# Maximum likelihood estimation of local intrinsic dimensionality (LID)
lid_batch[:, i] = mle_batch(X_act, X_act, k=k)
# print("lid_batch: ", lid_batch.shape)
lid_batch_adv[:, i] = mle_batch(X_act, X_adv_act, k=k)
# print("lid_batch_adv: ", lid_batch_adv.shape)
lid_batch_noisy[:, i] = mle_batch(X_act, X_noisy_act, k=k)
# print("lid_batch_noisy: ", lid_batch_noisy.shape)
return lid_batch, lid_batch_noisy, lid_batch_adv
lids = []
lids_adv = []
lids_noisy = []
n_batches = int(np.ceil(X.shape[0] / float(batch_size)))
for i_batch in tqdm(range(n_batches)):
lid_batch, lid_batch_noisy, lid_batch_adv = estimate(i_batch)
lids.extend(lid_batch)
lids_adv.extend(lid_batch_adv)
lids_noisy.extend(lid_batch_noisy)
# print("lids: ", lids.shape)
# print("lids_adv: ", lids_noisy.shape)
# print("lids_noisy: ", lids_noisy.shape)
lids_normal = np.asarray(lids, dtype=np.float32)
lids_noisy = np.asarray(lids_noisy, dtype=np.float32)
lids_adv = np.asarray(lids_adv, dtype=np.float32)
lids_pos = lids_adv
lids_neg = np.concatenate((lids_normal, lids_noisy))
artifacts, labels = merge_and_generate_labels(lids_pos, lids_neg)
return artifacts, labels
if __name__ == "__main__":
batch_size = 100
k_nearest = 20
#get LID characters
characters, labels = get_lid(model, X_test, X_test_noisy, X_test_adv, k_nearest, batch_size)
data = np.concatenate((characters, labels), axis = 1)
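# The mle_batch() helper referenced above lives in the reference code [2]; it
# applies the maximum-likelihood LID estimator of Ma et al.:
# LID(x) = -(1/k * sum_{i=1..k} log(r_i / r_k))^(-1), with r_i the distances
# to the k nearest neighbours. A minimal numpy sketch of that estimator (an
# assumption-laden illustration, not the exact code from [2]):
#
#     from scipy.spatial.distance import cdist
#
#     def mle_lid(batch, reference, k=20):
#         dists = cdist(batch, reference)
#         # Drop the zero self-distance, keep the k nearest per row.
#         dists = np.sort(dists, axis=1)[:, 1:k + 1]
#         # -(mean of log(r_i / r_k))^-1, estimated per point.
#         return -1.0 / np.mean(np.log(dists / dists[:, -1:]), axis=1)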
|
71d017be7902e8c73623d617ccc83391b8aba830
|
29705cfa764b8800a4f611044bb441ae2dbb517e
|
/examples/multiple_looper_example.py
|
9544095f6af34543ed0cd26a8903bfb39c1dab31
|
[
"MIT"
] |
permissive
|
ctpbee/ctpbee
|
98c720a54999e9c4bb242848a9cd4363f96ea2e1
|
217b73da65931213c1af4733741014d05b3a8bac
|
refs/heads/master
| 2023-03-16T12:47:01.260983
| 2023-03-13T05:49:51
| 2023-03-13T05:49:51
| 202,876,271
| 665
| 186
|
MIT
| 2023-09-12T12:33:29
| 2019-08-17T12:08:53
|
Python
|
UTF-8
|
Python
| false
| false
| 5,099
|
py
|
multiple_looper_example.py
|
from ctpbee import LooperApi, Vessel
from ctpbee.constant import Offset, TradeData, Direction
from ctpbee.indicator.indicator import ArrayManager
class DoubleMaStrategy(LooperApi):
def __init__(self, name):
super().__init__(name)
self.manager = ArrayManager(500)
self.instrument_set = ["rb2010.SHFE"]
self.fast_window = 10
self.slow_window = 20
self.pos = 0
        self.open = False
        self.price = []
self.open_price = None
self.buy = 0
self.sell = 0
self.slow = 30
self.fast = 15
def on_trade(self, trade: TradeData):
if trade.offset == Offset.OPEN:
if trade.direction == Direction.LONG:
self.buy += trade.volume
else:
self.sell += trade.volume
else:
if trade.direction == Direction.LONG:
self.sell -= trade.volume
else:
self.buy -= trade.volume
def on_bar(self, bar):
""" """
self.manager.update_bar(bar)
if not self.manager.inited:
return
fast_avg = self.manager.sma(self.fast, array=True)
slow_avg = self.manager.sma(self.slow, array=True)
if slow_avg[-2] < fast_avg[-2] and slow_avg[-1] >= fast_avg[-1]:
self.action.cover(bar.close_price, self.buy, bar)
self.action.sell(bar.close_price, 3, bar)
if fast_avg[-2] < slow_avg[-2] and fast_avg[-1] >= slow_avg[-1]:
self.action.sell(bar.close_price, self.sell, bar)
self.action.buy(bar.close_price, 3, bar)
def on_tick(self, tick):
pass
class OIDoubleMaStrategy(LooperApi):
def __init__(self, name):
super().__init__(name)
self.manager = ArrayManager(500)
self.instrument_set = ["OI2009.CZCE"]
self.fast_window = 10
self.slow_window = 20
self.pos = 0
        self.open = False
        self.price = []
self.open_price = None
self.buy = 0
self.sell = 0
self.slow = 30
self.fast = 15
def on_trade(self, trade: TradeData):
if trade.offset == Offset.OPEN:
if trade.direction == Direction.LONG:
self.buy += trade.volume
else:
self.sell += trade.volume
else:
if trade.direction == Direction.LONG:
self.sell -= trade.volume
else:
self.buy -= trade.volume
def on_bar(self, bar):
""" """
self.manager.add_data(bar)
if not self.manager.inited:
return
fast_avg = self.manager.sma(self.fast, array=True)
slow_avg = self.manager.sma(self.slow, array=True)
if slow_avg[-2] < fast_avg[-2] and slow_avg[-1] >= fast_avg[-1]:
self.action.cover(bar.close_price, self.buy, bar)
self.action.sell(bar.close_price, 3, bar)
if fast_avg[-2] < slow_avg[-2] and fast_avg[-1] >= slow_avg[-1]:
self.action.sell(bar.close_price, self.sell, bar)
self.action.buy(bar.close_price, 3, bar)
def on_tick(self, tick):
pass
if __name__ == '__main__':
from ctpbee import QADataSupport
data_support = QADataSupport()
    running = Vessel()
strategy = DoubleMaStrategy("rb")
strategy_2 = OIDoubleMaStrategy("oi")
data = data_support.get_future_min("rb2010.SHFE", frq="1min", start="2020-04-01", end="2020-07-15")
    data1 = data_support.get_future_min("OI2009.CZCE", frq="1min", start="2020-04-01", end="2020-07-15")
    running.add_data(data, data1)
params = {
"looper":
{
"initial_capital": 100000,
"deal_pattern": "price",
# 合约乘数
"size_map": {"rb2010.SHFE": 10,
"OI2009.CZCE": 10,
"FG2009.CZCE": 20,
},
# 手续费收费
"commission_ratio": {
"OI2009.CZCE": {"close": 0.00003, "close_today": 0},
"rb2010.SHFE": {"close": 0.0001, "close_today": 0.0001},
"FG2009.CZCE": {"close": 0.00001, "close_today": 0.00001},
},
# 保证金占用
"margin_ratio": {
"rb2010.SHFE": 0.1,
"OI2009.CZCE": 0.06,
"FG2009.CZCE": 0.05
},
"slippage_sell": 0,
"slippage_cover": 0,
"slippage_buy": 0,
"slippage_short": 0,
"close_pattern": "yesterday",
},
"strategy":
{
}
}
    running.add_strategy(strategy)
    running.add_strategy(strategy_2)
    running.params = params
    running.run()
    result = running.get_result(report=True, auto_open=True)
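# The on_bar() logic above is a plain moving-average crossover: open long on a
# golden cross (fast SMA rises through the slow SMA) and flip on the opposite
# cross. A tiny self-contained check of the condition, with made-up values:
#
#     fast_avg = [9.8, 10.2]   # fast SMA over the two most recent bars
#     slow_avg = [10.0, 10.1]  # slow SMA over the two most recent bars
#     golden = fast_avg[-2] < slow_avg[-2] and fast_avg[-1] >= slow_avg[-1]
#     assert golden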
|
00c1f6ec6beef14e86a07d5f5244669286a1d811
|
6181fcd4a266d963a0ee85971768c97922ca77cd
|
/src/garage/tf/optimizers/_dtypes.py
|
8f31d8666f901b854647d75f3cae6a1f178ac0da
|
[
"MIT"
] |
permissive
|
rlworkgroup/garage
|
5d215bbecb3a4e74b504988d6684a7b04df69a80
|
2d594803636e341660cab0e81343abbe9a325353
|
refs/heads/master
| 2023-08-21T22:58:49.338034
| 2023-01-04T06:06:27
| 2023-01-04T06:06:27
| 136,846,372
| 1,832
| 363
|
MIT
| 2023-09-11T11:36:40
| 2018-06-10T21:31:23
|
Python
|
UTF-8
|
Python
| false
| false
| 1,776
|
py
|
_dtypes.py
|
"""Data types for TensorFlow optimizers."""
class LazyDict:
"""An immutable, lazily-evaluated dict.
Args:
**kwargs (dict[Callable]): Initial lazy key-value pairs.
"""
def __init__(self, **kwargs):
self._lazy_dict = kwargs
self._dict = {}
def __getitem__(self, key):
"""See :meth:`object.__getitem__`.
Args:
key (Hashable): Key associated with the value to retrieve.
Returns:
object: Lazily-evaluated value of the :class:`Callable` associated
with key.
"""
if key not in self._dict:
self._dict[key] = self._lazy_dict[key]()
return self._dict[key]
def __setitem__(self, key, value):
"""See :meth:`object.__setitem__`.
Args:
key (Hashable): Key associated with value.
value (Callable): Function which returns the lazy value associated
with key.
"""
self.set(key, value)
def get(self, key, default=None):
"""See :meth:`dict.get`.
Args:
            key (Hashable): Key associated with the value to retrieve.
default (object): Value to return if key is not present in this
:class:`LazyDict`.
Returns:
object: Value associated with key if the key is present, otherwise
default.
"""
if key in self._lazy_dict:
return self[key]
return default
def set(self, key, value):
"""See :meth:`dict.set`.
Args:
key (Hashable): Key associated with value.
value (Callable): Function which returns the lazy value associated
with key.
"""
self._lazy_dict[key] = value
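# A small usage sketch of the memoisation this class provides; the
# ``expensive_setup`` name is illustrative:
#
#     def expensive_setup():
#         print('computed once')
#         return 42
#
#     d = LazyDict(answer=expensive_setup)
#     d['answer']                  # prints 'computed once', returns 42
#     d['answer']                  # returns the cached 42, no recomputation
#     d.get('missing', 'default')  # -> 'default'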
|
0105bd37902625d7e482212ffc032d3f0f94f2f1
|
38fff7bdefd8d62a740d51329b50d0e1e49258bb
|
/infra/manifest.py
|
256cf8dbdae97656d44e96f8febb9a45359e2f52
|
[
"Apache-2.0"
] |
permissive
|
google/oss-fuzz
|
026384c2ada61ef68b147548e830f60730c5e738
|
f0275421f84b8f80ee767fb9230134ac97cb687b
|
refs/heads/master
| 2023-08-31T23:30:28.157702
| 2023-08-31T21:49:30
| 2023-08-31T21:49:30
| 63,809,205
| 9,438
| 2,315
|
Apache-2.0
| 2023-09-14T20:32:19
| 2016-07-20T19:39:50
|
Shell
|
UTF-8
|
Python
| false
| false
| 2,150
|
py
|
manifest.py
|
#! /usr/bin/env python3
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
"""Script for pushing manifest files to docker that point to AMD64 and ARM
images."""
import logging
import subprocess
import sys
def push_manifest(image):
"""Pushes a manifest file in place of |image| for ARM and AMD64 versions of
that image."""
subprocess.run(['docker', 'pull', image], check=True)
amd64_image = f'{image}:manifest-amd64'
subprocess.run(['docker', 'tag', image, amd64_image], check=True)
subprocess.run(['docker', 'push', amd64_image], check=True)
arm_version = f'{image}-testing-arm'
subprocess.run(['docker', 'pull', arm_version], check=True)
arm64_image = f'{image}:manifest-arm64v8'
subprocess.run(['docker', 'tag', arm_version, arm64_image], check=True)
subprocess.run([
'docker', 'manifest', 'create', image, '--amend', arm64_image, '--amend',
amd64_image
],
check=True)
subprocess.run(['docker', 'manifest', 'push', image], check=True)
return True
def main():
"""Sets up manifests for base-builder and base-runner so they can be used for
ARM builds."""
logging.info('Doing simple gcloud command to ensure 2FA passes. '
'Otherwise docker push fails.')
subprocess.run(['gcloud', 'projects', 'list', '--limit=1'], check=True)
images = [
'gcr.io/oss-fuzz-base/base-builder', 'gcr.io/oss-fuzz-base/base-runner'
]
results = [push_manifest(image) for image in images]
return 0 if all(results) else 1
if __name__ == '__main__':
sys.exit(main())
|
7ea4bf4f72b05c366c0c2be1de9fd7b4dcf93d87
|
037d5d18b9b81205305e158d7d9fdad131d318cb
|
/tests/test_column_aliases.py
|
53af4fa99d8d21fc0832642fe808bf5db3afb856
|
[] |
permissive
|
kvesteri/sqlalchemy-continuum
|
ee7acf2c961b27eab3dd8f61598d9159d801ee21
|
a7a6bd7952185b1f82af985c0271834d886a617c
|
refs/heads/master
| 2023-08-24T09:14:33.515416
| 2022-11-17T05:41:09
| 2023-07-24T23:37:12
| 10,312,759
| 479
| 134
|
BSD-3-Clause
| 2023-09-12T20:07:04
| 2013-05-27T10:30:55
|
Python
|
UTF-8
|
Python
| false
| false
| 1,935
|
py
|
test_column_aliases.py
|
from pytest import mark
import sqlalchemy as sa
from sqlalchemy_continuum import version_class
from tests import TestCase, create_test_cases
class ColumnAliasesBaseTestCase(TestCase):
def create_models(self):
class TextItem(self.Model):
__tablename__ = 'text_item'
__versioned__ = {}
id = sa.Column(
'_id', sa.Integer, autoincrement=True, primary_key=True
)
name = sa.Column('_name', sa.Unicode(255))
self.TextItem = TextItem
@mark.skipif('True')
class TestVersionTableWithColumnAliases(ColumnAliasesBaseTestCase):
def test_column_reflection(self):
assert '_id' in version_class(self.TextItem).__table__.c
class ColumnAliasesTestCase(ColumnAliasesBaseTestCase):
def test_insert(self):
item = self.TextItem(name=u'Something')
self.session.add(item)
self.session.commit()
assert item.versions[0].name == u'Something'
def test_revert(self):
item = self.TextItem(name=u'Something')
self.session.add(item)
self.session.commit()
item.name = u'Some other thing'
self.session.commit()
item.versions[0].revert()
self.session.commit()
def test_previous_for_deleted_parent(self):
item = self.TextItem()
item.name = u'Some item'
item.content = u'Some content'
self.session.add(item)
self.session.commit()
self.session.delete(item)
self.session.commit()
TextItemVersion = version_class(self.TextItem)
versions = (
self.session.query(TextItemVersion)
.order_by(
getattr(
TextItemVersion,
self.options['transaction_column_name']
)
)
).all()
assert versions[1].previous.name == u'Some item'
create_test_cases(ColumnAliasesTestCase)
|
be777223a54908ad4008654c2ec7b46ef4e2066e
|
479a9c76b19b84d6cde69305828031cd2531aa56
|
/testing/MLDB-1933-subselect-flatten.py
|
6886ac6548698366def3ef77943fbe16d7648b8b
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
mldbai/mldb
|
d36801bd99dd3f82d7557cd0f438b0121f63f22c
|
19bc4bc92a41ee8ad4eab0979dffd9c985d95758
|
refs/heads/master
| 2023-09-03T22:59:11.621839
| 2022-12-30T18:42:24
| 2022-12-30T18:42:24
| 47,634,692
| 701
| 107
|
Apache-2.0
| 2023-02-10T23:08:05
| 2015-12-08T16:34:16
|
C++
|
UTF-8
|
Python
| false
| false
| 1,141
|
py
|
MLDB-1933-subselect-flatten.py
|
#
# MLDB-1933-subselect-flatten.py
# Mathieu Marquis Bolduc, 2016-09-08
# This file is part of MLDB. Copyright 2016 mldb.ai inc. All rights reserved.
#
import unittest
from mldb import mldb, MldbUnitTest, ResponseException
class ColumnExprTest(MldbUnitTest): # noqa
def test_subselect(self):
res = mldb.query("SELECT COLUMN EXPR STRUCTURED (SELECT 1) FROM (SELECT [[2,3],[4,5]] as myembedding)")
expected = [["_rowName","myembedding"],["result",1]]
        self.assertTableResultEquals(res, expected)
def test_subselect_multiple(self):
res = mldb.query("SELECT COLUMN EXPR STRUCTURED (SELECT 1) FROM (SELECT [2,3] as x,[4,5] as y)")
expected = [["_rowName", "x", "y"],
["result", 1, 1]]
        self.assertTableResultEquals(res, expected)
def test_subselect_multiple_value(self):
res = mldb.query("SELECT COLUMN EXPR STRUCTURED (SELECT norm(value(), 2)) FROM (SELECT [2,3] as x,[4,5] as y)")
expected = [["_rowName", "x", "y"],
["result", 3.605551275463989, 6.4031242374328485]]
if __name__ == '__main__':
mldb.run_tests()
|
10f7c0a73477cd46919f73464f4687a150d7779e
|
d54a318a22a35dcfd7593837e55b83a9aa11d47d
|
/server-extension/jupyterlab_examples_server/handlers.py
|
2b1119524801b3218bba5a9912f9108860e77121
|
[
"BSD-3-Clause"
] |
permissive
|
jupyterlab/extension-examples
|
b67a696c51aea2e85a11409e7cb5b47bb76752eb
|
eba1de14b2ad7ef5c64faa89284f6e0112ee4491
|
refs/heads/main
| 2023-09-01T19:44:21.439787
| 2023-06-28T14:21:33
| 2023-06-28T14:21:33
| 214,395,915
| 351
| 157
|
BSD-3-Clause
| 2023-09-14T18:28:10
| 2019-10-11T09:24:23
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 1,604
|
py
|
handlers.py
|
import os
import json
from jupyter_server.base.handlers import APIHandler
from jupyter_server.utils import url_path_join
import tornado
from tornado.web import StaticFileHandler
class RouteHandler(APIHandler):
# The following decorator should be present on all verb methods (head, get, post,
# patch, put, delete, options) to ensure only authorized user can request the
# Jupyter server
@tornado.web.authenticated
def get(self):
self.finish(json.dumps({
"data": "This is /jupyterlab-examples-server/hello endpoint!"
}))
@tornado.web.authenticated
def post(self):
# input_data is a dictionary with a key "name"
input_data = self.get_json_body()
data = {"greetings": "Hello {}, enjoy JupyterLab!".format(input_data["name"])}
self.finish(json.dumps(data))
def setup_handlers(web_app):
host_pattern = ".*$"
base_url = web_app.settings["base_url"]
# Prepend the base_url so that it works in a JupyterHub setting
route_pattern = url_path_join(base_url, "jupyterlab-examples-server", "hello")
handlers = [(route_pattern, RouteHandler)]
web_app.add_handlers(host_pattern, handlers)
# Prepend the base_url so that it works in a JupyterHub setting
doc_url = url_path_join(base_url, "jupyterlab-examples-server", "public")
doc_dir = os.getenv(
"JLAB_SERVER_EXAMPLE_STATIC_DIR",
os.path.join(os.path.dirname(__file__), "public"),
)
handlers = [("{}/(.*)".format(doc_url), StaticFileHandler, {"path": doc_dir})]
web_app.add_handlers(host_pattern, handlers)
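# A hedged sketch of calling the endpoint from Python; the URL and the token
# below are placeholders and assume the standard Jupyter Server token auth:
#
#     import requests
#
#     base = 'http://localhost:8888'
#     headers = {'Authorization': 'token <your-jupyter-token>'}
#     r = requests.get(base + '/jupyterlab-examples-server/hello', headers=headers)
#     print(r.json())  # {'data': 'This is /jupyterlab-examples-server/hello endpoint!'}
#     r = requests.post(base + '/jupyterlab-examples-server/hello',
#                       json={'name': 'Ada'}, headers=headers)
#     print(r.json())  # {'greetings': 'Hello Ada, enjoy JupyterLab!'}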
|
48dc4317cfa80f643a5f62e38959c1ee4aab3f89
|
a4ea525e226d6c401fdb87a6e9adfdc5d07e6020
|
/src/azure-cli/azure/cli/command_modules/cognitiveservices/tests/latest/test_cognitiveservices_command.py
|
51fa70b981ec0661f4a9fc13a004f5e68b02c1f8
|
[
"MIT",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MPL-2.0",
"LGPL-2.1-only",
"Apache-2.0",
"LGPL-2.1-or-later",
"BSD-2-Clause"
] |
permissive
|
Azure/azure-cli
|
13340eeca2e288e66e84d393fa1c8a93d46c8686
|
a40fd14ad0b6e89720a2e58d4d9be3a6ce1535ca
|
refs/heads/dev
| 2023-08-17T06:25:37.431463
| 2023-08-17T06:00:10
| 2023-08-17T06:00:10
| 51,040,886
| 4,018
| 3,310
|
MIT
| 2023-09-14T11:11:05
| 2016-02-04T00:21:51
|
Python
|
UTF-8
|
Python
| false
| false
| 5,946
|
py
|
test_cognitiveservices_command.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os
import unittest
from azure.cli.testsdk import ScenarioTest, ResourceGroupPreparer
from azure.cli.testsdk.scenario_tests import AllowLargeResponse
from knack.util import CLIError
class CognitiveServicesTests(ScenarioTest):
@ResourceGroupPreparer()
def test_cognitiveservices_crud(self, resource_group):
sname = self.create_random_name(prefix='cog', length=12)
tagname = self.create_random_name(prefix='tagname', length=15)
tagvalue = self.create_random_name(prefix='tagvalue', length=15)
self.kwargs.update({
'sname': sname,
'kind': 'FormRecognizer',
'sku': 'S0',
'location': 'SOUTHCENTRALUS',
'tags': tagname + '=' + tagvalue
})
# test to create cognitive services account
self.cmd('az cognitiveservices account create -n {sname} -g {rg} --kind {kind} --sku {sku} -l {location} --yes',
checks=[self.check('name', '{sname}'),
self.check('location', '{location}'),
self.check('sku.name', '{sku}')])
# test to show the details of cognitive services account
self.cmd('az cognitiveservices account show -n {sname} -g {rg}',
checks=[self.check('name', '{sname}'),
self.check('resourceGroup', '{rg}')])
# test to update the properties of cognitive services account
self.cmd('az cognitiveservices account update -n {sname} -g {rg} --sku {sku} --tags {tags}',
checks=[self.check('sku.name', '{sku}'),
self.check('tags', {tagname: tagvalue})])
        # test to list keys of a cognitive services account
oldkeys = self.cmd('az cognitiveservices account keys list -n {sname} -g {rg}',
checks=[self.check('length(key1)', 32),
self.check('length(key2)', 32)]).get_output_in_json()
# test to regenerate the keys of a cognitive services account
newkeys = self.cmd('az cognitiveservices account keys regenerate -n {sname} -g {rg} --key-name Key1').get_output_in_json() # pylint: disable=line-too-long
self.assertNotEqual(oldkeys, newkeys)
# test to list cognitive service accounts under current resource group
self.cmd('az cognitiveservices account list -g {rg}', checks=[
self.check('length(@)', 1)])
# test to delete the cognitive services account
exitcode = self.cmd('az cognitiveservices account delete -n {sname} -g {rg}').exit_code
self.assertEqual(exitcode, 0)
@AllowLargeResponse()
@ResourceGroupPreparer()
def test_cognitiveservices_account_list_kinds(self, resource_group):
# test to list cognitive services account kinds
results = self.cmd('az cognitiveservices account list-kinds').get_output_in_json()
self.assertTrue(len(results) > 0)
self.assertTrue('Face' in results)
@ResourceGroupPreparer()
def test_cognitiveservices_account_list_skus_legacy(self, resource_group):
self.kwargs.update({
'name': self.create_random_name(prefix='cs_cli_test_', length=16),
'kind': 'FormRecognizer',
'sku': 'S0',
'location': 'SOUTHCENTRALUS'
})
self.cmd('az cognitiveservices account create -n {name} -g {rg} --kind {kind} --sku {sku} -l {location} --yes',
checks=[self.check('name', '{name}'),
self.check('location', '{location}'),
self.check('sku.name', '{sku}')])
results = self.cmd('az cognitiveservices account list-skus -n {name} -g {rg}').get_output_in_json()
self.assertTrue(isinstance(results['value'], list))
self.assertTrue(len(results['value']) > 0)
@AllowLargeResponse()
@ResourceGroupPreparer()
def test_cognitiveservices_account_list_skus(self, resource_group):
self.kwargs.update({
'kind': 'Face',
'location': 'westus'
})
results = self.cmd('az cognitiveservices account list-skus --kind {kind}').get_output_in_json()
self.assertTrue(isinstance(results, list))
self.assertTrue(len(results) > 0)
for sku in results:
self.assertTrue(sku['kind'] == self.kwargs['kind'])
results = self.cmd('az cognitiveservices account list-skus --kind {kind} --location {location}').get_output_in_json()
self.assertTrue(isinstance(results, list))
self.assertTrue(len(results) > 0)
for sku in results:
self.assertTrue(sku['kind'] == self.kwargs['kind'])
self.assertTrue(self.kwargs['location'].lower() in [x.lower() for x in sku['locations']])
@ResourceGroupPreparer()
def test_cognitiveservices_account_list_usage(self, resource_group):
self.kwargs.update({
'name': self.create_random_name(prefix='cs_cli_test_', length=16),
'kind': 'TextAnalytics',
'sku': 'S',
'location': 'SOUTHCENTRALUS'
})
self.cmd('az cognitiveservices account create -n {name} -g {rg} --kind {kind} --sku {sku} -l {location}',
checks=[self.check('name', '{name}'),
self.check('location', '{location}'),
self.check('sku.name', '{sku}')])
results = self.cmd('az cognitiveservices account list-usage -n {name} -g {rg}').get_output_in_json()
self.assertTrue(isinstance(results, list))
if __name__ == '__main__':
unittest.main()
|
8317d8dc77ce6935069adbe27ec52e920ba69c33
|
61004e474b7b2ad0071c16766f0f7874f04f9466
|
/tools/ml-auto-eda/ml_eda/reporting/report_generator.py
|
a19b365fc1cd1f4040bac1bdad17b39738b60e1d
|
[
"Apache-2.0"
] |
permissive
|
GoogleCloudPlatform/professional-services
|
eb79751efae765a8c691a745e520f44f51bd715c
|
0f51121b945bd74c7f667e74e8861fceda87565c
|
refs/heads/main
| 2023-09-05T02:57:33.328973
| 2023-08-30T14:40:30
| 2023-08-30T14:40:30
| 91,730,359
| 2,626
| 1,381
|
Apache-2.0
| 2023-09-14T20:13:42
| 2017-05-18T19:29:27
|
Python
|
UTF-8
|
Python
| false
| false
| 7,390
|
py
|
report_generator.py
|
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generate EDA report based on the performed analysis"""
from collections import OrderedDict
from typing import Text
import logging
import argparse
from markdown2 import markdown
from ml_eda.proto import analysis_entity_pb2
from ml_eda.orchestration.analysis_tracker import AnalysisTracker
from ml_eda.reporting import utils
from ml_eda.reporting.formatting import SectionMeta
from ml_eda.reporting.formatting import REPORT_STRUCTURE
from ml_eda.reporting.formatting import WARNING_STR, RECOMMEND_STR
from ml_eda.reporting.template import JOB_CONFIG_TEMPLATE
from ml_eda.reporting.template import SECTION_TITLE, SUB_SECTION_TITLE
from ml_eda.reporting.template import CLI_PARAM_LIST_TEMPLATE
from ml_eda.reporting.template import CLI_PARAM_TEMPLATE
Analysis = analysis_entity_pb2.Analysis
def create_report_contents(
analysis_tracker: AnalysisTracker,
figure_base_path: Text
) -> OrderedDict:
"""Generate the analysis report content based on all the analyze performed
Args:
analysis_tracker: (AnalysisTracker), holder for all the analysis
figure_base_path: (string), the folder for holding figures
Returns:
Dict -> {section_name: [section_contents]}
"""
# The content is stored in an OrderedDict
report_content = OrderedDict()
for section in REPORT_STRUCTURE:
report_content[section.section_name] = []
for section in REPORT_STRUCTURE:
# Only the dataset_info section has no title
if section.section_title is not None:
title = section.section_title_template.format(
content=section.section_title)
report_content[section.section_name].append(title)
# Check whether the content generation function is specified for a section
if section.section_content_generator is not None:
      # Run the content generation function; its signature is consistent
      # across sections
content, additional = section.section_content_generator(
analysis_tracker, figure_base_path)
      # Add the generated content to the result holder
if content is not None:
report_content[section.section_name].append(content)
if (section.generate_recommend
and additional is not None
and RECOMMEND_STR in report_content):
report_content[RECOMMEND_STR].extend(additional)
elif (section.generate_warning
and additional is not None
and WARNING_STR in report_content):
report_content[WARNING_STR].extend(additional)
# Consolidate all warnings collected from analysis
if WARNING_STR in report_content and len(report_content[WARNING_STR]) > 1:
report_content[WARNING_STR] = [
report_content[WARNING_STR][-1],
utils.create_content_list(report_content[WARNING_STR][0:-1])]
# Consolidate all recommendations collected from analysis
if RECOMMEND_STR in report_content and len(report_content[RECOMMEND_STR]) > 1:
report_content[RECOMMEND_STR] = [
report_content[RECOMMEND_STR][-1],
utils.create_content_list(report_content[RECOMMEND_STR][0:-1])]
return report_content
def create_md_report(
analysis_tracker: AnalysisTracker,
figure_base_path: Text,
config_params: argparse.ArgumentParser
) -> Text:
# pylint: disable-msg=too-many-locals
"""Creat report based on all the analysis performed
Args:
analysis_tracker: (AnalysisTracker), holder for all the analysis
figure_base_path: (string), the folder for holding figures
config_params: runtime configuration from CLI
Returns:
Markdown formatted report in text
"""
def _check_empty_section(
r_contents: OrderedDict,
section_meta: SectionMeta
) -> bool:
"""Check whether the content of a section is empty. If a section has
has dependency as specified in its SectionMeta, all its dependencies need
to be checked to ensure its emptiness.
Args:
r_contents: generated report contents holder
section_meta: section metadata
Returns:
Boolean flag
"""
if section_meta.section_content_generator is not None:
      if section_meta.section_title is None:
        # without a title, any entry at all makes the section non-empty
        return len(r_contents[section_meta.section_name]) < 1
      # with a title, the title itself is one entry, so a non-empty
      # section needs at least two entries
      return len(r_contents[section_meta.section_name]) < 2
if not section_meta.dependency:
return len(r_contents[section_meta.section_name]) < 2
check_list = list()
for item in section_meta.dependency:
# recursively check dependency
check_list.append(_check_empty_section(r_contents, item))
# all dependency need to be empty to claim the parent
# section to be empty
if all(check_list):
return True
return False
# Generate contents for each section
report_content = create_report_contents(
analysis_tracker=analysis_tracker,
figure_base_path=figure_base_path)
contents = []
# Put contents in correct order and add appropriate section title
  # REPORT_STRUCTURE defines the overall ordering of the sections
for section in REPORT_STRUCTURE:
if section.skip_if_no_content:
is_empty = _check_empty_section(report_content, section)
logging.info(
'Emptiness check for {}: {}'.format(section.section_name, is_empty))
if not is_empty:
contents.extend(report_content[section.section_name])
else:
contents.extend(report_content[section.section_name])
if config_params.add_config_to_report:
config_title = SECTION_TITLE.format(content='Configurations')
contents.append(config_title)
with open(config_params.job_config, 'r') as f:
job_config_title = SUB_SECTION_TITLE.format(content='job_config.ini')
config_content = JOB_CONFIG_TEMPLATE.format(
config_content=''.join(f.readlines()))
contents.extend([job_config_title, config_content])
cli_list_template = CLI_PARAM_LIST_TEMPLATE
config_dict = config_params.__dict__
cli_title = SUB_SECTION_TITLE.format(content='cli params')
param_content = '\n'.join(
[cli_list_template.format(param=param, value=config_dict[param])
for param in config_dict])
cli_content = CLI_PARAM_TEMPLATE.format(param_content=param_content)
contents.extend([cli_title, cli_content])
return ''.join(contents)
def create_html_report_from_markdown(
markdown_content: Text
) -> Text:
"""Creat report based on all the analysis performed
Args:
markdown_content: markdown format content.
Returns:
HTML formatted report in text
"""
extras = ['cuddled-list', 'tables', 'fenced-code-blocks']
raw_html = markdown(markdown_content, extras=extras)
return raw_html
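# Hedged usage sketch; `tracker` and `cli_args` are hypothetical placeholders
# for an AnalysisTracker instance and the parsed CLI namespace:
#   md_report = create_md_report(tracker, '/tmp/figures', cli_args)
#   html_report = create_html_report_from_markdown(md_report)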
|
cd0e436fa82a202c90ff17db5e230c2c8488dc51
|
dd85fd91b7bab66088daaae247b04ca3385523d8
|
/sarpy/consistency/sicd_consistency.py
|
22c290ef1ac4b3bcf33d44af143d5e9c4ae8ca12
|
[
"MIT",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-public-domain"
] |
permissive
|
ngageoint/sarpy
|
37a182fdae6bc40221f4d03cd81467f015e1529a
|
de1b1886f161a83b6c89aadc7a2c7cfc4892ef81
|
refs/heads/master
| 2023-08-21T11:04:01.776416
| 2023-08-07T19:24:44
| 2023-08-07T19:24:44
| 126,400,199
| 192
| 72
|
MIT
| 2023-08-29T20:16:46
| 2018-03-22T22:03:23
|
Python
|
UTF-8
|
Python
| false
| false
| 15,087
|
py
|
sicd_consistency.py
|
"""
A module for performing a selection of validation checks on a SICD (nitf) file,
or the xml file containing the sicd structure.
Use the `check_file` function directly, or perform using the command line
>>> python -m sarpy.consistency.sicd_consistency <file_name>
For more information, about command line usage, see
>>> python -m sarpy.consistency.sicd_consistency --help
"""
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
import logging
import sys
import argparse
import os
from typing import Union
from sarpy.io.xml.base import parse_xml_from_string, validate_xml_from_string
from sarpy.io.general.nitf import NITFDetails
from sarpy.io.general.nitf_elements.des import DataExtensionHeader, \
DataExtensionHeader0
from sarpy.io.complex.sicd import SICDReader, SICDDetails, extract_clas
from sarpy.io.complex.sicd_elements.SICD import SICDType
from sarpy.io.complex.sicd_schema import get_urn_details, get_schema_path, \
get_specification_identifier
logger = logging.getLogger('validation')
def evaluate_xml_versus_schema(xml_string, urn_string):
"""
Check validity of the xml string versus the appropriate schema.
Parameters
----------
xml_string : str|bytes
urn_string : str
Returns
-------
None|bool
"""
try:
the_schema = get_schema_path(urn_string)
except KeyError:
logger.exception('SICD: Failed getting the schema for urn {}'.format(urn_string))
return False
try:
return validate_xml_from_string(xml_string, the_schema, output_logger=logger)
except ImportError:
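        # schema validation relies on an optional dependency (lxml in sarpy);
        # None signals "could not be checked" rather than pass/fail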
return None
def _evaluate_xml_string_validity(xml_string):
"""
Check the validity of the SICD xml, as defined by the given string.
Parameters
----------
xml_string : str|bytes
Returns
-------
(bool, str, SICDType)
"""
root_node, xml_ns = parse_xml_from_string(xml_string)
if xml_ns is None:
raise ValueError(
'SICD XML invalid, because no apparent namespace defined in the xml,\n\t'
'which starts `{}...`'.format(xml_string[:15]))
if 'default' not in xml_ns:
raise ValueError(
'Could not properly interpret the namespace collection from xml\n{}'.format(xml_ns))
sicd_urn = xml_ns['default']
# check that our urn is mapped
try:
_ = get_urn_details(sicd_urn)
check_schema = True
except Exception as e:
logger.exception('SICD: The SICD namespace has unrecognized value')
check_schema = False
valid_xml = None
if check_schema:
valid_xml = evaluate_xml_versus_schema(xml_string, sicd_urn)
if valid_xml is None:
valid_xml = True
# perform the various sicd structure checks
the_sicd = SICDType.from_node(root_node, xml_ns=xml_ns)
valid_sicd_contents = the_sicd.is_valid(recursive=True, stack=False)
return valid_xml & valid_sicd_contents, sicd_urn, the_sicd
def check_sicd_data_extension(nitf_details, des_header, xml_string):
"""
Evaluate a SICD data extension for validity.
Parameters
----------
nitf_details : NITFDetails
des_header : DataExtensionHeader|DataExtensionHeader0
xml_string : str|bytes
Returns
-------
(bool, SICDType)
"""
def check_des_header_fields():
# type: () -> bool
des_id = des_header.DESID.strip() if nitf_details.nitf_version == '02.10' else des_header.DESTAG.strip()
if des_id != 'XML_DATA_CONTENT':
logger.warning('SICD: Found old style SICD DES Header. This is deprecated.')
return True
# make sure that the NITF urn is evaluated for sensibility
nitf_urn = des_header.UserHeader.DESSHTN.strip()
try:
nitf_urn_details = get_urn_details(nitf_urn)
except Exception:
logger.exception('SICD: The SICD DES.DESSHTN must be a recognized urn')
return False
# make sure that the NITF urn and SICD urn actually agree
header_good = True
if nitf_urn != xml_urn:
logger.error('SICD: The SICD DES.DESSHTN ({}) and urn ({}) must agree'.format(nitf_urn, xml_urn))
header_good = False
# make sure that the NITF DES fields are populated appropriately for NITF urn
if des_header.UserHeader.DESSHSI.strip() != get_specification_identifier():
logger.error(
'SICD: DES.DESSHSI has value `{}`,\n\tbut should have value `{}`'.format(
des_header.UserHeader.DESSHSI.strip(), get_specification_identifier()))
header_good = False
nitf_version = nitf_urn_details['version']
if des_header.UserHeader.DESSHSV.strip() != nitf_version:
logger.error(
'SICD: DES.DESSHSV has value `{}`,\n\tbut should have value `{}` based on DES.DESSHTN `{}`'.format(
des_header.UserHeader.DESSHSV.strip(), nitf_version, nitf_urn))
header_good = False
nitf_date = nitf_urn_details['date']
if des_header.UserHeader.DESSHSD.strip() != nitf_date:
logger.warning(
'SICD: DES.DESSHSD has value `{}`,\n\tbut should have value `{}` based on DES.DESSHTN `{}`'.format(
des_header.UserHeader.DESSHSD.strip(), nitf_date, nitf_urn))
return header_good
def compare_sicd_class():
# type: () -> bool
if the_sicd.CollectionInfo is None or the_sicd.CollectionInfo.Classification is None:
logger.error(
'SICD: SICD.CollectionInfo.Classification is not populated,\n\t'
'so can not be compared with SICD DES.DESCLAS `{}`'.format(des_header.Security.CLAS.strip()))
return False
sicd_class = the_sicd.CollectionInfo.Classification
extracted_class = extract_clas(the_sicd)
if extracted_class != des_header.Security.CLAS.strip():
logger.warning(
'SICD: DES.DESCLAS is `{}`,\n\tand SICD.CollectionInfo.Classification '
'is {}'.format(des_header.Security.CLAS.strip(), sicd_class))
if des_header.Security.CLAS.strip() != nitf_details.nitf_header.Security.CLAS.strip():
logger.warning(
'SICD: DES.DESCLAS is `{}`,\n\tand NITF.CLAS is `{}`'.format(
des_header.Security.CLAS.strip(), nitf_details.nitf_header.Security.CLAS.strip()))
return True
# check sicd xml structure for validity
valid_sicd, xml_urn, the_sicd = _evaluate_xml_string_validity(xml_string)
# check that the sicd information and header information appropriately match
valid_header = check_des_header_fields()
# check that the classification seems to make sense
valid_class = compare_sicd_class()
return valid_sicd & valid_header & valid_class, the_sicd
def check_sicd_file(nitf_details):
"""
Check the validity of the given NITF file as a SICD file.
Parameters
----------
nitf_details : str|NITFDetails
The path to the NITF file, or a `NITFDetails` object.
Returns
-------
bool
"""
def check_data_extension_headers():
# type: () -> (str, Union[DataExtensionHeader, DataExtensionHeader0])
sicd_des = []
for i in range(nitf_details.des_subheader_offsets.size):
subhead_bytes = nitf_details.get_des_subheader_bytes(i)
des_bytes = None
if subhead_bytes.startswith(b'DEXML_DATA_CONTENT'):
des_bytes = nitf_details.get_des_bytes(i)
elif subhead_bytes.startswith(b'DESIDD_XML'):
raise ValueError(
'This file contains an old format SIDD DES, and should be a SIDD file')
elif subhead_bytes.startswith(b'DESICD_XML'):
des_bytes = nitf_details.get_des_bytes(i)
if des_bytes is None:
continue
# compare the SICD structure and the des header structure
if nitf_details.nitf_version == '02.00':
des_header = DataExtensionHeader0.from_bytes(subhead_bytes, start=0)
elif nitf_details.nitf_version == '02.10':
des_header = DataExtensionHeader.from_bytes(subhead_bytes, start=0)
else:
raise ValueError('Got unhandled NITF version {}'.format(nitf_details.nitf_version))
try:
des_bytes = des_bytes.decode('utf-8').strip().encode()
root_node, xml_ns = parse_xml_from_string(des_bytes)
# namespace makes this ugly
if 'SIDD' in root_node.tag:
raise ValueError(
'This file contains a SIDD DES, and should be a SIDD file')
elif 'SICD' in root_node.tag:
sicd_des.append((i, des_bytes, des_header))
except Exception as e:
logger.error('Failed parsing the xml DES entry {} as xml'.format(i))
raise e
if len(sicd_des) == 0:
raise ValueError('No SICD DES values found, so this is not a viable SICD file')
elif len(sicd_des) > 1:
raise ValueError(
'Multiple SICD DES values found at indices {},\n'
'so this is not a viable SICD file'.format([entry[0] for entry in sicd_des]))
return sicd_des[0][1], sicd_des[0][2]
def check_image_data():
# type: () -> bool
# get pixel type
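        # expected NITF band layout per SICD pixel type: 32-bit float I/Q,
        # 16-bit signed-integer I/Q, or 8-bit amplitude/phase pairs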
pixel_type = the_sicd.ImageData.PixelType
if pixel_type == 'RE32F_IM32F':
exp_nbpp = 32
exp_pvtype = 'R'
elif pixel_type == 'RE16I_IM16I':
exp_nbpp = 16
exp_pvtype = 'SI'
elif pixel_type == 'AMP8I_PHS8I':
exp_nbpp = 8
exp_pvtype = 'INT'
else:
raise ValueError('Got unexpected pixel type {}'.format(pixel_type))
valid_images = True
# verify that all images have the correct pixel type
for i, img_header in enumerate(nitf_details.img_headers):
if img_header.ICAT.strip() != 'SAR':
valid_images = False
logger.error(
'SICD: image segment at index {} of {} has ICAT = `{}`,\n\texpected to be `SAR`'.format(
i, len(nitf_details.img_headers), img_header.ICAT.strip()))
if img_header.PVTYPE.strip() != exp_pvtype:
valid_images = False
logger.error(
'SICD: image segment at index {} of {} has PVTYPE = `{}`,\n\t'
'expected to be `{}` based on pixel type {}'.format(
i, len(nitf_details.img_headers), img_header.PVTYPE.strip(), exp_pvtype, pixel_type))
if img_header.NBPP != exp_nbpp:
valid_images = False
logger.error(
'SICD: image segment at index {} of {} has NBPP = `{}`,\n\t'
'expected to be `{}` based on pixel type {}'.format(
i, len(nitf_details.img_headers), img_header.NBPP, exp_nbpp, pixel_type))
if len(img_header.Bands) != 2:
valid_images = False
logger.error('SICD: image segment at index {} of {} does not have two (I/Q or M/P) bands'.format(
i, len(nitf_details.img_headers)))
continue
if pixel_type == 'AMP8I_PHS8I':
if img_header.Bands[0].ISUBCAT.strip() != 'M' and img_header.Bands[1].ISUBCAT.strip() != 'P':
valid_images = False
logger.error(
'SICD: pixel_type is {}, image segment at index {} of {}\n\t'
'has bands with ISUBCAT {}, expected ("M", "P")'.format(
pixel_type, i, len(nitf_details.img_headers),
(img_header.Bands[0].ISUBCAT.strip(), img_header.Bands[1].ISUBCAT.strip())))
else:
if img_header.Bands[0].ISUBCAT.strip() != 'I' and img_header.Bands[1].ISUBCAT.strip() != 'Q':
valid_images = False
logger.error(
'SICD: pixel_type is {}, image segment at index {} of {}\n\t'
'has bands with ISUBCAT {}, expected ("I", "Q")'.format(
pixel_type, i, len(nitf_details.img_headers),
(img_header.Bands[0].ISUBCAT.strip(), img_header.Bands[1].ISUBCAT.strip())))
return valid_images
if isinstance(nitf_details, str):
if not os.path.isfile(nitf_details):
raise ValueError('Got string input, but it is not a valid path')
nitf_details = NITFDetails(nitf_details)
if not isinstance(nitf_details, NITFDetails):
raise TypeError(
'Input is expected to be a path to a NITF file, or a NITFDetails object instance')
# find the sicd header
sicd_xml_string, des_header = check_data_extension_headers()
# check that the sicd and header are valid
valid_sicd_des, the_sicd = check_sicd_data_extension(nitf_details, des_header, sicd_xml_string)
# check that the image segments all make sense compared to the sicd structure
valid_img = check_image_data()
all_valid = valid_sicd_des & valid_img
if valid_img:
try:
reader = SICDReader(nitf_details.file_name)
except Exception as e:
logger.exception(
'SICD: All image segments appear viable for the SICD,\n\t'
'but SICDReader construction failed')
return all_valid
def check_file(file_name):
"""
    Check the SICD validity for the given SICD file (i.e. an appropriately
    styled NITF) or an xml file containing the SICD structure alone.
Parameters
----------
file_name : str|SICDDetails
Returns
-------
bool
"""
if isinstance(file_name, str):
if not os.path.isfile(file_name):
raise ValueError('Got string input, but it is not a valid path')
# check if this is just an xml file
with open(file_name, 'rb') as fi:
initial_bits = fi.read(30)
if initial_bits.startswith(b'<?xml') or initial_bits.startswith(b'<SICD'):
                sicd_xml = initial_bits + fi.read()
return _evaluate_xml_string_validity(sicd_xml)[0]
return check_sicd_file(file_name)
if __name__ == '__main__':
parser = argparse.ArgumentParser('SICD Consistency')
parser.add_argument('file_name')
parser.add_argument(
'-l', '--level', default='WARNING',
choices=['INFO', 'WARNING', 'ERROR'], help="Logging level")
config = parser.parse_args()
logging.basicConfig(level=config.level)
logger.setLevel(config.level)
validity = check_file(config.file_name)
if validity:
logger.info('\nSICD: {} has been validated with no errors'.format(config.file_name))
else:
logger.error('\nSICD: {} has apparent errors'.format(config.file_name))
sys.exit(int(validity))
|
6a48d112bbf220a68ed2b3ded6bece904fb6c1aa
|
0577a46d8d28e1fd8636893bbdd2b18270bb8eb8
|
/chromium/chrome/test/enterprise/e2e/connector/common/realtime_reporting_ui_test.py
|
f37b210209c96cf15b44f33234bb37b9bbbd646e
|
[
"BSD-3-Clause"
] |
permissive
|
ric2b/Vivaldi-browser
|
388a328b4cb838a4c3822357a5529642f86316a5
|
87244f4ee50062e59667bf8b9ca4d5291b6818d7
|
refs/heads/master
| 2022-12-21T04:44:13.804535
| 2022-12-17T16:30:35
| 2022-12-17T16:30:35
| 86,637,416
| 166
| 41
|
BSD-3-Clause
| 2021-03-31T18:49:30
| 2017-03-29T23:09:05
| null |
UTF-8
|
Python
| false
| false
| 2,160
|
py
|
realtime_reporting_ui_test.py
|
# Copyright 2021 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import time
from absl import app
from pywinauto.application import Application
from selenium import webdriver
from test_util import create_chrome_webdriver
from test_util import getElementFromShadowRoot
UnsafePageLink = "http://testsafebrowsing.appspot.com/s/malware.html"
UnsafeDownloadLink = "http://testsafebrowsing.appspot.com/s/badrep.exe"
def visit(window, url):
"""Visit a specific URL through pywinauto.Application.
SafeBrowsing intercepts HTTP requests & hangs WebDriver.get(), which prevents
us from getting the page source. Using pywinauto to visit the pages instead.
"""
window.Edit.set_edit_text(url).type_keys("%{ENTER}")
time.sleep(10)
def main(argv):
exclude_switches = ["disable-background-networking"]
chrome_options = webdriver.ChromeOptions()
chrome_options.add_experimental_option("excludeSwitches", exclude_switches)
driver = create_chrome_webdriver(chrome_options=chrome_options)
try:
app = Application(backend="uia")
app.connect(title_re='.*Chrome|.*Chromium')
window = app.top_window()
# Wait for Chrome to download SafeBrowsing lists in the background.
# There's no trigger to force this operation or synchronize on it, but quick
# experiments have shown 3-4 minutes in most cases, so 5 should be plenty.
time.sleep(60 * 5)
# Verify Policy status legend in chrome://policy page
policy_url = "chrome://policy"
driver.get(policy_url)
    driver.find_element_by_id('reload-policies').click()
# Give the page 2 seconds to render the legend
time.sleep(2)
status_box = driver.find_element_by_css_selector("status-box")
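    # the status-box element keeps its fields inside a shadow root, so they
    # are reached through the shadow-root helper rather than a plain query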
el = getElementFromShadowRoot(driver, status_box, "fieldset")
deviceId = el.find_element_by_class_name(
'machine-enrollment-device-id').text
visit(window, UnsafePageLink)
visit(window, UnsafeDownloadLink)
print('\nDeviceId:' + deviceId.strip())
except Exception as error:
print(error)
finally:
driver.quit()
if __name__ == '__main__':
app.run(main)
|
fb05f6b3d95328ee611441f30ec08282bdc4eab3
|
b3537e704ebd1e2c7a8e9e3ed9d02fc92cbf788b
|
/pokemon_v2/migrations/0009_pokemontypepast.py
|
2c8a43097d5f2c215d0c1b6eed5079d58f8842d7
|
[
"BSD-3-Clause"
] |
permissive
|
PokeAPI/pokeapi
|
559299b292f3af19b23f4e1cad7656e60bb1bd08
|
264c86563ed29b46cf4e7757cdf2b4e748910d8a
|
refs/heads/master
| 2023-08-08T11:51:24.707776
| 2023-08-02T08:29:14
| 2023-08-02T08:29:14
| 27,534,934
| 3,347
| 1,078
|
BSD-3-Clause
| 2023-09-06T02:26:58
| 2014-12-04T10:17:17
|
Python
|
UTF-8
|
Python
| false
| false
| 1,924
|
py
|
0009_pokemontypepast.py
|
# Generated by Django 2.1.11 on 2021-02-06 22:03
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("pokemon_v2", "0008_auto_20201123_2045"),
]
operations = [
migrations.CreateModel(
name="PokemonTypePast",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("slot", models.IntegerField()),
(
"generation",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="pokemontypepast",
to="pokemon_v2.Generation",
),
),
(
"pokemon",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="pokemontypepast",
to="pokemon_v2.Pokemon",
),
),
(
"type",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="pokemontypepast",
to="pokemon_v2.Type",
),
),
],
options={
"abstract": False,
},
),
]
|
732597812daa19340fd30487f535b4af90d8beda
|
72a637d960d3a4a51e8a3be3737b1748111627e1
|
/trimesh/voxel/runlength.py
|
6b911a378e83d6e749abc42c0d255c55b57f8aec
|
[
"MIT"
] |
permissive
|
mikedh/trimesh
|
f9a2fef82a9614f9f15a5748eddbff26ef675c15
|
a2f89a6917d69e76914b09c7864acea3a5193f47
|
refs/heads/main
| 2023-09-01T02:54:03.797482
| 2023-08-24T19:47:33
| 2023-08-24T19:47:33
| 11,745,275
| 2,512
| 601
|
MIT
| 2023-09-14T18:54:59
| 2013-07-29T17:25:42
|
Python
|
UTF-8
|
Python
| false
| false
| 20,338
|
py
|
runlength.py
|
"""
Numpy encode/decode/utility implementations for run length encodings.
# Run Length Encoded Features
Encoding/decoding functions for run length encoded data.
We include code for two variations:
* run length encoding (RLE)
* binary run length encoding (BRLE)
RLE stores sequences of repeated values as the value followed by its count, e.g.
```python
dense_to_rle([5, 5, 3, 2, 2, 2, 2, 6]) == [5, 2, 3, 1, 2, 4, 6, 1]
```
i.e. the value `5` is repeated `2` times, then `3` is repeated `1` time, `2` is
repeated `4` times and `6` is repeated `1` time.
BRLE is an optimized form for when the stored values can only be `0` or `1`.
This means we only need to save the counts, and assume the values alternate
(starting at `0`).
```python
dense_to_brle([1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0]) == \
[0, 2, 4, 7, 2]
```
i.e. the value zero occurs `0` times, followed by `2` ones, `4` zeros, `7` ones
and `2` zeros.
Sequences with counts exceeding the data type's maximum value have to be
handled carefully. For example, the `uint8` encoding of 300 zeros
(`uint8` has a max value of 255) is:
* RLE: `[0, 255, 0, 45]` (`0` repeated `255` times + `0` repeated `45` times)
* BRLE: `[255, 0, 45, 0]` (`255` zeros + `0` ones + `45` zeros + `0` ones)
This module contains implementations of various RLE/BRLE operations.
"""
import functools
import numpy as np
def brle_length(brle):
"""Optimized implementation of `len(brle_to_dense(brle))`"""
return np.sum(brle)
def rle_length(rle):
"""Optimized implementation of `len(rle_to_dense(rle_to_brle(rle)))`"""
return np.sum(rle[1::2])
def rle_to_brle(rle, dtype=None):
"""
Convert run length encoded (RLE) value/counts to BRLE.
RLE data is stored in a rank 1 array with each pair giving:
(value, count)
e.g. the RLE encoding of [4, 4, 4, 1, 1, 6] is [4, 3, 1, 2, 6, 1].
Parameters
----------
rle : (n,) int
Run length encoded data
Returns
----------
    Equivalent binary run length encoding: a list if dtype is None,
    otherwise `brle_to_brle` is called on that list before returning.
    Raises
    ----------
    ValueError
        If any of the values (the even-indexed entries) of `rle` are not 0 or 1.
"""
curr_val = 0
out = [0]
acc = 0
for value, count in np.reshape(rle, (-1, 2)):
acc += count
if value not in (0, 1):
raise ValueError(
"Invalid run length encoding for conversion to BRLE")
if value == curr_val:
out[-1] += count
else:
out.append(int(count))
curr_val = value
if len(out) % 2:
out.append(0)
if dtype is not None:
out = brle_to_brle(out, dtype=dtype)
return out
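# Hedged worked example: rle_to_brle([0, 2, 1, 3, 0, 1]) walks the (value,
# count) pairs (0, 2), (1, 3), (0, 1) and yields [2, 3, 1, 0] -- two zeros,
# three ones, one zero, with a trailing 0 appended to keep an even length.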
def brle_logical_not(brle):
"""
Get the BRLE encoding of the `logical_not`ed dense form of `brle`.
Equivalent to `dense_to_brle(np.logical_not(brle_to_dense(brle)))` but
    highly optimized - just pads brle with a 0 on each end (or strips them
    if the existing endpoints are both zero).
Parameters
----------
brle: rank 1 int array of binary run length encoded data
Returns
----------
rank 1 int array of binary run length encoded data corresponding to
element-wise not of the input.
"""
if brle[0] or brle[-1]:
return np.pad(brle, [1, 1], mode='constant')
else:
return brle[1:-1]
def merge_brle_lengths(lengths):
"""Inverse of split_long_brle_lengths."""
if len(lengths) == 0:
return []
out = [int(lengths[0])]
accumulating = False
for length in lengths[1:]:
if accumulating:
out[-1] += length
accumulating = False
else:
if length == 0:
accumulating = True
else:
out.append(int(length))
return out
def split_long_brle_lengths(lengths, dtype=np.int64):
"""
Split lengths that exceed max dtype value.
    Lengths `l` are converted into [max_val, 0] * (l // max_val) + [l % max_val]
e.g. for dtype=np.uint8 (max_value == 255)
```
split_long_brle_lengths([600, 300, 2, 6], np.uint8) == \
[255, 0, 255, 0, 90, 255, 0, 45, 2, 6]
```
"""
lengths = np.asarray(lengths)
max_val = np.iinfo(dtype).max
bad_length_mask = lengths > max_val
if np.any(bad_length_mask):
# there are some bad lengths
nl = len(lengths)
repeats = np.asarray(lengths) // max_val
remainders = (lengths % max_val).astype(dtype)
lengths = np.concatenate(
[np.array([max_val, 0] * repeat + [remainder], dtype=dtype)
for repeat, remainder in zip(repeats, remainders)])
lengths = lengths.reshape((np.sum(repeats) * 2 + nl,)).astype(dtype)
return lengths
elif lengths.dtype != dtype:
return lengths.astype(dtype)
else:
return lengths
def dense_to_brle(dense_data, dtype=np.int64):
"""
Get the binary run length encoding of `dense_data`.
Parameters
----------
dense_data: rank 1 bool array of data to encode.
dtype: numpy int type.
Returns
----------
Binary run length encoded rank 1 array of dtype `dtype`.
Raises
----------
    ValueError if dense_data is not a rank 1 bool array.
"""
if dense_data.dtype != bool:
raise ValueError("`dense_data` must be bool")
if len(dense_data.shape) != 1:
raise ValueError("`dense_data` must be rank 1.")
n = len(dense_data)
starts = np.r_[0, np.flatnonzero(dense_data[1:] != dense_data[:-1]) + 1]
lengths = np.diff(np.r_[starts, n])
lengths = split_long_brle_lengths(lengths, dtype=dtype)
if dense_data[0]:
lengths = np.pad(lengths, [1, 0], mode='constant')
return lengths
_ft = np.array([False, True], dtype=bool)
def brle_to_dense(brle_data, vals=None):
"""Decode binary run length encoded data to dense.
Parameters
----------
brle_data: BRLE counts of False/True values
vals: if not `None`, a length 2 array/list/tuple with False/True substitute
values, e.g. brle_to_dense([2, 3, 1, 0], [7, 9]) == [7, 7, 9, 9, 9, 7]
Returns
----------
rank 1 dense data of dtype `bool if vals is None else vals.dtype`
Raises
----------
ValueError if vals it not None and shape is not (2,)
"""
if vals is None:
vals = _ft
else:
vals = np.asarray(vals)
if vals.shape != (2,):
raise ValueError("vals.shape must be (2,), got %s" % (vals.shape))
ft = np.repeat(
_ft[np.newaxis, :], (len(brle_data) + 1) // 2, axis=0).flatten()
return np.repeat(ft[:len(brle_data)], brle_data).flatten()
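# Hedged note: dense_to_brle and brle_to_dense are exact inverses -- for any
# rank-1 bool array x, brle_to_dense(dense_to_brle(x)) reproduces x, since
# splitting long runs into (max_val, 0) pairs preserves the dense expansion.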
def rle_to_dense(rle_data, dtype=np.int64):
"""Get the dense decoding of the associated run length encoded data."""
values, counts = np.split(np.reshape(rle_data, (-1, 2)), 2, axis=-1)
if dtype is not None:
values = np.asanyarray(values, dtype=dtype)
try:
result = np.repeat(np.squeeze(values, axis=-1),
np.squeeze(counts, axis=-1))
except TypeError:
# on windows it sometimes fails to cast data type
result = np.repeat(np.squeeze(values.astype(np.int64), axis=-1),
np.squeeze(counts.astype(np.int64), axis=-1))
return result
def dense_to_rle(dense_data, dtype=np.int64):
"""Get run length encoding of the provided dense data."""
n = len(dense_data)
starts = np.r_[0, np.flatnonzero(dense_data[1:] != dense_data[:-1]) + 1]
lengths = np.diff(np.r_[starts, n])
values = dense_data[starts]
values, lengths = split_long_rle_lengths(values, lengths, dtype=dtype)
out = np.stack((values, lengths), axis=1)
return out.flatten()
def split_long_rle_lengths(values, lengths, dtype=np.int64):
"""
Split long lengths in the associated run length encoding.
e.g.
```python
split_long_rle_lengths([5, 300, 2, 12], np.uint8) == [5, 255, 5, 45, 2, 12]
```
Parameters
----------
values: values column of run length encoding, or `rle[::2]`
lengths: counts in run length encoding, or `rle[1::2]`
dtype: numpy data type indicating the maximum value.
Returns
----------
values, lengths associated with the appropriate splits. `lengths` will be
of type `dtype`, while `values` will be the same as the value passed in.
"""
max_length = np.iinfo(dtype).max
lengths = np.asarray(lengths)
repeats = lengths // max_length
if np.any(repeats):
repeats += 1
remainder = lengths % max_length
values = np.repeat(values, repeats)
lengths = np.empty(len(repeats), dtype=dtype)
lengths.fill(max_length)
lengths = np.repeat(lengths, repeats)
lengths[np.cumsum(repeats) - 1] = remainder
elif lengths.dtype != dtype:
lengths = lengths.astype(dtype)
return values, lengths
def merge_rle_lengths(values, lengths):
"""Inverse of split_long_rle_lengths except returns normal python lists."""
ret_values = []
ret_lengths = []
curr = None
for value, length in zip(values, lengths):
if length == 0:
continue
if value == curr:
ret_lengths[-1] += length
else:
curr = value
ret_lengths.append(int(length))
ret_values.append(value)
return ret_values, ret_lengths
def brle_to_rle(brle, dtype=np.int64):
if len(brle) % 2 == 1:
brle = np.concatenate([brle, [0]])
lengths = brle
values = np.tile(_ft, len(brle) // 2)
return rle_to_rle(
np.stack((values, lengths), axis=1).flatten(), dtype=dtype)
def brle_to_brle(brle, dtype=np.int64):
"""
Almost the identity function.
Checks for possible merges and required splits.
"""
return split_long_brle_lengths(merge_brle_lengths(brle), dtype=dtype)
def rle_to_rle(rle, dtype=np.int64):
"""
Almost the identity function.
Checks for possible merges and required splits.
"""
values, lengths = np.reshape(rle, (-1, 2)).T
values, lengths = merge_rle_lengths(values, lengths)
values, lengths = split_long_rle_lengths(values, lengths, dtype=dtype)
return np.stack((values, lengths), axis=1).flatten()
def _unsorted_gatherer(indices, sorted_gather_fn):
if not isinstance(indices, np.ndarray):
indices = np.array(indices, copy=False)
order = np.argsort(indices)
ordered_indices = indices[order]
def f(data, dtype=None):
result = np.empty(
len(order), dtype=dtype or getattr(
data, 'dtype', None))
result[order] = tuple(sorted_gather_fn(data, ordered_indices))
return result
return f
def sorted_rle_gather_1d(rle_data, ordered_indices):
"""
    Gather rle_data at ordered_indices.
    This is equivalent to `rle_to_dense(rle_data)[ordered_indices]` but avoids
    the decoding.
    Parameters
    ----------
    rle_data: iterable of run-length-encoded data.
    ordered_indices: iterable of ints in ascending order.
    Returns
    ----------
    iterable of `rle_data` values at the dense indices, same length as
    ordered indices.
"""
data_iter = iter(rle_data)
index_iter = iter(ordered_indices)
try:
index = next(index_iter)
except StopIteration:
return
start = 0
while True:
while start <= index:
try:
value = next(data_iter)
start += next(data_iter)
except StopIteration:
raise IndexError(
                    'Index %d out of range of rle_data length %d'
% (index, start))
try:
while index < start:
yield value
index = next(index_iter)
except StopIteration:
break
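# Hedged worked example: with rle_data = [7, 2, 9, 3] (dense form
# [7, 7, 9, 9, 9]) and ordered_indices = [0, 3, 4], this yields 7, 9, 9.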
def rle_mask(rle_data, mask):
"""
Perform masking of the input run-length data.
Parameters
----------
rle_data: iterable of run length encoded data
mask: iterable of bools corresponding to the dense mask.
Returns
----------
iterable of dense values of rle_data wherever mask is True.
"""
data_iter = iter(rle_data)
mask_iter = iter(mask)
while True:
try:
value = next(data_iter)
count = next(data_iter)
except StopIteration:
break
for _ in range(count):
m = next(mask_iter)
if m:
yield value
def brle_mask(rle_data, mask):
"""
Perform masking of the input binary run-length data.
Parameters
----------
    rle_data: iterable of binary run length encoded data
    mask: iterable of bools corresponding to the dense mask.
    Returns
    ----------
    iterable of dense values of rle_data wherever mask is True.
"""
data_iter = iter(rle_data)
mask_iter = iter(mask)
value = True
while True:
try:
value = not value
count = next(data_iter)
except StopIteration:
break
for _ in range(count):
m = next(mask_iter)
if m:
yield value
def rle_gatherer_1d(indices):
"""
Get a gather function at the given indices.
Because gathering on RLE data requires sorting, for instances where
gathering at the same indices on different RLE data this can save the
sorting process.
If only gathering on a single RLE iterable, use `rle_gather_1d`.
Parameters
----------
indices: iterable of integers
Returns
----------
gather function, mapping `(rle_data, dtype=None) -> values`.
`values` will have the same length as `indices` and dtype provided,
or rle_data.dtype if no dtype is provided.
"""
return _unsorted_gatherer(indices, sorted_rle_gather_1d)
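# Hedged usage sketch -- build the gatherer once and reuse it across several
# RLE arrays sharing the same (unsorted) index pattern, paying the argsort
# cost only once:
#   gather = rle_gatherer_1d([4, 0, 2])
#   gather(np.array([7, 2, 9, 3]))  # -> array([9, 7, 9])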
def rle_gather_1d(rle_data, indices, dtype=None):
"""
Gather RLE data values at the provided dense indices.
This is equivalent to `rle_to_dense(rle_data)[indices]` but the
implementation does not require the construction of the dense array.
If indices is known to be in order, use `sorted_gather_1d`.
Parameters
----------
rle_data: run length encoded data
indices: dense indices
dtype: numpy dtype. If not provided, uses rle_data.dtype
Returns
----------
    numpy array of dense data at indices; same length as indices, with the
    given dtype (rle_data.dtype if dtype is None).
"""
return rle_gatherer_1d(indices)(rle_data, dtype=dtype)
def sorted_brle_gather_1d(brle_data, ordered_indices):
"""
    Gather brle_data at ordered_indices.
    This is equivalent to `brle_to_dense(brle_data)[ordered_indices]` but
    avoids the decoding.
    Parameters
    ----------
    brle_data: iterable of binary run-length-encoded data.
    ordered_indices: iterable of ints in ascending order.
    Returns
    ----------
    `brle_data` iterable of values at the dense indices, same length as
ordered indices.
"""
data_iter = iter(brle_data)
index_iter = iter(ordered_indices)
try:
index = next(index_iter)
except StopIteration:
return
start = 0
value = True
while True:
while start <= index:
try:
value = not value
start += next(data_iter)
except StopIteration:
raise IndexError(
                    'Index %d out of range of brle_data length %d'
% (index, start))
try:
while index < start:
yield value
index = next(index_iter)
except StopIteration:
break
def brle_gatherer_1d(indices):
"""
Get a gather function at the given indices.
Because gathering on BRLE data requires sorting, for instances where
gathering at the same indices on different RLE data this can save the
sorting process.
If only gathering on a single RLE iterable, use `brle_gather_1d`.
Parameters
----------
indices: iterable of integers
Returns
----------
    gather function, mapping `brle_data -> values`.
    `values` will be a bool array with the same length as `indices`
    (the output dtype is fixed to bool).
"""
return functools.partial(
_unsorted_gatherer(indices, sorted_brle_gather_1d), dtype=bool)
def brle_gather_1d(brle_data, indices):
"""
Gather BRLE data values at the provided dense indices.
    This is equivalent to `brle_to_dense(brle_data)[indices]` but the
    implementation does not require the construction of the dense array.
    If indices is known to be in order, use `sorted_brle_gather_1d`.
    Parameters
    ----------
    brle_data: binary run length encoded data
    indices: dense indices
    Returns
    ----------
    numpy bool array of dense data at indices, same length as indices
"""
return brle_gatherer_1d(indices)(brle_data)
def brle_reverse(brle_data):
"""Equivalent to dense_to_brle(brle_to_dense(brle_data)[-1::-1])."""
if len(brle_data) % 2 == 0:
brle_data = np.concatenate([brle_data, [0]], axis=0)
    end = 0 if brle_data[0] == 0 else None
    return brle_data[-1:end:-1]
def rle_reverse(rle_data):
"""Get the rle encoding of the reversed dense array."""
if not isinstance(rle_data, np.ndarray):
rle_data = np.array(rle_data, copy=False)
rle_data = np.reshape(rle_data, (-1, 2))
rle_data = rle_data[-1::-1]
return np.reshape(rle_data, (-1,))
def rle_to_sparse(rle_data):
"""Get dense indices associated with non-zeros."""
indices = []
values = []
it = iter(rle_data)
index = 0
try:
while True:
value = next(it)
counts = next(it)
end = index + counts
if value:
indices.append(np.arange(index, end, dtype=np.int64))
values.append(np.repeat(value, counts))
index = end
except StopIteration:
pass
if len(indices) == 0:
assert len(values) == 0
return indices, values
indices = np.concatenate(indices)
values = np.concatenate(values)
return indices, values
def brle_to_sparse(brle_data, dtype=np.int64):
ends = np.cumsum(brle_data)
indices = [np.arange(s, e, dtype=dtype) for s, e in
zip(ends[::2], ends[1::2])]
return np.concatenate(indices)
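# Hedged worked example: brle_to_sparse([2, 3, 1, 2]) computes run ends
# [2, 5, 6, 8] and returns the True indices [2, 3, 4, 6, 7] of the dense
# form [0, 0, 1, 1, 1, 0, 1, 1].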
def rle_strip(rle_data):
"""
Remove leading and trailing zeros.
Parameters
----------
rle_data: run length encoded data
Returns
----------
(stripped_rle_data, padding)
stripped_rle_data: rle data without any leading or trailing zeros
padding: 2-element dense padding
"""
rle_data = np.reshape(rle_data, (-1, 2))
start = 0
final_i = len(rle_data)
for i, (val, count) in enumerate(rle_data):
if val and count > 0:
final_i = i
break
else:
start += count
end = 0
final_j = len(rle_data)
for j, (val, count) in enumerate(rle_data[::-1]):
if val and count > 0:
final_j = j
break
else:
end += count
rle_data = rle_data[
final_i:None if final_j == 0 else -final_j].reshape((-1,))
return rle_data, (start, end)
def brle_strip(brle_data):
"""
Remove leading and trailing zeros.
Parameters
----------
brle_data: binary run length encoded data.
Returns
----------
(stripped_brle_data, padding)
stripped_brle_data: rle data without any leading or trailing zeros
padding: 2-element dense padding
"""
start = 0
val = True
final_i = len(brle_data)
for i, count in enumerate(brle_data):
val = not val
if val and count > 0:
final_i = i
break
else:
start += count
end = 0
final_j = len(brle_data)
val = bool(len(brle_data) % 2)
for j, count in enumerate(brle_data[::-1]):
val = not val
if val and count > 0:
final_j = j
break
else:
end += count
brle_data = brle_data[final_i:None if final_j == 0 else -final_j]
brle_data = np.concatenate([[0], brle_data])
return brle_data, (start, end)
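# Hedged worked example: brle_strip([2, 3, 4]) -- two zeros, three ones, four
# zeros -- strips both zero runs and returns ([0, 3], (2, 4)): the stripped
# encoding plus the (leading, trailing) dense padding that was removed.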
|
39e8ded112f32998994429c3a40a83a3c31f66db
|
18078013baa3d4972f5deeb41faa7ad00cff1475
|
/moses/latentgan/model.py
|
69b43022827cfcbfd8686b2cadaea683753b474e
|
[
"MIT"
] |
permissive
|
molecularsets/moses
|
946e7457e0b95a15799e585ab59ba3bd859555e3
|
dd7ed6ab38e23afd3ef5371d67939a1760bd8599
|
refs/heads/master
| 2023-08-11T09:38:40.574516
| 2023-06-22T15:28:31
| 2023-06-22T15:28:31
| 159,494,117
| 727
| 245
|
MIT
| 2023-06-22T15:28:33
| 2018-11-28T11:51:17
|
Python
|
UTF-8
|
Python
| false
| false
| 7,385
|
py
|
model.py
|
import os
import sys
import numpy as np
import torch
import torch.nn as nn
from torch.utils import data
import torch.autograd as autograd
from rdkit import Chem
class LatentGAN(nn.Module):
def __init__(self, vocabulary, config):
super(LatentGAN, self).__init__()
self.vocabulary = vocabulary
self.Generator = Generator(
data_shape=(1, config.latent_vector_dim))
self.model_version = config.heteroencoder_version
self.Discriminator = Discriminator(
data_shape=(1, config.latent_vector_dim))
self.sample_decoder = None
self.model_loaded = False
self.new_batch_size = 256
# init params
cuda = True if torch.cuda.is_available() else False
if cuda:
self.Discriminator.cuda()
self.Generator.cuda()
self.Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
def forward(self, n_batch):
out = self.sample(n_batch)
return out
def encode_smiles(self, smiles_in, encoder=None):
model = load_model(model_version=encoder)
# MUST convert SMILES to binary mols for the model to accept them
# (it re-converts them to SMILES internally)
mols_in = [Chem.rdchem.Mol.ToBinary(Chem.MolFromSmiles(smiles))
for smiles in smiles_in]
latent = model.transform(model.vectorize(mols_in))
return latent.tolist()
def compute_gradient_penalty(self, real_samples,
fake_samples, discriminator):
"""Calculates the gradient penalty loss for WGAN GP"""
# Random weight term for interpolation between real and fake samples
alpha = self.Tensor(np.random.random((real_samples.size(0), 1)))
# Get random interpolation between real and fake samples
interpolates = (alpha * real_samples +
((1 - alpha) * fake_samples)).requires_grad_(True)
d_interpolates = discriminator(interpolates)
fake = self.Tensor(real_samples.shape[0], 1).fill_(1.0)
# Get gradient w.r.t. interpolates
gradients = autograd.grad(
outputs=d_interpolates,
inputs=interpolates,
grad_outputs=fake,
create_graph=True,
retain_graph=True,
only_inputs=True,
)[0]
gradients = gradients.view(gradients.size(0), -1)
gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean()
return gradient_penalty
@property
def device(self):
return next(self.parameters()).device
def sample(self, n_batch, max_length=100):
if not self.model_loaded:
            # load the heteroencoder only once, on the first sampled batch
print('Heteroencoder for Sampling Loaded')
self.sample_decoder = load_model(model_version=self.model_version)
# load generator
self.Gen = self.Generator
self.Gen.eval()
self.D = self.Discriminator
            # gradients are not needed here; the sampled latents are
            # detached below (a bare torch.no_grad() call would be a no-op)
cuda = True if torch.cuda.is_available() else False
if cuda:
self.Gen.cuda()
self.D.cuda()
self.S = Sampler(generator=self.Gen)
self.model_loaded = True
        if n_batch != 256:
            print('Batch size of {} detected. Decoding '
                  'performs poorly when batch size != 256.'.format(n_batch))
        # Sampling performs very poorly on the default sampling batch
        # parameters; only the default batch size of 32 is remapped.
        if n_batch == 32:
            n_batch = 256
latent = self.S.sample(n_batch)
latent = latent.detach().cpu().numpy()
if self.new_batch_size != n_batch:
# The batch decoder creates a new instance of the decoder
# every time a new batch size is given, e.g. for the
# final batch of the generation.
self.new_batch_size = n_batch
self.sample_decoder.batch_input_length = self.new_batch_size
lat = latent
sys.stdout.flush()
smi, _ = self.sample_decoder.predict_batch(lat, temp=0)
return smi
def load_model(model_version=None):
from ddc_pub import ddc_v3 as ddc
# Import model
currentDirectory = os.getcwd()
if model_version == 'chembl':
model_name = 'chembl_pretrained'
elif model_version == 'moses':
model_name = 'moses_pretrained'
elif model_version == 'new':
model_name = 'new_model'
else:
print('No predefined model of that name found. '
              'Using the default pre-trained MOSES heteroencoder.')
model_name = 'moses_pretrained'
path = '{}/moses/latentgan/heteroencoder_models/{}' \
.format(currentDirectory, model_name)
print("Loading heteroencoder model titled {}".format(model_version))
print("Path to model file: {}".format(path))
model = ddc.DDC(model_name=path)
sys.stdout.flush()
return model
class LatentMolsDataset(data.Dataset):
def __init__(self, latent_space_mols):
self.data = latent_space_mols
def __len__(self):
return len(self.data)
def __getitem__(self, index):
return self.data[index]
class Discriminator(nn.Module):
def __init__(self, data_shape=(1, 512)):
super(Discriminator, self).__init__()
self.data_shape = data_shape
self.model = nn.Sequential(
nn.Linear(int(np.prod(self.data_shape)), 512),
nn.LeakyReLU(0.2, inplace=True),
nn.Linear(512, 256),
nn.LeakyReLU(0.2, inplace=True),
nn.Linear(256, 1),
)
def forward(self, mol):
validity = self.model(mol)
return validity
class Generator(nn.Module):
def __init__(self, data_shape=(1, 512), latent_dim=None):
super(Generator, self).__init__()
self.data_shape = data_shape
# latent dim of the generator is one of the hyperparams.
# by default it is set to the prod of data_shapes
self.latent_dim = int(np.prod(self.data_shape)) \
if latent_dim is None else latent_dim
def block(in_feat, out_feat, normalize=True):
layers = [nn.Linear(in_feat, out_feat)]
if normalize:
layers.append(nn.BatchNorm1d(out_feat, 0.8))
layers.append(nn.LeakyReLU(0.2, inplace=True))
return layers
self.model = nn.Sequential(
*block(self.latent_dim, 128, normalize=False),
*block(128, 256),
*block(256, 512),
*block(512, 1024),
nn.Linear(1024, int(np.prod(self.data_shape))),
# nn.Tanh() # expecting latent vectors to be not normalized
)
def forward(self, z):
out = self.model(z)
return out
class Sampler(object):
"""
Sampling the mols the generator.
All scripts should use this class for sampling.
"""
def __init__(self, generator: Generator):
self.G = generator
def sample(self, n):
# Sample noise as generator input
z = torch.cuda.FloatTensor(np.random.uniform(-1, 1,
(n, self.G.latent_dim)))
# Generate a batch of mols
return self.G(z)
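# Hedged usage sketch (requires a CUDA device, matching Sampler above):
#   gen = Generator(data_shape=(1, 512))
#   gen.cuda()
#   latents = Sampler(gen).sample(256)  # tensor of shape (256, 512)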
|
99e3941beb0e7d3d091a78e83ddadad61b19fbc8
|
a3d6556180e74af7b555f8d47d3fea55b94bcbda
|
/tools/bisect_repackage/bisect_repackage_utils.py
|
97f7eb0aacee913d9a328bbe603ada2f75706323
|
[
"BSD-3-Clause"
] |
permissive
|
chromium/chromium
|
aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
|
a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
|
refs/heads/main
| 2023-08-24T00:35:12.585945
| 2023-08-23T22:01:11
| 2023-08-23T22:01:11
| 120,360,765
| 17,408
| 7,102
|
BSD-3-Clause
| 2023-09-10T23:44:27
| 2018-02-05T20:55:32
| null |
UTF-8
|
Python
| false
| false
| 18,783
|
py
|
bisect_repackage_utils.py
|
# Copyright 2016 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Set of basic operations/utilities that are used by repacakging tool.
These functions were mostly imported from build/scripts/common/chromium_utils
and build/scripts/common/slave_utils.
"""
from __future__ import print_function
import errno
import os
import re
import shutil
import subprocess
import sys
import time
import zipfile
CREDENTIAL_ERROR_MESSAGE = ('You are attempting to access protected data with '
'no configured credentials')
class ExternalError(Exception):
pass
def IsWindows():
return sys.platform == 'cygwin' or sys.platform.startswith('win')
def IsLinux():
return sys.platform.startswith('linux')
def IsMac():
return sys.platform.startswith('darwin')
WIN_LINK_FUNC = None
try:
if sys.platform.startswith('win'):
import ctypes
# There's 4 possibilities on Windows for links:
# 1. Symbolic file links;
# 2. Symbolic directory links;
# 3. Hardlinked files;
# 4. Junctioned directories.
# (Hardlinked directories don't really exist.)
#
# 7-Zip does not handle symbolic file links as we want (it puts the
# content of the link, not what it refers to, and reports "CRC Error" on
# extraction). It does work as expected for symbolic directory links.
# Because the majority of the large files are in the root of the staging
# directory, we do however need to handle file links, so we do this with
# hardlinking. Junctioning requires a huge whack of code, so we take the
# slightly odd tactic of using #2 and #3, but not #1 and #4. That is,
# hardlinks for files, but symbolic links for directories.
def _WIN_LINK_FUNC(src, dst):
print('linking %s -> %s' % (src, dst))
if os.path.isdir(src):
if not ctypes.windll.kernel32.CreateSymbolicLinkA(
str(dst), str(os.path.abspath(src)), 1):
raise ctypes.WinError()
else:
if not ctypes.windll.kernel32.CreateHardLinkA(str(dst), str(src), 0):
raise ctypes.WinError()
WIN_LINK_FUNC = _WIN_LINK_FUNC
except ImportError:
# If we don't have ctypes or aren't on Windows, leave WIN_LINK_FUNC as None.
pass
class PathNotFound(Exception):
pass
def IsGitCommitHash(regex_match):
"""Checks if match is correct SHA1 hash."""
  matched_re = re.match(r'^[0-9A-F]{40}$', regex_match.upper())
if matched_re: return True
return False
def IsCommitPosition(regex_match):
"""Checks if match is correct revision(Cp number) format."""
matched_re = re.match(r'^[0-9]{6}$', regex_match)
if matched_re: return True
return False
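# Hedged examples: IsGitCommitHash accepts a 40-character hex SHA1 such as
# 'a94a8fe5ccb19ba61c4c0873d391e987982fbbd3' (case-insensitive via .upper()),
# while IsCommitPosition accepts a 6-digit commit position such as '433400'.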
def MaybeMakeDirectory(*path):
"""Creates an entire path, if it doesn't already exist."""
file_path = os.path.join(*path)
try:
os.makedirs(file_path)
except OSError, e:
if e.errno != errno.EEXIST:
raise
def RemovePath(*path):
"""Removes the file or directory at 'path', if it exists."""
file_path = os.path.join(*path)
if os.path.exists(file_path):
if os.path.isdir(file_path):
RemoveDirectory(file_path)
else:
RemoveFile(file_path)
def MoveFile(path, new_path):
"""Moves the file located at 'path' to 'new_path', if it exists."""
try:
RemoveFile(new_path)
os.rename(path, new_path)
except OSError, e:
if e.errno != errno.ENOENT:
raise
def RemoveFile(*path):
"""Removes the file located at 'path', if it exists."""
file_path = os.path.join(*path)
try:
os.remove(file_path)
except OSError, e:
if e.errno != errno.ENOENT:
raise
def CheckDepotToolsInPath():
delimiter = ';' if sys.platform.startswith('win') else ':'
path_list = os.environ['PATH'].split(delimiter)
for path in path_list:
if path.rstrip(os.path.sep).endswith('depot_tools'):
return path
return None
def RunGsutilCommand(args):
gsutil_path = CheckDepotToolsInPath()
if gsutil_path is None:
print ('Follow the instructions in this document '
'http://dev.chromium.org/developers/how-tos/install-depot-tools'
' to install depot_tools and then try again.')
sys.exit(1)
gsutil_path = os.path.join(gsutil_path, 'third_party', 'gsutil', 'gsutil')
gsutil = subprocess.Popen([sys.executable, gsutil_path] + args,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=None)
stdout, stderr = gsutil.communicate()
if gsutil.returncode:
    if (re.findall(r'status[ =]40[13]', stderr) or
stderr.startswith(CREDENTIAL_ERROR_MESSAGE)):
print('Follow these steps to configure your credentials and try'
' running the bisect-builds.py again.:\n'
' 1. Run "python %s config" and follow its instructions.\n'
' 2. If you have a @google.com account, use that account.\n'
' 3. For the project-id, just enter 0.' % gsutil_path)
sys.exit(1)
else:
raise Exception('Error running the gsutil command: %s' % stderr)
return stdout
def GSutilList(bucket):
query = '%s/' %(bucket)
stdout = RunGsutilCommand(['ls', query])
return [url[len(query):].strip('/') for url in stdout.splitlines()]
def GSUtilDownloadFile(src, dst):
command = ['cp', src, dst]
return RunGsutilCommand(command)
def GSUtilCopy(source, dest):
if not source.startswith('gs://') and not source.startswith('file://'):
source = 'file://' + source
if not dest.startswith('gs://') and not dest.startswith('file://'):
dest = 'file://' + dest
command = ['cp']
command.extend([source, dest])
return RunGsutilCommand(command)
def RunCommand(cmd, cwd=None):
"""Runs the given command and returns the exit code.
Args:
cmd: list of command arguments.
cwd: working directory to execute the command, or None if the current
working directory should be used.
Returns:
The exit code of the command.
"""
process = subprocess.Popen(cmd, cwd=cwd)
process.wait()
return process.returncode
def CopyFileToDir(src_path, dest_dir, dest_fn=None, link_ok=False):
"""Copies the file found at src_path to the dest_dir directory, with metadata.
If dest_fn is specified, the src_path is copied to that name in dest_dir,
otherwise it is copied to a file of the same name.
Raises PathNotFound if either the file or the directory is not found.
"""
# Verify the file and directory separately so we can tell them apart and
# raise PathNotFound rather than shutil.copyfile's IOError.
if not os.path.isfile(src_path):
raise PathNotFound('Unable to find file %s' % src_path)
if not os.path.isdir(dest_dir):
raise PathNotFound('Unable to find dir %s' % dest_dir)
src_file = os.path.basename(src_path)
if dest_fn:
# If we have ctypes and the caller doesn't mind links, use that to
# try to make the copy faster on Windows. http://crbug.com/418702.
if link_ok and WIN_LINK_FUNC:
WIN_LINK_FUNC(src_path, os.path.join(dest_dir, dest_fn))
else:
shutil.copy2(src_path, os.path.join(dest_dir, dest_fn))
else:
shutil.copy2(src_path, os.path.join(dest_dir, src_file))
def RemoveDirectory(*path):
"""Recursively removes a directory, even if it's marked read-only.
Remove the directory located at *path, if it exists.
shutil.rmtree() doesn't work on Windows if any of the files or directories
are read-only, which svn repositories and some .svn files are. We need to
be able to force the files to be writable (i.e., deletable) as we traverse
the tree.
Even with all this, Windows still sometimes fails to delete a file, citing
a permission error (maybe something to do with antivirus scans or disk
indexing). The best suggestion any of the user forums had was to wait a
bit and try again, so we do that too. It's hand-waving, but sometimes it
works. :/
"""
file_path = os.path.join(*path)
if not os.path.exists(file_path):
return
if sys.platform == 'win32':
# Give up and use cmd.exe's rd command.
file_path = os.path.normcase(file_path)
for _ in xrange(3):
print('RemoveDirectory running %s' % (' '.join(
['cmd.exe', '/c', 'rd', '/q', '/s', file_path])))
if not subprocess.call(['cmd.exe', '/c', 'rd', '/q', '/s', file_path]):
break
print(' Failed')
time.sleep(3)
return
def RemoveWithRetry_non_win(rmfunc, path):
if os.path.islink(path):
return os.remove(path)
else:
return rmfunc(path)
remove_with_retry = RemoveWithRetry_non_win
def RmTreeOnError(function, path, excinfo):
r"""This works around a problem whereby python 2.x on Windows has no ability
to check for symbolic links. os.path.islink always returns False. But
shutil.rmtree will fail if invoked on a symbolic link whose target was
deleted before the link. E.g., reproduce like this:
> mkdir test
> mkdir test\1
> mklink /D test\current test\1
> python -c "import chromium_utils; chromium_utils.RemoveDirectory('test')"
To avoid this issue, we pass this error-handling function to rmtree. If
we see the exact sort of failure, we ignore it. All other failures we re-
raise.
"""
exception_type = excinfo[0]
exception_value = excinfo[1]
# If shutil.rmtree encounters a symbolic link on Windows, os.listdir will
# fail with a WindowsError exception with an ENOENT errno (i.e., file not
# found). We'll ignore that error. Note that WindowsError is not defined
# for non-Windows platforms, so we use OSError (of which it is a subclass)
# to avoid lint complaints about an undefined global on non-Windows
# platforms.
if (function is os.listdir) and issubclass(exception_type, OSError):
if exception_value.errno == errno.ENOENT:
# File does not exist, and we're trying to delete, so we can ignore the
# failure.
print('WARNING: Failed to list %s during rmtree. Ignoring.\n' % path)
else:
raise
else:
raise
for root, dirs, files in os.walk(file_path, topdown=False):
# For POSIX: making the directory writable guarantees removability.
# Windows will ignore the non-read-only bits in the chmod value.
os.chmod(root, 0770)
for name in files:
remove_with_retry(os.remove, os.path.join(root, name))
for name in dirs:
remove_with_retry(lambda p: shutil.rmtree(p, onerror=RmTreeOnError),
os.path.join(root, name))
remove_with_retry(os.rmdir, file_path)
def MakeZip(output_dir, archive_name, file_list, file_relative_dir, dir_in_zip,
raise_error=True, remove_archive_directory=True, strip_files=None,
ignore_sub_folder=False):
"""Packs files into a new zip archive.
Files are first copied into a directory within the output_dir named for
the archive_name, which will be created if necessary and emptied if it
  already exists. The files are then packed using archive names
relative to the output_dir. That is, if the zipfile is unpacked in place,
it will create a directory identical to the new archive_name directory, in
the output_dir. The zip file will be named as the archive_name, plus
'.zip'.
Args:
output_dir: Absolute path to the directory in which the archive is to
be created.
    archive_name: Name of the zip archive; also names the staging
      subdirectory of output_dir holding files to be added to the new
      zipfile.
file_list: List of paths to files or subdirectories, relative to the
file_relative_dir.
file_relative_dir: Absolute path to the directory containing the files
and subdirectories in the file_list.
dir_in_zip: Directory where the files are archived into.
raise_error: Whether to raise a PathNotFound error if one of the files in
the list is not found.
remove_archive_directory: Whether to remove the archive staging directory
before copying files over to it.
    strip_files: List of executable files whose symbols should be stripped
      when zipping.
    ignore_sub_folder: Whether to junk directory paths when zipping (passes
      -j to the zip command).
Returns:
A tuple consisting of (archive_dir, zip_file_path), where archive_dir
is the full path to the newly created archive_name subdirectory.
Raises:
PathNotFound if any of the files in the list is not found, unless
raise_error is False, in which case the error will be ignored.
"""
start_time = time.clock()
# Collect files into the archive directory.
archive_dir = os.path.join(output_dir, dir_in_zip)
print('output_dir: %s, archive_name: %s' % (output_dir, archive_name))
print('archive_dir: %s, remove_archive_directory: %s, exists: %s' %
(archive_dir, remove_archive_directory, os.path.exists(archive_dir)))
if remove_archive_directory and os.path.exists(archive_dir):
# Move it even if it's not a directory as expected. This can happen with
# FILES.cfg archive creation where we create an archive staging directory
# that is the same name as the ultimate archive name.
if not os.path.isdir(archive_dir):
print('Moving old "%s" file to create same name directory.' % archive_dir)
previous_archive_file = '%s.old' % archive_dir
MoveFile(archive_dir, previous_archive_file)
else:
print('Removing %s' % archive_dir)
RemoveDirectory(archive_dir)
print('Now, os.path.exists(%s): %s' % (archive_dir,
os.path.exists(archive_dir)))
MaybeMakeDirectory(archive_dir)
for needed_file in file_list:
needed_file = needed_file.rstrip()
# These paths are relative to the file_relative_dir. We need to copy
# them over maintaining the relative directories, where applicable.
src_path = os.path.join(file_relative_dir, needed_file)
dirname, basename = os.path.split(needed_file)
try:
if os.path.isdir(src_path):
if WIN_LINK_FUNC:
WIN_LINK_FUNC(src_path, os.path.join(archive_dir, needed_file))
else:
shutil.copytree(src_path, os.path.join(archive_dir, needed_file),
symlinks=True)
elif dirname != '' and basename != '':
dest_dir = os.path.join(archive_dir, dirname)
MaybeMakeDirectory(dest_dir)
CopyFileToDir(src_path, dest_dir, basename, link_ok=True)
if strip_files and basename in strip_files:
cmd = ['strip', os.path.join(dest_dir, basename)]
RunCommand(cmd)
else:
CopyFileToDir(src_path, archive_dir, basename, link_ok=True)
if strip_files and basename in strip_files:
cmd = ['strip', os.path.join(archive_dir, basename)]
RunCommand(cmd)
except PathNotFound:
if raise_error:
raise
end_time = time.clock()
print(
'Took %f seconds to create archive directory.' % (end_time - start_time))
# Pack the zip file.
output_file = os.path.join(output_dir, '%s.zip' % archive_name)
previous_file = os.path.join(output_dir, '%s_old.zip' % archive_name)
MoveFile(output_file, previous_file)
# If we have 7z, use that as it's much faster. See http://crbug.com/418702.
windows_zip_cmd = None
if os.path.exists('C:\\Program Files\\7-Zip\\7z.exe'):
windows_zip_cmd = ['C:\\Program Files\\7-Zip\\7z.exe', 'a', '-y', '-mx1']
# On Windows we use the python zip module; on Linux and Mac, we use the zip
  # command as it will handle links and file bits (executable), which is much
  # easier than trying to do that with ZipInfo options.
start_time = time.clock()
if IsWindows() and not windows_zip_cmd:
print('Creating %s' % output_file)
def _Addfiles(to_zip_file, dirname, files_to_add):
for this_file in files_to_add:
archive_name = this_file
this_path = os.path.join(dirname, this_file)
if os.path.isfile(this_path):
# Store files named relative to the outer output_dir.
archive_name = this_path.replace(output_dir + os.sep, '')
if os.path.getsize(this_path) == 0:
compress_method = zipfile.ZIP_STORED
else:
compress_method = zipfile.ZIP_DEFLATED
to_zip_file.write(this_path, archive_name, compress_method)
print('Adding %s' % archive_name)
zip_file = zipfile.ZipFile(output_file, 'w', zipfile.ZIP_DEFLATED,
allowZip64=True)
try:
os.path.walk(archive_dir, _Addfiles, zip_file)
finally:
zip_file.close()
else:
if IsMac() or IsLinux():
zip_cmd = ['zip', '-yr1']
else:
zip_cmd = windows_zip_cmd
if ignore_sub_folder:
zip_cmd.extend(['-j'])
saved_dir = os.getcwd()
os.chdir(os.path.dirname(archive_dir))
command = zip_cmd + [output_file, os.path.basename(archive_dir)]
result = RunCommand(command)
os.chdir(saved_dir)
if result and raise_error:
raise ExternalError('zip failed: %s => %s' %
(str(command), result))
end_time = time.clock()
print('Took %f seconds to create zip.' % (end_time - start_time))
return (archive_dir, output_file)
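# A minimal usage sketch (hypothetical paths and names): stage 'chrome' and
# 'resources' from /src/out/Release under /archives/build, then produce
# /archives/build.zip.
#   archive_dir, zip_path = MakeZip(
#       output_dir='/archives', archive_name='build',
#       file_list=['chrome', 'resources'],
#       file_relative_dir='/src/out/Release', dir_in_zip='build')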
def ExtractZip(filename, output_dir, extract_file_list=None, verbose=True):
"""Extract the zip archive in the output directory."""
MaybeMakeDirectory(output_dir)
# On Linux and Mac, we use the unzip command as it will
# handle links and file bits (executable), which is much
  # easier than trying to do that with ZipInfo options.
#
# The Mac Version of unzip unfortunately does not support Zip64, whereas
# the python module does, so we have to fallback to the python zip module
# on Mac if the filesize is greater than 4GB.
#
# On Windows, try to use 7z if it is installed, otherwise fall back to python
# zip module and pray we don't have files larger than 512MB to unzip.
unzip_cmd = None
if ((IsMac() and os.path.getsize(filename) < 4 * 1024 * 1024 * 1024)
or IsLinux()):
unzip_cmd = ['unzip', '-o']
elif IsWindows() and os.path.exists('C:\\Program Files\\7-Zip\\7z.exe'):
unzip_cmd = ['C:\\Program Files\\7-Zip\\7z.exe', 'x', '-y']
if unzip_cmd:
# Make sure path is absolute before changing directories.
filepath = os.path.abspath(filename)
saved_dir = os.getcwd()
os.chdir(output_dir)
command = unzip_cmd + [filepath]
    command.extend(extract_file_list or [])
result = RunCommand(command)
os.chdir(saved_dir)
if result:
raise ExternalError('unzip failed: %s => %s' % (str(command), result))
else:
assert IsWindows() or IsMac()
zf = zipfile.ZipFile(filename)
# TODO(hinoka): This can be multiprocessed.
for name in zf.namelist():
if verbose:
print('Extracting %s' % name)
zf.extract(name, output_dir)
if IsMac():
# Restore permission bits.
os.chmod(os.path.join(output_dir, name),
zf.getinfo(name).external_attr >> 16L)
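# A minimal usage sketch (hypothetical paths). Note that extract_file_list is
# only honored on the native unzip/7z path; the zipfile fallback extracts the
# whole archive.
#   ExtractZip('/archives/build.zip', '/tmp/unpacked',
#              extract_file_list=['chrome', 'resources/app.pak'])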
blob_id: 35bd1e030808b5351a9884a38793d29980460b43
directory_id: b37bb98ec70de1b030ad1706235589e17ff1443d
path: /tests/test_pymannkendall.py
content_id: 62eda3af09ba1143c4c4843c6f696f7af2786d4c
detected_licenses: ["MIT"]
license_type: permissive
repo_name: mmhs013/pyMannKendall
snapshot_id: cacd8ae37f9466b83216da02d184c29dd80fb44f
revision_id: 1ff32de4c4f4b50e1d922763ae88b3cb677c52b0
branch_name: refs/heads/master
visit_date: 2023-01-20T22:02:35.844213
revision_date: 2023-01-14T08:33:22
committer_date: 2023-01-14T08:33:22
github_id: 174,495,388
star_events_count: 200
fork_events_count: 54
gha_license_id: MIT
gha_event_created_at: 2021-06-25T18:11:49
gha_created_at: 2019-03-08T08:09:22
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 17,387
extension: py
filename: test_pymannkendall.py
content:
# In this unit test file, we check all functions with randomly generated no-trend, trendy, and arbitrary data. The results are compared against the R packages modifiedmk, fume, rkt, and trend.
import pytest
import numpy as np
import pymannkendall as mk
@pytest.fixture
def NoTrendData():
    # Generate 360 identical values (i.e., no trend)
    NoTrendData = np.ones(360) * np.random.randint(10)
return NoTrendData
@pytest.fixture
def NoTrend2dData():
    # Generate a 360 x 2 array of identical values (i.e., no trend)
    NoTrend2dData = np.ones((360, 2)) * np.random.randint(10)
return NoTrend2dData
@pytest.fixture
def TrendData():
    # Generate 360 trending values with slope approximately 1
    TrendData = np.arange(360).astype(float) + np.random.rand(360) / 10**13
return TrendData
@pytest.fixture
def arbitrary_1d_data():
    # Arbitrary series of 360 values, including missing values (np.nan)
arbitrary_1d_data = np.array([ 32., 20., 25., 189., 240., 193., 379., 278., 301., 0., 0.,
82., 0., 4., np.nan, np.nan, 121., 234., 360., 262., 120., 30.,
11., 1., 7., 3., 31., 31., 355., 102., 248., 274., 308.,
np.nan, 5., 26., 11., 16., 6., 48., 388., 539., 431., 272.,
404., 186., 0., 2., 0., 4., 1., 54., 272., 459., 235.,
164., 365., 135., 2., np.nan, np.nan, 4., 0., 128., 210., 163.,
446., 225., 462., 467., 19., 13., 0., 3., 17., 132., 178.,
338., 525., 623., 145., 31., 19., 3., 0., 29., 25., 87.,
259., 756., 486., 180., 292., 43., 92., 1., 0., 16., 2.,
0., 130., 253., 594., 111., 273., 30., 0., 4., 0., 27.,
24., 41., 292., 378., 499., 265., 320., 227., 4., 0., 4.,
14., 8., 48., 416., 240., 404., 207., 733., 105., 0., 112.,
0., 14., 0., 30., 140., 202., 289., 159., 424., 106., 3.,
0., 65., 3., 14., 58., 268., 466., 432., 266., 240., 95.,
1., 0., 10., 26., 4., 114., 94., 289., 173., 208., 263.,
156., 5., 0., 16., 16., 14., 0., 111., 475., 534., 432.,
471., 117., 70., 1., 3., 28., 7., 401., 184., 283., 338.,
171., 335., 176., 0., 0., 10., 11., 9., 140., 102., 208.,
298., 245., 220., 29., 2., 27., 10., 13., 26., 84., 143.,
367., 749., 563., 283., 353., 10., 0., 0., 0., 0., 9.,
246., 265., 343., 429., 168., 133., 17., 0., 18., 35., 76.,
158., 272., 250., 190., 289., 466., 84., 0., 0., 0., 0.,
0., 22., 217., 299., 185., 115., 344., 203., 8., np.nan, np.nan,
0., 5., 284., 123., 254., 476., 496., 326., 27., 20., 0.,
4., 53., 72., 113., 214., 364., 219., 220., 156., 264., 0.,
13., 0., 0., 45., 90., 137., 638., 529., 261., 206., 251.,
0., 0., 5., 9., 58., 72., 138., 130., 471., 328., 356.,
523., 0., 1., 0., 0., 12., 143., 193., 184., 192., 138.,
174., 69., 1., 0., 0., 18., 25., 28., 92., 732., 320.,
256., 302., 131., 15., 0., 27., 0., 22., 20., 213., 393.,
474., 374., 109., 159., 0., 0., 0., 3., 3., 49., 205.,
128., 194., 570., 169., 89., 0., 0., 0., 0., 0., 26.,
185., 286., 92., 225., 244., 190., 3., 20.])
return arbitrary_1d_data
@pytest.fixture
def arbitrary_2d_data():
    # Arbitrary 80 x 2 array, including missing values (np.nan)
arbitrary_2d_data = np.array([[ 490., 458.], [ 540., 469.], [ 220., 4630.], [ 390., 321.], [ 450., 541.],
[ 230., 1640.], [ 360., 1060.], [ 460., 264.], [ 430., 665.], [ 430., 680.],
[ 620., 650.], [ 460., np.nan], [ 450., 380.], [ 580., 325.], [ 350., 1020.],
[ 440., 460.], [ 530., 583.], [ 380., 777.], [ 440., 1230.], [ 430., 565.],
[ 680., 533.], [ 250., 4930.], [np.nan, 3810.], [ 450., 469.], [ 500., 473.],
[ 510., 593.], [ 490., 500.], [ 700., 266.], [ 420., 495.], [ 710., 245.],
[ 430., 736.], [ 410., 508.], [ 700., 578.], [ 260., 4590.], [ 260., 4670.],
[ 500., 503.], [ 450., 469.], [ 500., 314.], [ 620., 432.], [ 670., 279.],
[np.nan, 542.], [ 470., 499.], [ 370., 741.], [ 410., 569.], [ 540., 360.],
[ 550., 513.], [ 220., 3910.], [ 460., 364.], [ 390., 472.], [ 550., 245.],
[ 320., np.nan], [ 570., 224.], [ 480., 342.], [ 520., 732.], [ 620., 240.],
[ 520., 472.], [ 430., 679.], [ 400., 1080.], [ 430., 920.], [ 490., 488.],
[ 560., np.nan], [ 370., 595.], [ 460., 295.], [ 390., 542.], [ 330., 1500.],
[ 350., 1080.], [ 480., 334.], [ 390., 423.], [ 500., 216.], [ 410., 366.],
[ 470., 750.], [ 280., 1260.], [ 510., 223.], [np.nan, 462.], [ 310., 7640.],
[ 230., 2340.], [ 470., 239.], [ 330., 1400.], [ 320., 3070.], [ 500., 244.]])
return arbitrary_2d_data
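# Each mk test below returns a namedtuple, so results can be read by attribute
# (as in these tests) or unpacked positionally. A sketch, with the field order
# taken from the pymannkendall documentation:
#   trend, h, p, z, Tau, s, var_s, slope, intercept = mk.original_test(data)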
def test_sens_slope(NoTrendData, TrendData, arbitrary_1d_data):
# check with no trend data
NoTrendRes = mk.sens_slope(NoTrendData)
assert NoTrendRes.slope == 0.0
# check with trendy data
TrendRes = mk.sens_slope(TrendData)
assert TrendRes.slope == 1.0
assert round(TrendRes.intercept) == 0.0
result = mk.sens_slope(arbitrary_1d_data)
assert result.slope == -0.006369426751592357
assert result.intercept == 96.15286624203821
def test_seasonal_sens_slope(NoTrendData, TrendData, arbitrary_1d_data):
# check with no trend data
NoTrendRes = mk.seasonal_sens_slope(NoTrendData)
assert NoTrendRes.slope == 0.0
# check with trendy data
TrendRes = mk.seasonal_sens_slope(TrendData)
assert TrendRes.slope == 12.0
assert round(TrendRes.intercept) == 0.0
result = mk.seasonal_sens_slope(arbitrary_1d_data)
assert result.slope == -0.08695652173913043
assert result.intercept == 96.31159420289855
def test_original_test(NoTrendData, TrendData, arbitrary_1d_data):
# check with no trend data
NoTrendRes = mk.original_test(NoTrendData)
assert NoTrendRes.trend == 'no trend'
assert NoTrendRes.h == False
assert NoTrendRes.p == 1.0
assert NoTrendRes.z == 0
assert NoTrendRes.Tau == 0.0
assert NoTrendRes.s == 0.0
assert NoTrendRes.var_s == 0.0
# check with trendy data
TrendRes = mk.original_test(TrendData)
assert TrendRes.trend == 'increasing'
assert TrendRes.h == True
assert TrendRes.p == 0.0
assert TrendRes.Tau == 1.0
assert TrendRes.s == 64620.0
# check with arbitrary data
result = mk.original_test(arbitrary_1d_data)
assert result.trend == 'no trend'
assert result.h == False
assert result.p == 0.37591058740506833
assert result.z == -0.8854562842589916
assert result.Tau == -0.03153167653875869
assert result.s == -1959.0
assert result.var_s == 4889800.333333333
def test_hamed_rao_modification_test(NoTrendData, TrendData, arbitrary_1d_data):
# check with no trend data
NoTrendRes = mk.hamed_rao_modification_test(NoTrendData)
assert NoTrendRes.trend == 'no trend'
assert NoTrendRes.h == False
assert NoTrendRes.p == 1.0
assert NoTrendRes.z == 0
assert NoTrendRes.Tau == 0.0
assert NoTrendRes.s == 0.0
# check with trendy data
TrendRes = mk.hamed_rao_modification_test(TrendData)
assert TrendRes.trend == 'increasing'
assert TrendRes.h == True
assert TrendRes.p == 0.0
assert TrendRes.Tau == 1.0
assert TrendRes.s == 64620.0
# check with arbitrary data
result = mk.hamed_rao_modification_test(arbitrary_1d_data)
assert result.trend == 'decreasing'
assert result.h == True
assert result.p == 0.00012203829241275166
assert result.z == -3.8419950613710894
assert result.Tau == -0.03153167653875869
assert result.s == -1959.0
assert result.var_s == 259723.81316716125
def test_hamed_rao_modification_test_lag3(NoTrendData, TrendData, arbitrary_1d_data):
# check with no trend data
NoTrendRes = mk.hamed_rao_modification_test(NoTrendData, lag=3)
assert NoTrendRes.trend == 'no trend'
assert NoTrendRes.h == False
assert NoTrendRes.p == 1.0
assert NoTrendRes.z == 0
assert NoTrendRes.Tau == 0.0
assert NoTrendRes.s == 0.0
# check with trendy data
TrendRes = mk.hamed_rao_modification_test(TrendData, lag=3)
assert TrendRes.trend == 'increasing'
assert TrendRes.h == True
assert TrendRes.p == 0.0
assert TrendRes.Tau == 1.0
assert TrendRes.s == 64620.0
# check with arbitrary data
result = mk.hamed_rao_modification_test(arbitrary_1d_data, lag=3)
assert result.trend == 'no trend'
assert result.h == False
assert result.p == 0.6037112685123898
assert result.z == -0.5190709455046154
assert result.Tau == -0.03153167653875869
assert result.s == -1959.0
assert result.var_s == 14228919.889368296
def test_yue_wang_modification_test(NoTrendData, TrendData, arbitrary_1d_data):
# check with no trend data
NoTrendRes = mk.yue_wang_modification_test(NoTrendData)
assert NoTrendRes.trend == 'no trend'
assert NoTrendRes.h == False
assert NoTrendRes.p == 1.0
assert NoTrendRes.z == 0
assert NoTrendRes.Tau == 0.0
assert NoTrendRes.s == 0.0
# check with trendy data
TrendRes = mk.yue_wang_modification_test(TrendData)
assert TrendRes.trend == 'increasing'
assert TrendRes.h == True
assert TrendRes.p == 0.0
assert TrendRes.Tau == 1.0
assert TrendRes.s == 64620.0
# check with arbitrary data
result = mk.yue_wang_modification_test(arbitrary_1d_data)
assert result.trend == 'decreasing'
assert result.h == True
np.testing.assert_allclose(result.p, 0.008401398144858296)
np.testing.assert_allclose(result.z, -2.6354977553857504)
assert result.Tau == -0.03153167653875869
assert result.s == -1959.0
np.testing.assert_allclose(result.var_s, 551950.4269211816)
def test_yue_wang_modification_test_lag1(NoTrendData, TrendData, arbitrary_1d_data):
# check with no trend data
NoTrendRes = mk.yue_wang_modification_test(NoTrendData, lag=1)
assert NoTrendRes.trend == 'no trend'
assert NoTrendRes.h == False
assert NoTrendRes.p == 1.0
assert NoTrendRes.z == 0
assert NoTrendRes.Tau == 0.0
assert NoTrendRes.s == 0.0
# check with trendy data
TrendRes = mk.yue_wang_modification_test(TrendData, lag=1)
assert TrendRes.trend == 'increasing'
assert TrendRes.h == True
assert TrendRes.p == 0.0
assert TrendRes.Tau == 1.0
assert TrendRes.s == 64620.0
# check with arbitrary data
result = mk.yue_wang_modification_test(arbitrary_1d_data, lag=1)
assert result.trend == 'no trend'
assert result.h == False
np.testing.assert_allclose(result.p, 0.5433112864060043)
np.testing.assert_allclose(result.z, -0.6078133313683783)
assert result.Tau == -0.03153167653875869
assert result.s == -1959.0
np.testing.assert_allclose(result.var_s, 10377313.384506395)
def test_pre_whitening_modification_test(NoTrendData, TrendData, arbitrary_1d_data):
# check with no trend data
NoTrendRes = mk.pre_whitening_modification_test(NoTrendData)
assert NoTrendRes.trend == 'no trend'
assert NoTrendRes.h == False
assert NoTrendRes.p == 1.0
assert NoTrendRes.z == 0
assert NoTrendRes.Tau == 0.0
# check with trendy data
TrendRes = mk.pre_whitening_modification_test(TrendData)
assert TrendRes.trend == 'increasing'
assert TrendRes.h == True
assert TrendRes.p == 0.0
# check with arbitrary data
result = mk.pre_whitening_modification_test(arbitrary_1d_data)
assert result.trend == 'no trend'
assert result.h == False
assert result.p == 0.9212742990272651
assert result.z == -0.09882867695903437
assert result.Tau == -0.003545066045066045
assert result.s == -219.0
assert result.var_s == 4865719.0
def test_trend_free_pre_whitening_modification_test(NoTrendData, TrendData, arbitrary_1d_data):
# check with no trend data
NoTrendRes = mk.trend_free_pre_whitening_modification_test(NoTrendData)
assert NoTrendRes.trend == 'no trend'
assert NoTrendRes.h == False
assert NoTrendRes.p == 1.0
assert NoTrendRes.z == 0
assert NoTrendRes.Tau == 0.0
# check with trendy data
TrendRes = mk.trend_free_pre_whitening_modification_test(TrendData)
assert TrendRes.trend == 'increasing'
assert TrendRes.h == True
assert TrendRes.p == 0.0
assert TrendRes.Tau == 1.0
# check with arbitrary data
result = mk.trend_free_pre_whitening_modification_test(arbitrary_1d_data)
assert result.trend == 'no trend'
assert result.h == False
assert result.p == 0.7755465706913385
assert result.z == -0.28512735834365455
assert result.Tau == -0.010198135198135198
assert result.s == -630.0
assert result.var_s == 4866576.0
def test_seasonal_test(NoTrendData, TrendData, arbitrary_1d_data):
# check with no trend data
NoTrendRes = mk.seasonal_test(NoTrendData, period=12)
assert NoTrendRes.trend == 'no trend'
assert NoTrendRes.h == False
assert NoTrendRes.p == 1.0
assert NoTrendRes.z == 0
assert NoTrendRes.Tau == 0.0
assert NoTrendRes.s == 0.0
# check with trendy data
TrendRes = mk.seasonal_test(TrendData, period=12)
assert TrendRes.trend == 'increasing'
assert TrendRes.h == True
assert TrendRes.p == 0.0
assert TrendRes.Tau == 1.0
assert TrendRes.s == 5220.0
# check with arbitrary data
result = mk.seasonal_test(arbitrary_1d_data, period=12)
assert result.trend == 'decreasing'
assert result.h == True
assert result.p == 0.03263834596177739
assert result.z == -2.136504114534638
assert result.Tau == -0.0794979079497908
assert result.s == -399.0
assert result.var_s == 34702.333333333336
def test_regional_test(NoTrend2dData,arbitrary_2d_data):
# check with no trend data
NoTrendRes = mk.regional_test(NoTrend2dData)
assert NoTrendRes.trend == 'no trend'
assert NoTrendRes.h == False
assert NoTrendRes.p == 1.0
assert NoTrendRes.z == 0
assert NoTrendRes.Tau == 0.0
assert NoTrendRes.s == 0.0
assert NoTrendRes.var_s == 0.0
assert NoTrendRes.slope == 0.0
# check with arbitrary data
result = mk.regional_test(arbitrary_2d_data)
assert result.trend == 'no trend'
assert result.h == False
assert result.p == 0.2613018311185482
assert result.z == -1.1233194854000186
assert result.Tau == -0.06185919343814081
assert result.s == -362.0
assert result.var_s == 103278.0
assert result.slope == -0.680446465481604
def test_correlated_multivariate_test(NoTrend2dData,arbitrary_2d_data):
# check with no trend data
NoTrendRes = mk.correlated_multivariate_test(NoTrend2dData)
assert NoTrendRes.trend == 'no trend'
assert NoTrendRes.h == False
assert NoTrendRes.Tau == 0.0
assert NoTrendRes.s == 0.0
assert NoTrendRes.var_s == 0.0
assert NoTrendRes.slope == 0.0
# check with arbitrary data
result = mk.correlated_multivariate_test(arbitrary_2d_data)
assert result.trend == 'no trend'
assert result.h == False
assert result.p == 0.05777683185903615
assert result.z == -1.8973873659119118
assert result.Tau == -0.05868196964087375
assert result.s == -317.0
assert result.var_s == 27913.000000000007
assert result.slope == -0.680446465481604
def test_correlated_seasonal_test(NoTrendData, TrendData, arbitrary_1d_data):
# check with no trend data
NoTrendRes = mk.correlated_seasonal_test(NoTrendData, period=12)
assert NoTrendRes.trend == 'no trend'
assert NoTrendRes.h == False
assert NoTrendRes.Tau == 0.0
assert NoTrendRes.s == 0.0
# check with trendy data
TrendRes = mk.correlated_seasonal_test(TrendData, period=12)
assert TrendRes.trend == 'increasing'
assert TrendRes.h == True
assert round(TrendRes.p) == 0.0
assert TrendRes.Tau == 1.0
assert TrendRes.s == 5220.0
# check with arbitrary data
result = mk.correlated_seasonal_test(arbitrary_1d_data, period=12)
assert result.trend == 'no trend'
assert result.h == False
assert result.p == 0.06032641537423844
assert result.z == -1.878400366918792
assert result.Tau == -0.10054347826086957
assert result.s == -333.0
assert result.var_s == 31427.666666666664
def test_partial_test(NoTrend2dData,arbitrary_2d_data):
# check with no trend data
NoTrendRes = mk.partial_test(NoTrend2dData)
assert NoTrendRes.trend == 'no trend'
assert NoTrendRes.h == False
assert NoTrendRes.p == 1.0
assert NoTrendRes.z == 0
assert NoTrendRes.Tau == 0.0
assert NoTrendRes.s == 0.0
assert NoTrendRes.var_s == 5205500.0
# check with arbitrary data
result = mk.partial_test(arbitrary_2d_data)
assert result.trend == 'no trend'
assert result.h == False
assert result.p == 0.06670496348739152
assert result.z == -1.8336567432191642
assert result.Tau == -0.07552758237689744
assert result.s == -282.53012319329804
assert result.var_s == 23740.695506142725
assert result.slope == -0.5634920634920635
assert result.intercept == 471.9761904761905
blob_id: 2d5d14871d72e8c29b3f8792c24ba4be988bde6c
directory_id: c001930958cb94f8b91b1f734108671f1db9e9f1
path: /dash/development/component_generator.py
content_id: 35824ba69276bb33883ec6342d92b725fe20d33f
detected_licenses: ["MIT"]
license_type: permissive
repo_name: plotly/dash
snapshot_id: 73c752135937e27975071fbd144e3fb21618e7b4
revision_id: 6eaf2e17c25f7ca1847c41aafeb18e87c586cb9f
branch_name: refs/heads/dev
visit_date: 2023-08-30T21:21:06.056499
revision_date: 2023-08-29T16:49:04
committer_date: 2023-08-29T16:49:04
github_id: 33,702,544
star_events_count: 20,553
fork_events_count: 2,355
gha_license_id: MIT
gha_event_created_at: 2023-08-31T20:51:14
gha_created_at: 2015-04-10T01:53:08
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 8,471
extension: py
filename: component_generator.py
content:
from collections import OrderedDict
import json
import sys
import subprocess
import shlex
import os
import argparse
import shutil
import functools
import pkg_resources
import yaml
from ._r_components_generation import write_class_file
from ._r_components_generation import generate_exports
from ._py_components_generation import generate_class_file
from ._py_components_generation import generate_imports
from ._py_components_generation import generate_classes_files
from ._jl_components_generation import generate_struct_file
from ._jl_components_generation import generate_module
reserved_words = [
"UNDEFINED",
"REQUIRED",
"to_plotly_json",
"available_properties",
"available_wildcard_properties",
"_.*",
]
class _CombinedFormatter(
argparse.ArgumentDefaultsHelpFormatter, argparse.RawDescriptionHelpFormatter
):
pass
# pylint: disable=too-many-locals, too-many-arguments, too-many-branches, too-many-statements
def generate_components(
components_source,
project_shortname,
package_info_filename="package.json",
ignore="^_",
rprefix=None,
rdepends="",
rimports="",
rsuggests="",
jlprefix=None,
metadata=None,
keep_prop_order=None,
max_props=None,
):
project_shortname = project_shortname.replace("-", "_").rstrip("/\\")
is_windows = sys.platform == "win32"
extract_path = pkg_resources.resource_filename("dash", "extract-meta.js")
reserved_patterns = "|".join(f"^{p}$" for p in reserved_words)
os.environ["NODE_PATH"] = "node_modules"
shutil.copyfile(
"package.json", os.path.join(project_shortname, package_info_filename)
)
if not metadata:
env = os.environ.copy()
# Ensure local node modules is used when the script is packaged.
env["MODULES_PATH"] = os.path.abspath("./node_modules")
cmd = shlex.split(
f'node {extract_path} "{ignore}" "{reserved_patterns}" {components_source}',
posix=not is_windows,
)
proc = subprocess.Popen( # pylint: disable=consider-using-with
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=is_windows,
env=env,
)
out, err = proc.communicate()
status = proc.poll()
if err:
print(err.decode(), file=sys.stderr)
if not out:
print(
f"Error generating metadata in {project_shortname} (status={status})",
file=sys.stderr,
)
sys.exit(1)
metadata = safe_json_loads(out.decode("utf-8"))
py_generator_kwargs = {}
if keep_prop_order is not None:
keep_prop_order = [
component.strip(" ") for component in keep_prop_order.split(",")
]
py_generator_kwargs["prop_reorder_exceptions"] = keep_prop_order
if max_props:
py_generator_kwargs["max_props"] = max_props
generator_methods = [functools.partial(generate_class_file, **py_generator_kwargs)]
if rprefix is not None or jlprefix is not None:
with open("package.json", "r", encoding="utf-8") as f:
pkg_data = safe_json_loads(f.read())
if rprefix is not None:
if not os.path.exists("man"):
os.makedirs("man")
if not os.path.exists("R"):
os.makedirs("R")
if os.path.isfile("dash-info.yaml"):
with open("dash-info.yaml", encoding="utf-8") as yamldata:
rpkg_data = yaml.safe_load(yamldata)
else:
rpkg_data = None
generator_methods.append(
functools.partial(write_class_file, prefix=rprefix, rpkg_data=rpkg_data)
)
if jlprefix is not None:
generator_methods.append(
functools.partial(generate_struct_file, prefix=jlprefix)
)
components = generate_classes_files(project_shortname, metadata, *generator_methods)
with open(
os.path.join(project_shortname, "metadata.json"), "w", encoding="utf-8"
) as f:
json.dump(metadata, f, indent=2)
generate_imports(project_shortname, components)
if rprefix is not None:
generate_exports(
project_shortname,
components,
metadata,
pkg_data,
rpkg_data,
rprefix,
rdepends,
rimports,
rsuggests,
)
if jlprefix is not None:
generate_module(project_shortname, components, metadata, pkg_data, jlprefix)
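# A minimal programmatic sketch (hypothetical arguments): the same entry point
# that the CLI below drives, called directly from Python.
#   generate_components('./src/components', 'my_components',
#                       package_info_filename='package.json')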
def safe_json_loads(s):
jsondata_unicode = json.loads(s, object_pairs_hook=OrderedDict)
if sys.version_info[0] >= 3:
return jsondata_unicode
return byteify(jsondata_unicode)
def component_build_arg_parser():
parser = argparse.ArgumentParser(
prog="dash-generate-components",
formatter_class=_CombinedFormatter,
description="Generate dash components by extracting the metadata "
"using react-docgen. Then map the metadata to Python classes.",
)
parser.add_argument("components_source", help="React components source directory.")
parser.add_argument(
"project_shortname", help="Name of the project to export the classes files."
)
parser.add_argument(
"-p",
"--package-info-filename",
default="package.json",
help="The filename of the copied `package.json` to `project_shortname`",
)
parser.add_argument(
"-i",
"--ignore",
default="^_",
help="Files/directories matching the pattern will be ignored",
)
parser.add_argument(
"--r-prefix",
help="Specify a prefix for Dash for R component names, write "
"components to R dir, create R package.",
)
parser.add_argument(
"--r-depends",
default="",
help="Specify a comma-separated list of R packages to be "
"inserted into the Depends field of the DESCRIPTION file.",
)
parser.add_argument(
"--r-imports",
default="",
help="Specify a comma-separated list of R packages to be "
"inserted into the Imports field of the DESCRIPTION file.",
)
parser.add_argument(
"--r-suggests",
default="",
help="Specify a comma-separated list of R packages to be "
"inserted into the Suggests field of the DESCRIPTION file.",
)
parser.add_argument(
"--jl-prefix",
help="Specify a prefix for Dash for R component names, write "
"components to R dir, create R package.",
)
parser.add_argument(
"-k",
"--keep-prop-order",
default=None,
help="Specify a comma-separated list of components which will use the prop "
"order described in the component proptypes instead of alphabetically reordered "
"props. Pass the 'ALL' keyword to have every component retain "
"its original prop order.",
)
parser.add_argument(
"--max-props",
type=int,
default=250,
help="Specify the max number of props to list in the component signature. "
"More props will still be shown in the docstring, and will still work when "
"provided as kwargs to the component. Python <3.7 only supports 255 args, "
"but you may also want to reduce further for improved readability at the "
"expense of auto-completion for the later props. Use 0 to include all props.",
)
return parser
def cli():
args = component_build_arg_parser().parse_args()
generate_components(
args.components_source,
args.project_shortname,
package_info_filename=args.package_info_filename,
ignore=args.ignore,
rprefix=args.r_prefix,
rdepends=args.r_depends,
rimports=args.r_imports,
rsuggests=args.r_suggests,
jlprefix=args.jl_prefix,
keep_prop_order=args.keep_prop_order,
max_props=args.max_props,
)
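# A minimal invocation sketch using the prog name given to argparse above
# (hypothetical source directory and package name):
#   dash-generate-components ./src/components my_components --r-prefix dashMy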
# pylint: disable=undefined-variable
def byteify(input_object):
if isinstance(input_object, dict):
return OrderedDict(
[(byteify(key), byteify(value)) for key, value in input_object.iteritems()]
)
if isinstance(input_object, list):
return [byteify(element) for element in input_object]
if isinstance(input_object, unicode): # noqa:F821
return input_object.encode("utf-8")
return input_object
if __name__ == "__main__":
cli()
blob_id: 5aa32809b73cdd815ddf1c0e053720beeefbcf7d
directory_id: 88dda5e76cef286c7db3ae7e5d1a32d28f7815a3
path: /reviewboard/reviews/tests/test_review_request_view_mixin.py
content_id: 1a06aadedc561c26877dfe0c72dd6fc2fc251cc7
detected_licenses: ["MIT"]
license_type: permissive
repo_name: reviewboard/reviewboard
snapshot_id: f4d3bada08ba9d6ef53add2d1fdb82bd6cc63a1e
revision_id: c3a991f1e9d7682239a1ab0e8661cee6da01d537
branch_name: refs/heads/master
visit_date: 2023-08-31T09:03:14.170335
revision_date: 2023-08-30T08:22:43
committer_date: 2023-08-30T08:22:43
github_id: 285,304
star_events_count: 1,141
fork_events_count: 353
gha_license_id: MIT
gha_event_created_at: 2023-06-07T16:51:02
gha_created_at: 2009-08-22T21:39:49
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 7,347
extension: py
filename: test_review_request_view_mixin.py
content:
"""Unit tests for reviewboard.reviews.views.ReviewRequestViewMixin."""
from datetime import datetime
import pytz
from django.utils import timezone
from reviewboard.reviews.models import ReviewRequest
from reviewboard.reviews.models.review_request import ReviewRequestCloseInfo
from reviewboard.reviews.views import ReviewRequestViewMixin
from reviewboard.testing import TestCase
_local_timezone = pytz.timezone('US/Pacific')
class ReviewRequestViewMixinTests(TestCase):
"""Unit tests for reviewboard.reviews.views.ReviewRequestViewMixin."""
fixtures = ['test_users']
def test_get_review_request_status_html_with_submitted(self):
"""Testing ReviewRequestViewMixin.get_review_request_status_html
with status=SUBMITTED
"""
review_request = self.create_review_request(
status=ReviewRequest.SUBMITTED,
time_added=datetime(2018, 2, 10, 9, 23, 12, tzinfo=timezone.utc))
mixin = ReviewRequestViewMixin()
mixin.review_request = review_request
with timezone.override(_local_timezone):
html = mixin.get_review_request_status_html(
review_request_details=review_request,
close_info=ReviewRequestCloseInfo(
close_description='',
is_rich_text=False,
timestamp=datetime(2018, 2, 12, 14, 56, 00,
tzinfo=timezone.utc)))
self.assertHTMLEqual(
html,
'Created Feb. 10, 2018 and submitted '
'<time class="timesince" datetime="2018-02-12T06:56:00-08:00">'
'Feb. 12, 2018, 6:56 a.m.</time>')
def test_get_review_request_status_html_with_submitted_no_timestamp(self):
"""Testing ReviewRequestViewMixin.get_review_request_status_html
with status=SUBMITTED and no timestamp
"""
review_request = self.create_review_request(
status=ReviewRequest.SUBMITTED,
time_added=datetime(2018, 2, 10, 9, 23, 12, tzinfo=timezone.utc))
mixin = ReviewRequestViewMixin()
mixin.review_request = review_request
with timezone.override(_local_timezone):
html = mixin.get_review_request_status_html(
review_request_details=review_request,
close_info=ReviewRequestCloseInfo(
close_description='',
is_rich_text=False,
timestamp=None))
self.assertEqual(html, 'Created Feb. 10, 2018 and submitted')
def test_get_review_request_status_html_with_discarded(self):
"""Testing ReviewRequestViewMixin.get_review_request_status_html
with status=DISCARDED
"""
review_request = self.create_review_request(
status=ReviewRequest.DISCARDED,
time_added=datetime(2018, 2, 10, 9, 23, 12, tzinfo=timezone.utc))
mixin = ReviewRequestViewMixin()
mixin.review_request = review_request
with timezone.override(_local_timezone):
html = mixin.get_review_request_status_html(
review_request_details=review_request,
close_info=ReviewRequestCloseInfo(
close_description='',
is_rich_text=False,
timestamp=datetime(2018, 2, 12, 14, 56, 00,
tzinfo=timezone.utc)))
self.assertHTMLEqual(
html,
'Created Feb. 10, 2018 and discarded '
'<time class="timesince" datetime="2018-02-12T06:56:00-08:00">'
'Feb. 12, 2018, 6:56 a.m.</time>')
def test_get_review_request_status_html_with_discarded_no_timestamp(self):
"""Testing ReviewRequestViewMixin.get_review_request_status_html
with status=DISCARDED and no timestamp
"""
review_request = self.create_review_request(
status=ReviewRequest.DISCARDED,
time_added=datetime(2018, 2, 10, 9, 23, 12, tzinfo=timezone.utc))
mixin = ReviewRequestViewMixin()
mixin.review_request = review_request
with timezone.override(_local_timezone):
html = mixin.get_review_request_status_html(
review_request_details=review_request,
close_info=ReviewRequestCloseInfo(
close_description='',
is_rich_text=False,
timestamp=None))
self.assertEqual(html, 'Created Feb. 10, 2018 and discarded')
def test_get_review_request_status_html_with_pending_review(self):
"""Testing ReviewRequestViewMixin.get_review_request_status_html
with status=PENDING_REVIEW
"""
review_request = self.create_review_request(
status=ReviewRequest.PENDING_REVIEW,
time_added=datetime(2018, 2, 10, 9, 23, 12, tzinfo=timezone.utc),
last_updated=datetime(2018, 2, 10, 15, 19, 23,
tzinfo=timezone.utc))
mixin = ReviewRequestViewMixin()
mixin.review_request = review_request
with timezone.override(_local_timezone):
html = mixin.get_review_request_status_html(
review_request_details=review_request,
close_info=ReviewRequestCloseInfo(
close_description='',
is_rich_text=False,
timestamp=None))
self.assertHTMLEqual(
html,
'Created Feb. 10, 2018 and updated '
'<time class="timesince" datetime="2018-02-10T07:19:23-08:00">'
'Feb. 10, 2018, 7:19 a.m.</time>')
def test_get_review_request_status_html_with_extra_info(self):
"""Testing ReviewRequestViewMixin.get_review_request_status_html
with extra_info
"""
review_request = self.create_review_request(
status=ReviewRequest.PENDING_REVIEW,
time_added=datetime(2018, 2, 10, 9, 23, 12, tzinfo=timezone.utc),
last_updated=datetime(2018, 2, 10, 15, 19, 23,
tzinfo=timezone.utc))
mixin = ReviewRequestViewMixin()
mixin.review_request = review_request
with timezone.override(_local_timezone):
html = mixin.get_review_request_status_html(
review_request_details=review_request,
close_info=ReviewRequestCloseInfo(
close_description='',
is_rich_text=False,
timestamp=None),
extra_info=[
{
'text': '{var} updated at {timestamp}',
'timestamp': datetime(2018, 2, 11, 23, 32, 00,
tzinfo=timezone.utc),
'extra_vars': {
'var': 'Thingie',
},
},
])
self.assertHTMLEqual(
html,
'Created Feb. 10, 2018 and updated '
'<time class="timesince" datetime="2018-02-10T07:19:23-08:00">'
'Feb. 10, 2018, 7:19 a.m.</time>'
' — '
'Thingie updated at '
'<time class="timesince" datetime="2018-02-11T15:32:00-08:00">'
'Feb. 11, 2018, 3:32 p.m.</time>')
blob_id: 106267b8081ffcbc793b8c0c82e2b1013c554376
directory_id: 5ef6c8d47864f471e26b9902d61f8c687e941f05
path: /src/genie/libs/parser/ios/tests/ShowBgpAllNeighborsPolicy/cli/equal/golden_output_expected.py
content_id: 9d154babb51b27a996d474722245ca9fb5317375
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: CiscoTestAutomation/genieparser
snapshot_id: 169c196558f1c1a0f0d10650876096f993224917
revision_id: b531eff760b2e44cd69d7a2716db6f866907c239
branch_name: refs/heads/master
visit_date: 2023-09-03T08:56:18.831340
revision_date: 2023-08-29T22:32:02
committer_date: 2023-08-29T22:32:02
github_id: 131,621,824
star_events_count: 247
fork_events_count: 409
gha_license_id: Apache-2.0
gha_event_created_at: 2023-08-29T22:32:04
gha_created_at: 2018-04-30T16:51:50
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 419
extension: py
filename: golden_output_expected.py
content:
expected_output = {
"vrf": {
"VRF1": {
"neighbor": {
"10.186.0.2": {
"address_family": {
"vpnv4 unicast": {
"nbr_af_route_map_name_in": "test",
"nbr_af_route_map_name_out": "test",
}
}
}
}
}
}
}
blob_id: 4566e574cc0468ceabb7f34518bff5614513865b
directory_id: 58cfad962e57b935e7782bb214a2008d689751d6
path: /xero_python/finance/models/contact_response.py
content_id: 5b4ef8f43f2441ce7beb6b78d70db1af4d51a1fd
detected_licenses: ["MIT"]
license_type: permissive
repo_name: XeroAPI/xero-python
snapshot_id: ce43c060c216a42efd5f47159987468deb0e4622
revision_id: 07efa3bfc87a3bd08ba217dd2b642f6a3515ddff
branch_name: refs/heads/master
visit_date: 2023-07-21T04:01:27.461727
revision_date: 2023-07-11T02:35:44
committer_date: 2023-07-11T02:35:44
github_id: 240,158,613
star_events_count: 109
fork_events_count: 42
gha_license_id: MIT
gha_event_created_at: 2023-07-11T02:35:45
gha_created_at: 2020-02-13T02:17:05
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 2,674
extension: py
filename: contact_response.py
content:
# coding: utf-8
"""
Xero Finance API
The Finance API is a collection of endpoints which customers can use in the course of a loan application, which may assist lenders to gain the confidence they need to provide capital. # noqa: E501
Contact: api@xero.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
from xero_python.models import BaseModel
class ContactResponse(BaseModel):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {"contact_id": "str", "contact_name": "str"}
attribute_map = {"contact_id": "contactId", "contact_name": "contactName"}
def __init__(self, contact_id=None, contact_name=None): # noqa: E501
"""ContactResponse - a model defined in OpenAPI""" # noqa: E501
self._contact_id = None
self._contact_name = None
self.discriminator = None
if contact_id is not None:
self.contact_id = contact_id
if contact_name is not None:
self.contact_name = contact_name
@property
def contact_id(self):
"""Gets the contact_id of this ContactResponse. # noqa: E501
Xero Identifier of contact # noqa: E501
:return: The contact_id of this ContactResponse. # noqa: E501
:rtype: str
"""
return self._contact_id
@contact_id.setter
def contact_id(self, contact_id):
"""Sets the contact_id of this ContactResponse.
Xero Identifier of contact # noqa: E501
:param contact_id: The contact_id of this ContactResponse. # noqa: E501
:type: str
"""
self._contact_id = contact_id
@property
def contact_name(self):
"""Gets the contact_name of this ContactResponse. # noqa: E501
Full name of contact/organisation # noqa: E501
:return: The contact_name of this ContactResponse. # noqa: E501
:rtype: str
"""
return self._contact_name
@contact_name.setter
def contact_name(self, contact_name):
"""Sets the contact_name of this ContactResponse.
Full name of contact/organisation # noqa: E501
:param contact_name: The contact_name of this ContactResponse. # noqa: E501
:type: str
"""
self._contact_name = contact_name
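# A minimal usage sketch (hypothetical identifier and name):
#   contact = ContactResponse(
#       contact_id="00000000-0000-0000-0000-000000000000",
#       contact_name="Acme Ltd",
#   )
#   assert contact.contact_name == "Acme Ltd"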